Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"There are two memory management related changes, the CMMA support for
KVM to avoid swap-in of freed pages and the split page table lock for
the PMD level. These two come with common code changes in mm/.
A fix for the long-standing theoretical TLB flush problem; this one
comes with a common code change in kernel/sched/.
Another set of changes is Heiko's uaccess work; included is the initial
set of patches, with more to come.
And fixes and cleanups as usual"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (36 commits)
s390/con3270: optionally disable auto update
s390/mm: remove unecessary parameter from pgste_ipte_notify
s390/mm: remove unnecessary parameter from gmap_do_ipte_notify
s390/mm: fixing comment so that parameter name match
s390/smp: limit number of cpus in possible cpu mask
hypfs: Add clarification for "weight_min" attribute
s390: update defconfigs
s390/ptrace: add support for PTRACE_SINGLEBLOCK
s390/perf: make print_debug_cf() static
s390/topology: Remove call to update_cpu_masks()
s390/compat: remove compat exec domain
s390: select CONFIG_TTY for use of tty in unconditional keyboard driver
s390/appldata_os: fix cpu array size calculation
s390/checksum: remove memset() within csum_partial_copy_from_user()
s390/uaccess: remove copy_from_user_real()
s390/sclp_early: Return correct HSA block count also for zero
s390: add some drivers/subsystems to the MAINTAINERS file
s390: improve debug feature usage
s390/airq: add support for irq ranges
s390/mm: enable split page table lock for PMD level
...
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
index a2ccec3..ad22fb0 100644
--- a/Documentation/ABI/testing/sysfs-tty
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -3,8 +3,7 @@
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description:
Shows the list of currently configured
- tty devices used for the console,
- like 'tty1 ttyS0'.
+ console devices, like 'tty1 ttyS0'.
The last entry in the file is the active
device connected to /dev/console.
The file supports poll() to detect virtual
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index a8d0100..10a9369 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -82,7 +82,19 @@
has to request that the PCI layer set up the MSI capability for this
device.
-4.2.1 pci_enable_msi_range
+4.2.1 pci_enable_msi
+
+int pci_enable_msi(struct pci_dev *dev)
+
+A successful call allocates ONE interrupt to the device, regardless
+of how many MSIs the device supports. The device is switched from
+pin-based interrupt mode to MSI mode. The dev->irq number is changed
+to a new number which represents the message signaled interrupt;
+consequently, this function should be called before the driver calls
+request_irq(), because an MSI is delivered via a vector that is
+different from the vector of a pin-based interrupt.
+
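+A minimal sketch of that calling order (the foo_handler callback and
+'adapter' cookie are placeholders, not part of the PCI API):
+
+	rc = pci_enable_msi(adapter->pdev);
+	if (rc)
+		return rc;	/* or fall back to pin-based interrupts */
+
+	rc = request_irq(adapter->pdev->irq, foo_handler, 0, "foo", adapter);
+	if (rc)
+		pci_disable_msi(adapter->pdev);
+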
+4.2.2 pci_enable_msi_range
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
@@ -147,6 +159,11 @@
return pci_enable_msi_range(pdev, nvec, nvec);
}
+Note that unlike the pci_enable_msi_exact() function, which can also be
+used to enable a particular number of MSI interrupts, pci_enable_msi_range()
+returns either a negative errno or 'nvec' (not a negative errno or 0, as
+pci_enable_msi_exact() does).
+
4.2.2.3 Single MSI mode
The most notorious example of the request type described above is
@@ -158,7 +175,27 @@
return pci_enable_msi_range(pdev, 1, 1);
}
-4.2.2 pci_disable_msi
+Note that unlike the pci_enable_msi() function, which can also be used
+to enable the single MSI mode, pci_enable_msi_range() returns either a
+negative errno or 1 (not a negative errno or 0, as pci_enable_msi()
+does).
+
+4.2.3 pci_enable_msi_exact
+
+int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+
+This variation on the pci_enable_msi_range() call allows a device driver
+to request exactly 'nvec' MSIs.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to request any more MSI interrupts for
+this device.
+
+By contrast with the pci_enable_msi_range() function, pci_enable_msi_exact()
+returns zero on success, which indicates MSI interrupts have been
+successfully allocated.
+
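+Following the pattern of the earlier examples, a driver that needs
+exactly 'nvec' MSIs might wrap the call like this (foo_adapter is a
+placeholder):
+
+static int foo_driver_enable_msi(struct foo_adapter *adapter, int nvec)
+{
+	return pci_enable_msi_exact(adapter->pdev, nvec);
+}
+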
+4.2.4 pci_disable_msi
void pci_disable_msi(struct pci_dev *dev)
@@ -172,7 +209,7 @@
Failure to do so results in a BUG_ON(), leaving the device with
MSI enabled and thus leaking its vector.
-4.2.3 pci_msi_vec_count
+4.2.5 pci_msi_vec_count
int pci_msi_vec_count(struct pci_dev *dev)
@@ -257,8 +294,8 @@
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- 1, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ 1, nvec);
}
Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
@@ -269,8 +306,8 @@
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- FOO_DRIVER_MINIMUM_NVEC, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ FOO_DRIVER_MINIMUM_NVEC, nvec);
}
4.3.1.2 Exact number of MSI-X interrupts
@@ -282,10 +319,15 @@
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- nvec, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ nvec, nvec);
}
+Note that unlike the pci_enable_msix_exact() function, which can also be
+used to enable a particular number of MSI-X interrupts, pci_enable_msix_range()
+returns either a negative errno or 'nvec' (not a negative errno or 0, as
+pci_enable_msix_exact() does).
+
4.3.1.3 Specific requirements to the number of MSI-X interrupts
As noted above, there could be devices that can not operate with just any
@@ -332,7 +374,64 @@
any error code other than -ENOSPC indicates a fatal error and should not
be retried.
-4.3.2 pci_disable_msix
+4.3.2 pci_enable_msix_exact
+
+int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+
+This variation on the pci_enable_msix_range() call allows a device driver
+to request exactly 'nvec' MSI-X interrupts.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to allocate any more MSI-X interrupts for
+this device.
+
+By contrast with the pci_enable_msix_range() function, pci_enable_msix_exact()
+returns zero on success, which indicates MSI-X interrupts have been
+successfully allocated.
+
+Another version of a routine that enables MSI-X mode for a device with
+specific requirements described in chapter 4.3.1.3 might look like this:
+
+/*
+ * Assume 'minvec' and 'maxvec' are non-zero
+ */
+static int foo_driver_enable_msix(struct foo_adapter *adapter,
+ int minvec, int maxvec)
+{
+ int rc;
+
+ minvec = roundup_pow_of_two(minvec);
+ maxvec = rounddown_pow_of_two(maxvec);
+
+ if (minvec > maxvec)
+ return -ERANGE;
+
+retry:
+ rc = pci_enable_msix_exact(adapter->pdev,
+ adapter->msix_entries, maxvec);
+
+ /*
+ * -ENOSPC is the only error code allowed to be analyzed
+ */
+ if (rc == -ENOSPC) {
+ if (maxvec == 1)
+ return -ENOSPC;
+
+ maxvec /= 2;
+
+ if (minvec > maxvec)
+ return -ENOSPC;
+
+ goto retry;
+ } else if (rc < 0) {
+ return rc;
+ }
+
+ return maxvec;
+}
+
+4.3.3 pci_disable_msix
void pci_disable_msix(struct pci_dev *dev)
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 273e654d..2f0fcb2 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -31,6 +31,14 @@
(In contrast, implementation of RCU is permitted only in software licensed
under either GPL or LGPL. Sorry!!!)
+In 1987, Rashid et al. described lazy TLB-flush [RichardRashid87a].
+At first glance, this has nothing to do with RCU, but nevertheless
+this paper helped inspire the update-side batching used in the later
+RCU implementation in DYNIX/ptx. In 1988, Barbara Liskov published
+a description of Argus that noted that use of out-of-date values can
+be tolerated in some situations. Thus, this paper provides some early
+theoretical justification for use of stale data.
+
In 1990, Pugh [Pugh90] noted that explicitly tracking which threads
were reading a given data structure permitted deferred free to operate
in the presence of non-terminating threads. However, this explicit
@@ -41,11 +49,11 @@
to see how much of the performance advantage reported in 1990 remains
today.
-At about this same time, Adams [Adams91] described ``chaotic relaxation'',
-where the normal barriers between successive iterations of convergent
-numerical algorithms are relaxed, so that iteration $n$ might use
-data from iteration $n-1$ or even $n-2$. This introduces error,
-which typically slows convergence and thus increases the number of
+At about this same time, Andrews [Andrews91textbook] described ``chaotic
+relaxation'', where the normal barriers between successive iterations
+of convergent numerical algorithms are relaxed, so that iteration $n$
+might use data from iteration $n-1$ or even $n-2$. This introduces
+error, which typically slows convergence and thus increases the number of
iterations required. However, this increase is sometimes more than made
up for by a reduction in the number of expensive barrier operations,
which are otherwise required to synchronize the threads at the end
@@ -55,7 +63,8 @@
In 1992, Henry (now Alexia) Massalin completed a dissertation advising
parallel programmers to defer processing when feasible to simplify
-synchronization. RCU makes extremely heavy use of this advice.
+synchronization [HMassalinPhD]. RCU makes extremely heavy use of
+this advice.
In 1993, Jacobson [Jacobson93] verbally described what is perhaps the
simplest deferred-free technique: simply waiting a fixed amount of time
@@ -90,27 +99,29 @@
systems made pervasive use of RCU in place of "existence locks", which
greatly simplifies locking hierarchies and helps avoid deadlocks.
-2001 saw the first RCU presentation involving Linux [McKenney01a]
-at OLS. The resulting abundance of RCU patches was presented the
-following year [McKenney02a], and use of RCU in dcache was first
-described that same year [Linder02a].
+The year 2000 saw an email exchange that would likely have
+led to yet another independent invention of something like RCU
+[RustyRussell2000a,RustyRussell2000b]. Instead, 2001 saw the first
+RCU presentation involving Linux [McKenney01a] at OLS. The resulting
+abundance of RCU patches was presented the following year [McKenney02a],
+and use of RCU in dcache was first described that same year [Linder02a].
Also in 2002, Michael [Michael02b,Michael02a] presented "hazard-pointer"
techniques that defer the destruction of data structures to simplify
non-blocking synchronization (wait-free synchronization, lock-free
synchronization, and obstruction-free synchronization are all examples of
-non-blocking synchronization). In particular, this technique eliminates
-locking, reduces contention, reduces memory latency for readers, and
-parallelizes pipeline stalls and memory latency for writers. However,
-these techniques still impose significant read-side overhead in the
-form of memory barriers. Researchers at Sun worked along similar lines
-in the same timeframe [HerlihyLM02]. These techniques can be thought
-of as inside-out reference counts, where the count is represented by the
-number of hazard pointers referencing a given data structure rather than
-the more conventional counter field within the data structure itself.
-The key advantage of inside-out reference counts is that they can be
-stored in immortal variables, thus allowing races between access and
-deletion to be avoided.
+non-blocking synchronization). The corresponding journal article appeared
+in 2004 [MagedMichael04a]. This technique eliminates locking, reduces
+contention, reduces memory latency for readers, and parallelizes pipeline
+stalls and memory latency for writers. However, these techniques still
+impose significant read-side overhead in the form of memory barriers.
+Researchers at Sun worked along similar lines in the same timeframe
+[HerlihyLM02]. These techniques can be thought of as inside-out reference
+counts, where the count is represented by the number of hazard pointers
+referencing a given data structure rather than the more conventional
+counter field within the data structure itself. The key advantage
+of inside-out reference counts is that they can be stored in immortal
+variables, thus allowing races between access and deletion to be avoided.
By the same token, RCU can be thought of as a "bulk reference count",
where some form of reference counter covers all references by a given CPU
@@ -123,8 +134,10 @@
In 2003, the K42 group described how RCU could be used to create
hot-pluggable implementations of operating-system functions [Appavoo03a].
-Later that year saw a paper describing an RCU implementation of System
-V IPC [Arcangeli03], and an introduction to RCU in Linux Journal
+Later that year saw a paper describing an RCU implementation
+of System V IPC [Arcangeli03] (following up on a suggestion by
+Hugh Dickins [Dickins02a] and an implementation by Mingming Cao
+[MingmingCao2002IPCRCU]), and an introduction to RCU in Linux Journal
[McKenney03a].
2004 has seen a Linux-Journal article on use of RCU in dcache
@@ -383,6 +396,21 @@
}
}
+@phdthesis{HMassalinPhD
+,author="H. Massalin"
+,title="Synthesis: An Efficient Implementation of Fundamental Operating
+System Services"
+,school="Columbia University"
+,address="New York, NY"
+,year="1992"
+,annotation={
+ Mondo optimizing compiler.
+ Wait-free stuff.
+ Good advice: defer work to avoid synchronization. See page 90
+ (PDF page 106), Section 5.4, fourth bullet point.
+}
+}
+
@unpublished{Jacobson93
,author="Van Jacobson"
,title="Avoid Read-Side Locking Via Delayed Free"
@@ -671,6 +699,20 @@
[Viewed October 18, 2004]"
}
+@conference{Michael02b
+,author="Maged M. Michael"
+,title="High Performance Dynamic Lock-Free Hash Tables and List-Based Sets"
+,Year="2002"
+,Month="August"
+,booktitle="{Proceedings of the 14\textsuperscript{th} Annual ACM
+Symposium on Parallel
+Algorithms and Architecture}"
+,pages="73-82"
+,annotation={
+Like the title says...
+}
+}
+
@Conference{Linder02a
,Author="Hanna Linder and Dipankar Sarma and Maneesh Soni"
,Title="Scalability of the Directory Entry Cache"
@@ -727,6 +769,24 @@
}
}
+@conference{Michael02a
+,author="Maged M. Michael"
+,title="Safe Memory Reclamation for Dynamic Lock-Free Objects Using Atomic
+Reads and Writes"
+,Year="2002"
+,Month="August"
+,booktitle="{Proceedings of the 21\textsuperscript{st} Annual ACM
+Symposium on Principles of Distributed Computing}"
+,pages="21-30"
+,annotation={
+ Each thread keeps an array of pointers to items that it is
+ currently referencing. Sort of an inside-out garbage collection
+ mechanism, but one that requires the accessing code to explicitly
+ state its needs. Also requires read-side memory barriers on
+ most architectures.
+}
+}
+
@unpublished{Dickins02a
,author="Hugh Dickins"
,title="Use RCU for System-V IPC"
@@ -735,6 +795,17 @@
,note="private communication"
}
+@InProceedings{HerlihyLM02
+,author={Maurice Herlihy and Victor Luchangco and Mark Moir}
+,title="The Repeat Offender Problem: A Mechanism for Supporting Dynamic-Sized,
+Lock-Free Data Structures"
+,booktitle={Proceedings of 16\textsuperscript{th} International
+Symposium on Distributed Computing}
+,year=2002
+,month="October"
+,pages="339-353"
+}
+
@unpublished{Sarma02b
,Author="Dipankar Sarma"
,Title="Some dcache\_rcu benchmark numbers"
@@ -749,6 +820,19 @@
}
}
+@unpublished{MingmingCao2002IPCRCU
+,Author="Mingming Cao"
+,Title="[PATCH]updated ipc lock patch"
+,month="October"
+,year="2002"
+,note="Available:
+\url{https://lkml.org/lkml/2002/10/24/262}
+[Viewed February 15, 2014]"
+,annotation={
+ Mingming Cao's patch to introduce RCU to SysV IPC.
+}
+}
+
@unpublished{LinusTorvalds2003a
,Author="Linus Torvalds"
,Title="Re: {[PATCH]} small fixes in brlock.h"
@@ -982,6 +1066,23 @@
}
}
+@article{MagedMichael04a
+,author="Maged M. Michael"
+,title="Hazard Pointers: Safe Memory Reclamation for Lock-Free Objects"
+,Year="2004"
+,Month="June"
+,journal="IEEE Transactions on Parallel and Distributed Systems"
+,volume="15"
+,number="6"
+,pages="491-504"
+,url="Available:
+\url{http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf}
+[Viewed March 1, 2005]"
+,annotation={
+ New canonical hazard-pointer citation.
+}
+}
+
@phdthesis{PaulEdwardMcKenneyPhD
,author="Paul E. McKenney"
,title="Exploiting Deferred Destruction:
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 9126619..9d10d1d 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -256,10 +256,10 @@
variations on this theme.
b. Limiting update rate. For example, if updates occur only
- once per hour, then no explicit rate limiting is required,
- unless your system is already badly broken. The dcache
- subsystem takes this approach -- updates are guarded
- by a global lock, limiting their rate.
+ once per hour, then no explicit rate limiting is
+ required, unless your system is already badly broken.
+ Older versions of the dcache subsystem take this approach,
+ guarding updates with a global lock, limiting their rate.
c. Trusted update -- if updates can only be done manually by
superuser or some other trusted user, then it might not
@@ -268,7 +268,8 @@
the machine.
d. Use call_rcu_bh() rather than call_rcu(), in order to take
- advantage of call_rcu_bh()'s faster grace periods.
+ advantage of call_rcu_bh()'s faster grace periods. (This
+ is only a partial solution, though.)
e. Periodically invoke synchronize_rcu(), permitting a limited
number of updates per grace period.
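+	As a minimal sketch of technique (e), assuming a hypothetical
+	per-subsystem update counter and batch size:
+
+		if (++foo_updates_since_gp >= FOO_UPDATE_BATCH) {
+			foo_updates_since_gp = 0;
+			synchronize_rcu(); /* wait for a grace period */
+		}
+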
@@ -276,6 +277,13 @@
The same cautions apply to call_rcu_bh(), call_rcu_sched(),
call_srcu(), and kfree_rcu().
+ Note that although these primitives do take action to avoid memory
+ exhaustion when any given CPU has too many callbacks, a determined
+ user could still exhaust memory. This is especially the case
+ if a system with a large number of CPUs has been configured to
+ offload all of its RCU callbacks onto a single CPU, or if the
+ system has relatively little free memory.
+
9. All RCU list-traversal primitives, which include
rcu_dereference(), list_for_each_entry_rcu(), and
list_for_each_safe_rcu(), must be either within an RCU read-side
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d3..68c0f51 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@
Updating on-disk metadata
-------------------------
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.
The 'dirty' state for a cache block changes far too frequently for us
to keep updating it on the fly. So we treat it as a hint. In normal
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d4..05a27e9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@
userspace daemon can use this to detect a situation where a new table
already exceeds the threshold.
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails, the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem), it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
Thin provisioning
-----------------
@@ -258,10 +287,9 @@
should register for the event and then check the target's status.
held metadata root:
- The location, in sectors, of the metadata root that has been
+ The location, in blocks, of the metadata root that has been
'held' for userspace read access. '-' indicates there is no
- held root. This feature is not yet implemented so '-' is
- always returned.
+ held root.
discard_passdown|no_discard_passdown
Whether or not discards are actually being passed down to the
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 34dc40c..af9b4a0 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -91,7 +91,7 @@
compatible = "ti,omap3-beagle", "ti,omap3"
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
- compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
- OMAP4 SDP : Software Development Board
compatible = "ti,omap4-sdp", "ti,omap4430"
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a6a352c..5992dce 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -21,9 +21,9 @@
must appear in the same order as the output clocks.
- #clock-cells: Must be 1
- clock-output-names: The name of the clocks as free-form strings
- - renesas,indices: Indices of the gate clocks into the group (0 to 31)
+ - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
gate clocks must not be declared.
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 68b83ec..ee9be99 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -1,12 +1,16 @@
* Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
Required properties:
-- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
- "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
- "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
- "fsl,imx6q-sdma". The -to variants should be preferred since they
- allow to determnine the correct ROM script addresses needed for
- the driver to work without additional firmware.
+- compatible : Should be one of
+ "fsl,imx25-sdma"
+ "fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma"
+ "fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma"
+ "fsl,imx51-sdma"
+ "fsl,imx53-sdma"
+ "fsl,imx6q-sdma"
+ The -to variants should be preferred since they allow determining the
+ correct ROM script addresses needed for the driver to work without
+ additional firmware.
- reg : Should contain SDMA registers location and length
- interrupts : Should contain SDMA interrupt
- #dma-cells : Must be <3>.
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c..4fc3927 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@
Optional properties:
- local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644
index 0000000..2dc127c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+ first region is for the device registers and descriptor rings,
+ second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+ enet0: ethoc@fd030000 {
+ compatible = "opencores,ethoc";
+ reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+ interrupts = <1>;
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
similarity index 98%
rename from Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
rename to Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef..c119deb 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
This is a pin controller for the Broadcom BCM281xx SoC family, which includes
BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@
Required Properties:
-- compatible: Must be "brcm,capri-pinctrl".
+- compatible: Must be "brcm,bcm11351-pinctrl"
- reg: Base address of the PAD Controller register block and the size
of the block.
For example, the following is the bare minimum node:
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
@@ -119,7 +119,7 @@
Example:
// pin controller node
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcmbcm11351-pinctrl";
reg = <0x35004800 0x430>;
// pin configuration node
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7116fda..67755ea 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1011,6 +1011,13 @@
parameter will force ia64_sal_cache_flush to call
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
+ forcepae [X86-32]
+ Forcefully enable Physical Address Extension (PAE).
+ Many Pentium M systems disable PAE but may have a
+ functionally usable PAE implementation.
+ Warning: use of this parameter will taint the kernel
+ and may cause unknown problems.
+
ftrace=[tracer]
[FTRACE] will set and start the specified tracer
as early as possible in order to facilitate early
@@ -2053,8 +2060,8 @@
IOAPICs that may be present in the system.
nokaslr [X86]
- Disable kernel base offset ASLR (Address Space
- Layout Randomization) if built into the kernel.
+ Disable kernel and module base offset ASLR (Address
+ Space Layout Randomization) if built into the kernel.
noautogroup Disable scheduler automatic task group creation.
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index 827104f..f3cd299 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -162,7 +162,18 @@
To reduce its OS jitter, do any of the following:
1. Run your workload at a real-time priority, which will allow
preempting the kworker daemons.
-2. Do any of the following needed to avoid jitter that your
+2. A given workqueue can be made visible in the sysfs filesystem
+ by passing WQ_SYSFS to that workqueue's alloc_workqueue().
+ Such a workqueue can be confined to a given subset of the
+ CPUs using the /sys/devices/virtual/workqueue/*/cpumask sysfs
+ files. The set of WQ_SYSFS workqueues can be displayed using
+ "ls sys/devices/virtual/workqueue". That said, the workqueues
+ maintainer would like to caution people against indiscriminately
+ sprinkling WQ_SYSFS across all the workqueues. The reason for
+ caution is that it is easy to add WQ_SYSFS, but because sysfs is
+ part of the formal user/kernel API, it can be nearly impossible
+ to remove it, even if its addition was a mistake.
+3. Do any of the following needed to avoid jitter that your
application cannot tolerate:
a. Build your kernel with CONFIG_SLUB=y rather than
CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 102dc19..11c1d20 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -608,26 +608,30 @@
b = p; /* BUG: Compiler can reorder!!! */
do_something();
-The solution is again ACCESS_ONCE(), which preserves the ordering between
-the load from variable 'a' and the store to variable 'b':
+The solution is again ACCESS_ONCE() and barrier(), which preserve the
+ordering between the load from variable 'a' and the store to variable 'b':
q = ACCESS_ONCE(a);
if (q) {
+ barrier();
ACCESS_ONCE(b) = p;
do_something();
} else {
+ barrier();
ACCESS_ONCE(b) = p;
do_something_else();
}
-You could also use barrier() to prevent the compiler from moving
-the stores to variable 'b', but barrier() would not prevent the
-compiler from proving to itself that a==1 always, so ACCESS_ONCE()
-is also needed.
+The initial ACCESS_ONCE() is required to prevent the compiler from
+proving the value of 'a', and the pair of barrier() invocations are
+required to prevent the compiler from pulling the two identical stores
+to 'b' out from the legs of the "if" statement.
It is important to note that control dependencies absolutely require
a conditional. For example, the following "optimized" version of
-the above example breaks ordering:
+the above example breaks ordering, which is why the barrier() invocations
+are absolutely required if you have identical stores in both legs of
+the "if" statement:
q = ACCESS_ONCE(a);
ACCESS_ONCE(b) = p; /* BUG: No ordering vs. load from a!!! */
@@ -643,9 +647,11 @@
for example, as follows:
if (ACCESS_ONCE(a) > 0) {
+ barrier();
ACCESS_ONCE(b) = q / 2;
do_something();
} else {
+ barrier();
ACCESS_ONCE(b) = q / 3;
do_something_else();
}
@@ -659,9 +665,11 @@
q = ACCESS_ONCE(a);
if (q % MAX) {
+ barrier();
ACCESS_ONCE(b) = p;
do_something();
} else {
+ barrier();
ACCESS_ONCE(b) = p;
do_something_else();
}
@@ -723,8 +731,13 @@
use smp_rmb(), smp_wmb(), or, in the case of prior stores and
later loads, smp_mb().
+ (*) If both legs of the "if" statement begin with identical stores
+ to the same variable, a barrier() statement is required at the
+ beginning of each leg of the "if" statement.
+
(*) Control dependencies require at least one run-time conditional
- between the prior load and the subsequent store. If the compiler
+ between the prior load and the subsequent store, and this
+ conditional must involve the prior load. If the compiler
is able to optimize the conditional away, it will have also
optimized away the ordering. Careful use of ACCESS_ONCE() can
help to preserve the needed conditional.
@@ -1249,6 +1262,23 @@
while perfectly safe in single-threaded code, can be fatal in concurrent
code. Here are some examples of these sorts of optimizations:
+ (*) The compiler is within its rights to reorder loads and stores
+ to the same variable, and in some cases, the CPU is within its
+ rights to reorder loads to the same variable. This means that
+ the following code:
+
+ a[0] = x;
+ a[1] = x;
+
+ Might result in an older value of x stored in a[1] than in a[0].
+ Prevent both the compiler and the CPU from doing this as follows:
+
+ a[0] = ACCESS_ONCE(x);
+ a[1] = ACCESS_ONCE(x);
+
+ In short, ACCESS_ONCE() provides cache coherence for accesses from
+ multiple CPUs to a single variable.
+
(*) The compiler is within its rights to merge successive loads from
the same variable. Such merging can cause the compiler to "optimize"
the following code:
@@ -1644,12 +1674,12 @@
Memory operations issued after the ACQUIRE will be completed after the
ACQUIRE operation has completed.
- Memory operations issued before the ACQUIRE may be completed after the
- ACQUIRE operation has completed. An smp_mb__before_spinlock(), combined
- with a following ACQUIRE, orders prior loads against subsequent stores and
- stores and prior stores against subsequent stores. Note that this is
- weaker than smp_mb()! The smp_mb__before_spinlock() primitive is free on
- many architectures.
+ Memory operations issued before the ACQUIRE may be completed after
+ the ACQUIRE operation has completed. An smp_mb__before_spinlock(),
+ combined with a following ACQUIRE, orders prior loads against
+ subsequent loads and stores and also orders prior stores against
+ subsequent stores. Note that this is weaker than smp_mb()! The
+ smp_mb__before_spinlock() primitive is free on many architectures.
(2) RELEASE operation implication:
@@ -1694,24 +1724,21 @@
ACQUIRE M, STORE *B, STORE *A, RELEASE M
-This same reordering can of course occur if the lock's ACQUIRE and RELEASE are
-to the same lock variable, but only from the perspective of another CPU not
-holding that lock.
+When the ACQUIRE and RELEASE are a lock acquisition and release,
+respectively, this same reordering can occur if the lock's ACQUIRE and
+RELEASE are to the same lock variable, but only from the perspective of
+another CPU not holding that lock. In short, an ACQUIRE followed by a
+RELEASE may -not- be assumed to be a full memory barrier.
-In short, a RELEASE followed by an ACQUIRE may -not- be assumed to be a full
-memory barrier because it is possible for a preceding RELEASE to pass a
-later ACQUIRE from the viewpoint of the CPU, but not from the viewpoint
-of the compiler. Note that deadlocks cannot be introduced by this
-interchange because if such a deadlock threatened, the RELEASE would
-simply complete.
-
-If it is necessary for a RELEASE-ACQUIRE pair to produce a full barrier, the
-ACQUIRE can be followed by an smp_mb__after_unlock_lock() invocation. This
-will produce a full barrier if either (a) the RELEASE and the ACQUIRE are
-executed by the same CPU or task, or (b) the RELEASE and ACQUIRE act on the
-same variable. The smp_mb__after_unlock_lock() primitive is free on many
-architectures. Without smp_mb__after_unlock_lock(), the critical sections
-corresponding to the RELEASE and the ACQUIRE can cross:
+Similarly, the reverse case of a RELEASE followed by an ACQUIRE does not
+imply a full memory barrier. If it is necessary for a RELEASE-ACQUIRE
+pair to produce a full barrier, the ACQUIRE can be followed by an
+smp_mb__after_unlock_lock() invocation. This will produce a full barrier
+if either (a) the RELEASE and the ACQUIRE are executed by the same
+CPU or task, or (b) the RELEASE and ACQUIRE act on the same variable.
+The smp_mb__after_unlock_lock() primitive is free on many architectures.
+Without smp_mb__after_unlock_lock(), the CPU's execution of the critical
+sections corresponding to the RELEASE and the ACQUIRE can cross, so that:
*A = a;
RELEASE M
@@ -1722,7 +1749,36 @@
ACQUIRE N, STORE *B, STORE *A, RELEASE M
-With smp_mb__after_unlock_lock(), they cannot, so that:
+It might appear that this reordering could introduce a deadlock.
+However, this cannot happen because if such a deadlock threatened,
+the RELEASE would simply complete, thereby avoiding the deadlock.
+
+ Why does this work?
+
+ One key point is that we are only talking about the CPU doing
+ the reordering, not the compiler. If the compiler (or, for
+ that matter, the developer) switched the operations, deadlock
+ -could- occur.
+
+ But suppose the CPU reordered the operations. In this case,
+ the unlock precedes the lock in the assembly code. The CPU
+ simply elected to try executing the later lock operation first.
+ If there is a deadlock, this lock operation will simply spin (or
+ try to sleep, but more on that later). The CPU will eventually
+ execute the unlock operation (which preceded the lock operation
+ in the assembly code), which will unravel the potential deadlock,
+ allowing the lock operation to succeed.
+
+ But what if the lock is a sleeplock? In that case, the code will
+ try to enter the scheduler, where it will eventually encounter
+ a memory barrier, which will force the earlier unlock operation
+ to complete, again unraveling the deadlock. There might be
+ a sleep-unlock race, but the locking primitive needs to resolve
+ such races properly in any case.
+
+With smp_mb__after_unlock_lock(), the two critical sections cannot overlap.
+For example, with the following code, the store to *A will always be
+seen by other CPUs before the store to *B:
*A = a;
RELEASE M
@@ -1730,13 +1786,18 @@
smp_mb__after_unlock_lock();
*B = b;
-will always occur as either of the following:
+The operations will always occur in one of the following orders:
- STORE *A, RELEASE, ACQUIRE, STORE *B
- STORE *A, ACQUIRE, RELEASE, STORE *B
+ STORE *A, RELEASE, ACQUIRE, smp_mb__after_unlock_lock(), STORE *B
+ STORE *A, ACQUIRE, RELEASE, smp_mb__after_unlock_lock(), STORE *B
+ ACQUIRE, STORE *A, RELEASE, smp_mb__after_unlock_lock(), STORE *B
If the RELEASE and ACQUIRE were instead both operating on the same lock
-variable, only the first of these two alternatives can occur.
+variable, only the first of these alternatives can occur. In addition,
+the more strongly ordered systems may rule out some of the above orders.
+But in any case, as noted earlier, the smp_mb__after_unlock_lock()
+ensures that the store to *A will always be seen as happening before
+the store to *B.
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
@@ -2757,7 +2818,7 @@
combination of elements combined or discarded, provided the program's view of
the world remains consistent. Note that ACCESS_ONCE() is -not- optional
in the above example, as there are architectures where a given CPU might
-interchange successive loads to the same location. On such architectures,
+reorder successive loads to the same location. On such architectures,
ACCESS_ONCE() does whatever is necessary to prevent this, for example, on
Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the
special ld.acq and st.rel instructions that prevent such reordering.
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f3089d4..0cbe6ec 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -554,12 +554,6 @@
not specified in the struct can_frame and therefore it is only valid in
CANFD_MTU sized CAN FD frames.
- As long as the payload length is <=8 the received CAN frames from CAN FD
- capable CAN devices can be received and read by legacy sockets too. When
- user-generated CAN FD frames have a payload length <=8 these can be send
- by legacy CAN network interfaces too. Sending CAN FD frames with payload
- length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
Implementation hint for new CAN applications:
To build a CAN FD aware application use struct canfd_frame as basic CAN
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index b261229..c6af4ba 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -226,9 +226,9 @@
void *rx_ring, *tx_ring;
/* Configure ring parameters */
- if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
exit(1);
- if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
exit(1);
/* Calculate size of each individual ring */
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674..6fea79e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@
enabled previously with setsockopt() and
the PACKET_COPY_THRESH option.
- The number of frames than can be buffered to
+ The number of frames that can be buffered to
be read with recvfrom is limited like a normal socket.
See the SO_RCVBUF option in the socket (7) man page.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c3..048c92b 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@
SO_TIMESTAMPING:
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
+Instructs the socket layer which kind of information should be collected
+and/or reported. The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
-SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp
- as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
- the system time base
-SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
- software
+Four of the bits are requests to the stack to try to generate
+timestamps. Any combination of them is valid.
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message. If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE: report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag
+in case future drivers pay attention.
+
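+As a minimal sketch, a socket requesting raw hardware receive time
+stamps could be configured like this ('sock' being an already-created
+socket descriptor):
+
+	int val = SOF_TIMESTAMPING_RX_HARDWARE |
+		  SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
+		       &val, sizeof(val)) < 0)
+		perror("SO_TIMESTAMPING");
+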
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
struct scm_timestamping {
struct timespec systime;
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index e55124e..ec8be46 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -320,10 +320,11 @@
==============================================================
-hung_task_warning:
+hung_task_warnings:
The maximum number of warnings to report. During a check interval,
-When this value is reached, no more the warnings will be reported.
+if a hung task is detected, this value is decreased by 1.
+When this value reaches 0, no more warnings will be reported.
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
-1: report an infinite number of warnings.
@@ -441,8 +442,7 @@
feature is too high then the rate the kernel samples for NUMA hinting
faults may be controlled by the numa_balancing_scan_period_min_ms,
numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms,
-numa_balancing_scan_size_mb, numa_balancing_settle_count sysctls and
-numa_balancing_migrate_deferred.
+numa_balancing_scan_size_mb, and numa_balancing_settle_count sysctls.
==============================================================
@@ -483,13 +483,6 @@
numa_balancing_scan_size_mb is how many megabytes worth of pages are
scanned for a given scan.
-numa_balancing_migrate_deferred is how many page migrations get skipped
-unconditionally, after a page migration is skipped because a page is shared
-with other tasks. This reduces page migration overhead, and determines
-how much stronger the "move task near its memory" policy scheduler becomes,
-versus the "move memory near its task" memory management policy, for workloads
-with shared memory.
-
==============================================================
osrelease, ostype & version:
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index cb81741d..a75e3ad 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -182,7 +182,7 @@
0226/1 2.02+(3 ext_loader_ver Extended boot loader version
0227/1 2.02+(3 ext_loader_type Extended boot loader ID
0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
-022C/4 2.03+ ramdisk_max Highest legal initrd address
+022C/4 2.03+ initrd_addr_max Highest legal initrd address
0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
0235/1 2.10+ min_alignment Minimum alignment, as a power of two
@@ -534,7 +534,7 @@
zero, the kernel will assume that your boot loader does not support
the 2.02+ protocol.
-Field name: ramdisk_max
+Field name: initrd_addr_max
Type: read
Offset/size: 0x22c/4
Protocol: 2.03+
diff --git a/MAINTAINERS b/MAINTAINERS
index a177f74..79031a5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@
L: Mailing list that is relevant to this area
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
- T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+ T: SCM tree type and location.
+ Type is one of: git, hg, quilt, stgit, topgit
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -473,7 +474,7 @@
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -538,7 +539,7 @@
ALTERA UART/JTAG UART SERIAL DRIVERS
M: Tobias Klauser <tklauser@distanz.ch>
L: linux-serial@vger.kernel.org
-L: nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers)
+L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
F: drivers/tty/serial/altera_uart.c
F: drivers/tty/serial/altera_jtaguart.c
@@ -910,11 +911,11 @@
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
-M: Shawn Guo <shawn.guo@linaro.org>
+M: Shawn Guo <shawn.guo@freescale.com>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/mach-imx/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
@@ -1612,11 +1613,11 @@
F: drivers/net/wireless/atmel*
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
-M: Bradley Grove <linuxdrivers@attotech.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.attotech.com
-S: Supported
-F: drivers/scsi/esas2r
+M: Bradley Grove <linuxdrivers@attotech.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.attotech.com
+S: Supported
+F: drivers/scsi/esas2r
AUDIT SUBSYSTEM
M: Eric Paris <eparis@redhat.com>
@@ -1737,6 +1738,7 @@
BLACKFIN ARCHITECTURE
M: Steven Miao <realmz6@gmail.com>
L: adi-buildroot-devel@lists.sourceforge.net
+T: git git://git.code.sf.net/p/adi-linux/code
W: http://blackfin.uclinux.org
S: Supported
F: arch/blackfin/
@@ -1830,8 +1832,8 @@
F: include/net/bluetooth/
BONDING DRIVER
-M: Jay Vosburgh <fubar@us.ibm.com>
-M: Veaceslav Falico <vfalico@redhat.com>
+M: Jay Vosburgh <j.vosburgh@gmail.com>
+M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@@ -1860,6 +1862,7 @@
BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
M: Christian Daudt <bcm@fixthebug.org>
+M: Matt Porter <mporter@linaro.org>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://git.github.com/broadcom/bcm11351
S: Maintained
@@ -2158,7 +2161,7 @@
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
@@ -2178,9 +2181,9 @@
F: drivers/net/ethernet/cisco/enic/
CISCO VIC LOW LATENCY NIC DRIVER
-M: Upinder Malhi <umalhi@cisco.com>
-S: Supported
-F: drivers/infiniband/hw/usnic
+M: Upinder Malhi <umalhi@cisco.com>
+S: Supported
+F: drivers/infiniband/hw/usnic
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2377,20 +2380,20 @@
F: drivers/cpufreq/arm_big_little_dt.c
CPUIDLE DRIVER - ARM BIG LITTLE
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
-L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-S: Maintained
-F: drivers/cpuidle/cpuidle-big_little.c
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
+L: linux-pm@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S: Maintained
+F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVERS
M: Rafael J. Wysocki <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -2408,8 +2411,10 @@
CPUSETS
M: Li Zefan <lizefan@huawei.com>
+L: cgroups@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroups/cpusets.txt
F: include/linux/cpuset.h
@@ -2455,9 +2460,9 @@
F: sound/pci/cs5535audio/
CW1200 WLAN driver
-M: Solomon Peachy <pizza@shaftnet.org>
-S: Maintained
-F: drivers/net/wireless/cw1200/
+M: Solomon Peachy <pizza@shaftnet.org>
+S: Maintained
+F: drivers/net/wireless/cw1200/
CX18 VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
@@ -2608,9 +2613,9 @@
M: Oliver Neukum <oliver@neukum.org>
M: Ali Akcaagac <aliakc@web.de>
M: Jamie Lenehan <lenehan@twibble.org>
-W: http://twibble.org/dist/dc395x/
L: dc395x@twibble.org
-L: http://lists.twibble.org/mailman/listinfo/dc395x/
+W: http://twibble.org/dist/dc395x/
+W: http://lists.twibble.org/mailman/listinfo/dc395x/
S: Maintained
F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
@@ -2796,9 +2801,9 @@
F: drivers/acpi/dock.c
DOCUMENTATION
-M: Rob Landley <rob@landley.net>
+M: Randy Dunlap <rdunlap@infradead.org>
L: linux-doc@vger.kernel.org
-T: TBD
+T: quilt http://www.infradead.org/~rdunlap/Doc/patches/
S: Maintained
F: Documentation/
@@ -2845,12 +2850,22 @@
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: include/drm/
F: include/uapi/drm/
+RADEON DRM DRIVERS
+M: Alex Deucher <alexander.deucher@amd.com>
+M: Christian König <christian.koenig@amd.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Supported
+F: drivers/gpu/drm/radeon/
+F: include/drm/radeon*
+F: include/uapi/drm/radeon*
+
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
M: Jani Nikula <jani.nikula@linux.intel.com>
@@ -3082,6 +3097,8 @@
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Supported
@@ -4528,6 +4545,7 @@
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
+M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -4545,6 +4563,7 @@
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/
INTEL-MID GPIO DRIVER
M: David Cohen <david.a.cohen@linux.intel.com>
@@ -4901,7 +4920,7 @@
KCONFIG
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
S: Maintained
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
@@ -5458,11 +5477,11 @@
F: drivers/media/tuners/m88ts2022*
MA901 MASTERKIT USB FM RADIO DRIVER
-M: Alexey Klimov <klimov.linux@gmail.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/radio/radio-ma901.c
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-ma901.c
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
@@ -5498,6 +5517,11 @@
L: linux-man@vger.kernel.org
S: Maintained
+MARVELL ARMADA DRM SUPPORT
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Maintained
+F: drivers/gpu/drm/armada/
+
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -5618,7 +5642,7 @@
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
-L: netdev@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -5659,7 +5683,7 @@
F: include/uapi/mtd/
MEN A21 WATCHDOG DRIVER
-M: Johannes Thumshirn <johannes.thumshirn@men.de>
+M: Johannes Thumshirn <johannes.thumshirn@men.de>
L: linux-watchdog@vger.kernel.org
S: Supported
F: drivers/watchdog/mena21_wdt.c
@@ -5715,20 +5739,20 @@
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
Mellanox MLX5 IB driver
-M: Eli Cohen <eli@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
-S: Supported
-F: include/linux/mlx5/
-F: drivers/infiniband/hw/mlx5/
+M: Eli Cohen <eli@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://openfabrics.org/~eli/connect-ib.git
+S: Supported
+F: include/linux/mlx5/
+F: drivers/infiniband/hw/mlx5/
MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
@@ -5980,6 +6004,9 @@
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
+F: tools/net/
+F: tools/testing/selftests/net/
+F: lib/random32.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@@ -6153,6 +6180,12 @@
F: drivers/block/nvme*
F: include/linux/nvme.h
+NXP TDA998X DRM DRIVER
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Supported
+F: drivers/gpu/drm/i2c/tda998x_drv.c
+F: include/drm/i2c/tda998x.h
+
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@@ -8465,8 +8498,8 @@
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
-L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
+W: http://groups.google.com/group/linux-iscsi-target-dev
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
S: Supported
F: drivers/target/
@@ -8707,17 +8740,17 @@
F: drivers/media/radio/radio-raremono.c
THERMAL
-M: Zhang Rui <rui.zhang@intel.com>
-M: Eduardo Valentin <eduardo.valentin@ti.com>
-L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
-Q: https://patchwork.kernel.org/project/linux-pm/list/
-S: Supported
-F: drivers/thermal/
-F: include/linux/thermal.h
-F: include/linux/cpu_cooling.h
-F: Documentation/devicetree/bindings/thermal/
+M: Zhang Rui <rui.zhang@intel.com>
+M: Eduardo Valentin <eduardo.valentin@ti.com>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+Q: https://patchwork.kernel.org/project/linux-pm/list/
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+F: include/linux/cpu_cooling.h
+F: Documentation/devicetree/bindings/thermal/
THINGM BLINK(1) USB RGB LED DRIVER
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9751,7 +9784,6 @@
XFS FILESYSTEM
P: Silicon Graphics Inc
M: Dave Chinner <david@fromorbit.com>
-M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
@@ -9820,7 +9852,7 @@
L: mjpeg-users@lists.sourceforge.net
L: linux-media@vger.kernel.org
W: http://mjpeg.sourceforge.net/driver-zoran/
-T: Mercurial http://linuxtv.org/hg/v4l-dvb
+T: hg http://linuxtv.org/hg/v4l-dvb
S: Odd Fixes
F: drivers/media/pci/zoran/
diff --git a/Makefile b/Makefile
index 893d6f0e..e5ac8a6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@@ -605,10 +605,11 @@
ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
stackp-flag := -fstack-protector
ifeq ($(call cc-option, $(stackp-flag)),)
- $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \
- -fstack-protector not supported by compiler))
+ $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
+ -fstack-protector not supported by compiler)
endif
-else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
+else
+ifdef CONFIG_CC_STACKPROTECTOR_STRONG
stackp-flag := -fstack-protector-strong
ifeq ($(call cc-option, $(stackp-flag)),)
$(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
@@ -618,6 +619,7 @@
# Force off for distro compilers that enable stack protector by default.
stackp-flag := $(call cc-option, -fno-stack-protector)
endif
+endif
KBUILD_CFLAGS += $(stackp-flag)
# This warning generated too much noise in a regular build.
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index a73a8e2..7736f42 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,7 +1,8 @@
-generic-y += clkdev.h
+generic-y += clkdev.h
generic-y += exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 0d33629..e76fd79 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,15 +1,15 @@
generic-y += auxvec.h
generic-y += barrier.h
-generic-y += bugs.h
generic-y += bitsperlong.h
+generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
-generic-y += fcntl.h
generic-y += fb.h
+generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hash.h
@@ -22,6 +22,7 @@
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
@@ -30,6 +31,7 @@
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
@@ -48,4 +50,3 @@
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 6b58c1d..400c663 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -282,7 +282,7 @@
#else
/* if V-P const for loop, PTAG can be written once outside loop */
if (full_page_op)
- write_aux_reg(ARC_REG_DC_PTAG, paddr);
+ write_aux_reg(aux_tag, paddr);
#endif
while (num_lines-- > 0) {
@@ -296,7 +296,7 @@
write_aux_reg(aux_cmd, vaddr);
vaddr += L1_CACHE_BYTES;
#else
- write_aux_reg(aux, paddr);
+ write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
#endif
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e254198..1594945 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1578,6 +1578,7 @@
choice
prompt "Memory split"
+ depends on MMU
default VMSPLIT_3G
help
Select the desired split between kernel and user memory.
@@ -1595,6 +1596,7 @@
config PAGE_OFFSET
hex
+ default PHYS_OFFSET if !MMU
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
default 0xC0000000
@@ -1903,6 +1905,7 @@
depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
+ depends on MMU
select ARM_PSCI
select SWIOTLB_XEN
select ARCH_DMA_ADDR_T_64BIT
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index 47279aa..0714e03 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
ashldi3.S
+bswapsdi2.S
font.c
lib1funcs.S
hyp-stub.S
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 6d1e43d..0320303 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -209,7 +209,8 @@
omap3-n900.dtb \
omap3-n9.dtb \
omap3-n950.dtb \
- omap3-tobi.dtb \
+ omap3-overo-tobi.dtb \
+ omap3-overo-storm-tobi.dtb \
omap3-gta04.dtb \
omap3-igep0020.dtb \
omap3-igep0030.dtb \
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 4718ec4..486880b 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -121,7 +121,7 @@
ti,model = "AM335x-EVMSK";
ti,audio-codec = <&tlv320aic3106>;
ti,mcasp-controller = <&mcasp1>;
- ti,codec-clock-rate = <24576000>;
+ ti,codec-clock-rate = <24000000>;
ti,audio-routing =
"Headphone Jack", "HPLOUT",
"Headphone Jack", "HPROUT";
@@ -256,6 +256,12 @@
>;
};
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ >;
+ };
+
mcasp1_pins: mcasp1_pins {
pinctrl-single,pins = <
0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
@@ -456,6 +462,9 @@
status = "okay";
vmmc-supply = <&vmmc_reg>;
bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
};
&sham {
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 6660968..9480cf8 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -23,6 +23,7 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
+ eth3 = &eth3;
};
cpus {
@@ -291,7 +292,7 @@
interrupts = <91>;
};
- ethernet@34000 {
+ eth3: ethernet@34000 {
compatible = "marvell,armada-370-neta";
reg = <0x34000 0x4000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index e491b82..792fde1 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -147,7 +147,7 @@
};
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 2b76524..187fd46 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -379,15 +379,6 @@
#clock-cells = <1>;
};
- pmu_intc: pmu-interrupt-ctrl@d0050 {
- compatible = "marvell,dove-pmu-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0xd0050 0x8>;
- interrupts = <33>;
- marvell,#interrupts = <7>;
- };
-
pinctrl: pin-ctrl@d0200 {
compatible = "marvell,dove-pinctrl";
reg = <0xd0200 0x10>;
@@ -610,8 +601,6 @@
rtc: real-time-clock@d8500 {
compatible = "marvell,orion-rtc";
reg = <0xd8500 0x20>;
- interrupt-parent = <&pmu_intc>;
- interrupts = <5>;
};
gpio2: gpio-ctrl@e8400 {
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi
index 2363593..ef58d1c 100644
--- a/arch/arm/boot/dts/keystone-clocks.dtsi
+++ b/arch/arm/boot/dts/keystone-clocks.dtsi
@@ -612,7 +612,7 @@
compatible = "ti,keystone,psc-clock";
clocks = <&chipclk13>;
clock-output-names = "vcp-3";
- reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+ reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
reg-names = "control", "domain";
domain-id = <24>;
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index b9b55c9..d3b253b 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -13,7 +13,7 @@
/ {
model = "OMAP3 GTA04";
- compatible = "ti,omap3-gta04", "ti,omap3";
+ compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
@@ -32,7 +32,7 @@
aux-button {
label = "aux";
linux,code = <169>;
- gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
gpio-key,wakeup;
};
};
@@ -92,6 +92,8 @@
bmp085@77 {
compatible = "bosch,bmp085";
reg = <0x77>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <17 IRQ_TYPE_EDGE_RISING>;
};
/* leds */
@@ -141,8 +143,8 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
- vmmc_aux-supply = <&vsim>;
bus-width = <4>;
+ ti,non-removable;
};
&mmc2 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 25a2b5f..f2779ac 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -14,7 +14,7 @@
/ {
model = "IGEPv2 (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020", "ti,omap3";
+ compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 145c58c..2793749 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -13,7 +13,7 @@
/ {
model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030", "ti,omap3";
+ compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 39828ce..9938b5d 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -14,5 +14,5 @@
/ {
model = "Nokia N9";
- compatible = "nokia,omap3-n9", "ti,omap3";
+ compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 6fc85f9..0bf40c9 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2013 Pavel Machek <pavel@ucw.cz>
- * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi>
+ * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 (or later) as
@@ -13,7 +13,7 @@
/ {
model = "Nokia N900";
- compatible = "nokia,omap3-n900", "ti,omap3";
+ compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index b076a52..261c558 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -14,5 +14,5 @@
/ {
model = "Nokia N950";
- compatible = "nokia,omap3-n950", "ti,omap3";
+ compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
new file mode 100644
index 0000000..966b5c9
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Tobi expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+#include "omap3-overo-tobi-common.dtsi"
+
+/ {
+ model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+};
+
diff --git a/arch/arm/boot/dts/omap3-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
similarity index 93%
rename from arch/arm/boot/dts/omap3-tobi.dts
rename to arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
index 7e4ad2a..4edc013 100644
--- a/arch/arm/boot/dts/omap3-tobi.dts
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -13,9 +13,6 @@
#include "omap3-overo.dtsi"
/ {
- model = "TI OMAP3 Gumstix Overo on Tobi";
- compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3";
-
leds {
compatible = "gpio-leds";
heartbeat {
diff --git a/arch/arm/boot/dts/omap3-overo-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi.dts
new file mode 100644
index 0000000..de5653e
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-tobi.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Tobi expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap34xx.dtsi"
+#include "omap3-overo-tobi-common.dtsi"
+
+/ {
+ model = "OMAP35xx Gumstix Overo on Tobi";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
+};
+
diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi
index a461d2f..5970999 100644
--- a/arch/arm/boot/dts/omap3-overo.dtsi
+++ b/arch/arm/boot/dts/omap3-overo.dtsi
@@ -9,9 +9,6 @@
/*
* The Gumstix Overo must be combined with an expansion board.
*/
-/dts-v1/;
-
-#include "omap34xx.dtsi"
/ {
pwmleds {
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index 6c31c26..db58cad 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -8,8 +8,8 @@
*/
#include "sama5d3.dtsi"
#include "sama5d3_can.dtsi"
-#include "sama5d3_emac.dtsi"
#include "sama5d3_gmac.dtsi"
+#include "sama5d3_emac.dtsi"
#include "sama5d3_lcd.dtsi"
#include "sama5d3_mci2.dtsi"
#include "sama5d3_tcb1.dtsi"
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 10666ca..d4d2763 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -426,7 +426,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 6496159..79fd412 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -383,7 +383,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 320335a..c463fd7 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -346,7 +346,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 9ff0948..6f25cf5 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -454,7 +454,7 @@
rtc: rtc@01c20d00 {
compatible = "allwinner,sun7i-a20-rtc";
reg = <0x01c20d00 0x20>;
- interrupts = <0 24 1>;
+ interrupts = <0 24 4>;
};
sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <0 29 4>;
};
@@ -596,10 +596,10 @@
hstimer@01c60000 {
compatible = "allwinner,sun7i-a20-hstimer";
reg = <0x01c60000 0x1000>;
- interrupts = <0 81 1>,
- <0 82 1>,
- <0 83 1>,
- <0 84 1>;
+ interrupts = <0 81 4>,
+ <0 82 4>,
+ <0 83 4>,
+ <0 84 4>;
clocks = <&ahb_gates 28>;
};
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 389e987..44ec401 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -57,6 +57,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -72,6 +74,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 480ecda..48d2a7f 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -94,6 +94,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -109,6 +111,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 9104224..1e156d9 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -28,7 +28,7 @@
compatible = "nvidia,cardhu", "nvidia,tegra30";
aliases {
- rtc0 = "/i2c@7000d000/tps6586x@34";
+ rtc0 = "/i2c@7000d000/tps65911@2d";
rtc1 = "/rtc@7000e000";
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index ed8e770..19a84e9 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -170,6 +170,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -185,6 +187,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
deleted file mode 100644
index 3f123ec..0000000
--- a/arch/arm/boot/dts/testcases/tests.dtsi
+++ /dev/null
@@ -1,2 +0,0 @@
-/include/ "tests-phandle.dtsi"
-/include/ "tests-interrupts.dtsi"
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index f43907c..65f6577 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
-/include/ "versatile-ab.dts"
+#include <versatile-ab.dts>
/ {
model = "ARM Versatile PB";
@@ -47,4 +47,4 @@
};
};
-/include/ "testcases/tests.dtsi"
+#include <testcases.dtsi>
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 00fe9e9..27d69b5 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -204,7 +204,10 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_ONESHOT=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3278afe..23e728e 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,16 +7,19 @@
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += sections.h
generic-y += segment.h
@@ -33,5 +36,3 @@
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4b..4afb376 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
@@ -104,10 +105,6 @@
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84..2fe85ff 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -20,9 +20,6 @@
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define mc_capable() (cpu_topology[0].socket_id != -1)
-#define smt_capable() (cpu_topology[0].thread_id != -1)
-
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 47cd974..c96ecac 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -177,6 +177,18 @@
.long __proc_info_end
.size __lookup_processor_type_data, . - __lookup_processor_type_data
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_lpae
+ bl printascii
+ b __error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+ b __error
+#endif
+ .align
+ENDPROC(__error_lpae)
+
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e..f5f381d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,7 +102,7 @@
and r3, r3, #0xf @ extract VMSA support
cmp r3, #5 @ long-descriptor translation table format?
THUMB( it lo ) @ force fixup-able long branch encoding
- blo __error_p @ only classic page table format
+ blo __error_lpae @ only classic page table format
#endif
#ifndef CONFIG_XIP_KERNEL
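For context, this hunk reroutes the boot failure path when a CONFIG_ARM_LPAE
kernel starts on a CPU whose VMSA field (ID_MMFR0 bits [3:0]) is below 5, i.e.
one lacking the long-descriptor translation table format; __error_lpae (added
in head-common.S above) then prints a dedicated message. A rough C sketch of
the assembly check, assuming the kernel's read_cpuid_ext() helper:

    /* Sketch only: fail early if the CPU lacks LPAE support */
    if ((read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) < 5)
            goto error_lpae;        /* ends up in __error_lpae */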
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 92f7b15..adabeab 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,7 +30,6 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
-#include <linux/cpuidle.h>
#include <linux/leds.h>
#include <linux/reboot.h>
@@ -133,7 +132,11 @@
void (*arm_pm_idle)(void);
-static void default_idle(void)
+/*
+ * Called from the core idle loop.
+ */
+
+void arch_cpu_idle(void)
{
if (arm_pm_idle)
arm_pm_idle();
@@ -168,15 +171,6 @@
#endif
/*
- * Called from the core idle loop.
- */
-void arch_cpu_idle(void)
-{
- if (cpuidle_idle_call())
- default_idle();
-}
-
-/*
* Called by kexec, immediately prior to machine_kexec().
*
* This must completely disable all secondary CPUs; simply causing those CPUs
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 1d8248e..bd18bb8 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -878,7 +878,8 @@
unsigned long cmd,
void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT &&
+ __hyp_get_vectors() == hyp_default_vectors) {
cpu_init_hyp_mode(NULL);
return NOTIFY_OK;
}
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index ddc1553..0d68d40 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -220,6 +220,10 @@
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 and r1.
*
+ * A function pointer with a value of 0xffffffff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
+ * arch/arm/kernel/hyp_stub.S.
+ *
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
@@ -363,6 +367,11 @@
host_switch_to_hyp:
pop {r0, r1, r2}
+ /* Check for __hyp_get_vectors */
+ cmp r0, #-1
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ beq 1f
+
push {lr}
mrs lr, SPSR
push {lr}
@@ -378,7 +387,7 @@
pop {lr}
msr SPSR_csxf, lr
pop {lr}
- eret
+1: eret
guest_trap:
load_vcpu @ Load VCPU pointer to r0
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index befcaf5..ec41964 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -101,11 +101,9 @@
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
-ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
# i.MX6SL reuses i.MX6Q code
obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
-endif
# i.MX5 based machines
obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 59c3b9b..baf439d 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -144,13 +144,11 @@
void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
-#ifdef CONFIG_PM
void imx6q_pm_init(void);
void imx6q_pm_set_ccm_base(void __iomem *base);
+#ifdef CONFIG_PM
void imx5_pm_init(void);
#else
-static inline void imx6q_pm_init(void) {}
-static inline void imx6q_pm_set_ccm_base(void __iomem *base) {}
static inline void imx5_pm_init(void) {}
#endif
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 91449c5..85089d8 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -156,6 +156,7 @@
.register_dev = 1,
.hmc_mode = 16,
.pins[0] = 6,
+ .extcon = "tahvo-usb",
};
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index e2ce4f8..0af7ca0 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -50,6 +50,7 @@
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
@@ -63,6 +64,7 @@
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select CPU_V7
select MULTI_IRQ_HANDLER
@@ -72,6 +74,7 @@
depends on ARCH_MULTI_V7
select CPU_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select MULTI_IRQ_HANDLER
select ARM_GIC
select MACH_OMAP_GENERIC
@@ -80,6 +83,7 @@
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
@@ -268,9 +272,6 @@
default y
select OMAP_PACKAGE_CBB
-config MACH_NOKIA_N800
- bool
-
config MACH_NOKIA_N810
bool
@@ -281,7 +282,6 @@
bool "Nokia N800/N810"
depends on SOC_OMAP2420
default y
- select MACH_NOKIA_N800
select MACH_NOKIA_N810
select MACH_NOKIA_N810_WIMAX
select OMAP_PACKAGE_ZAC
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea..11ed915 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
+ .set_rate = &omap3_clkoutx2_set_rate,
.recalc_rate = &omap3_clkoutx2_recalc,
+ .round_rate = &omap3_clkoutx2_round_rate,
};
static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c8..01fc710 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
#include "prm.h"
#include "clockdomain.h"
+#define MAX_CPUS 2
+
/* Machine specific information */
struct idle_statedata {
u32 cpu_state;
@@ -48,11 +50,11 @@
},
};
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced..3c418ea 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,25 +623,12 @@
/* Clock control for DPLL outputs */
-/**
- * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
- *
- * Using parent clock DPLL data, look up DPLL state. If locked, set our
- * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
- */
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
- unsigned long parent_rate)
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
- const struct dpll_data *dd;
- unsigned long rate;
- u32 v;
struct clk_hw_omap *pclk = NULL;
struct clk *parent;
- if (!parent_rate)
- return 0;
-
/* Walk up the parents of clk, looking for a DPLL */
do {
do {
@@ -656,9 +643,35 @@
/* clk does not have a DPLL as a parent? error in the clock data */
if (!pclk) {
WARN_ON(1);
- return 0;
+ return NULL;
}
+ return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @clk: DPLL output struct clk
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ const struct dpll_data *dd;
+ unsigned long rate;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!parent_rate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
dd = pclk->dpll_data;
WARN_ON(!dd->enable_mask);
@@ -672,6 +685,55 @@
return rate;
}
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ const struct dpll_data *dd;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!*prate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
+ dd = pclk->dpll_data;
+
+ /* TYPE J does not have a clkoutx2 */
+ if (dd->flags & DPLL_J_TYPE) {
+ *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+ return *prate;
+ }
+
+ WARN_ON(!dd->enable_mask);
+
+ v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* If in bypass, the rate is fixed to the bypass rate */
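+	 * (sketch of intent; comment clarified)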
+ if (v != OMAP3XXX_EN_DPLL_LOCKED)
+ return *prate;
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / 2);
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return *prate * 2;
+}
+
/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
.allow_idle = omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index d24926e..ab43755 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1339,7 +1339,7 @@
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#ifdef CONFIG_MTD_NAND
+#if IS_ENABLED(CONFIG_MTD_NAND)
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
@@ -1429,7 +1429,7 @@
}
#endif
-#ifdef CONFIG_MTD_ONENAND
+#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
struct device_node *child)
{
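The switch from #ifdef to IS_ENABLED() above matters when the MTD drivers are
built as modules: #ifdef CONFIG_FOO holds only for =y, whereas
IS_ENABLED(CONFIG_FOO) holds for both =y and =m. A before/after sketch
(stub function hypothetical):

    /* before: compiled only when CONFIG_MTD_NAND=y */
    #ifdef CONFIG_MTD_NAND
    static int gpmc_probe_nand_child_stub(void) { return 0; }
    #endif

    /* after: also compiled when CONFIG_MTD_NAND=m */
    #if IS_ENABLED(CONFIG_MTD_NAND)
    static int gpmc_probe_nand_child_stub(void) { return 0; }
    #endif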
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index d408b15..af432b1 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -179,15 +179,6 @@
.length = L4_EMU_34XX_SIZE,
.type = MT_DEVICE
},
-#if defined(CONFIG_DEBUG_LL) && \
- (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
- {
- .virtual = ZOOM_UART_VIRT,
- .pfn = __phys_to_pfn(ZOOM_UART_BASE),
- .length = SZ_1M,
- .type = MT_DEVICE
- },
-#endif
};
#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d8188..1f33f5d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@
goto dis_opt_clks;
_write_sysconfig(v, oh);
+
+ if (oh->class->sysc->srst_udelay)
+ udelay(oh->class->sysc->srst_udelay);
+
+ c = _wait_softreset_complete(oh);
+ if (c == MAX_MODULE_SOFTRESET_WAIT) {
+ pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
+ oh->name, MAX_MODULE_SOFTRESET_WAIT);
+ ret = -ETIMEDOUT;
+ goto dis_opt_clks;
+ } else {
+ pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+ }
+
ret = _clear_softreset(oh, &v);
if (ret)
goto dis_opt_clks;
_write_sysconfig(v, oh);
- if (oh->class->sysc->srst_udelay)
- udelay(oh->class->sysc->srst_udelay);
-
- c = _wait_softreset_complete(oh);
- if (c == MAX_MODULE_SOFTRESET_WAIT)
- pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
- oh->name, MAX_MODULE_SOFTRESET_WAIT);
- else
- pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
-
/*
* XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
* _wait_target_ready() or _reset()
*/
- ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
dis_opt_clks:
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_disable_optional_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c..810c205 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24d..c33e07e 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
#include "common-board-devices.h"
#include "dss-common.h"
#include "control.h"
+#include "omap-secure.h"
+#include "soc.h"
struct pdata_init {
const char *compatible;
@@ -169,6 +171,22 @@
omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
}
+
+static void __init nokia_n900_legacy_init(void)
+{
+ hsmmc2_internal_input_clk();
+
+ if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+ if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+ pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+ /* set IBE to 1 */
+ rx51_secure_update_aux_cr(BIT(6), 0);
+ } else {
+ pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+ pr_warning("Thumb binaries may crash randomly without this workaround\n");
+ }
+ }
+}
#endif /* CONFIG_ARCH_OMAP3 */
#ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+ OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
/* Only on am3517 */
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@
static struct pdata_init pdata_quirks[] __initdata = {
#ifdef CONFIG_ARCH_OMAP3
{ "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
- { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+ { "nokia,omap3-n900", nokia_n900_legacy_init, },
{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96..280f3c5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@
OMAP4_PRM_RSTCTRL_OFFSET);
v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
/* OCP barrier */
v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
}
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index f70583f..29997bd 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -38,6 +38,7 @@
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/reboot.h>
+#include <linux/regulator/fixed.h>
#include <linux/regulator/max1586.h>
#include <linux/slab.h>
#include <linux/i2c/pxa-i2c.h>
@@ -714,6 +715,10 @@
{ GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" },
};
+static struct regulator_consumer_supply fixed_5v0_consumers[] = {
+ REGULATOR_SUPPLY("power", "pwm-backlight"),
+};
+
static void __init mioa701_machine_init(void)
{
int rc;
@@ -753,6 +758,10 @@
pxa_set_i2c_info(&i2c_pdata);
pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
+
+ regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
+ ARRAY_SIZE(fixed_5v0_consumers),
+ 5000000);
}
static void mioa701_machine_exit(void)
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d..50e1d85 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+#include "hardware.h" /* Gives GPIO_MAX */
+
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 4ae0286..f55b05a 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/suspend.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <linux/clk/tegra.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 303a285..6191603 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -73,10 +73,20 @@
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
+ static const struct of_device_id pl310_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", },
+ {}
+ };
+
+ struct device_node *np;
int ret;
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
u32 aux_ctrl, cache_type;
+ np = of_find_matching_node(NULL, pl310_ids);
+ if (!np)
+ return;
+
cache_type = readl(p + L2X0_CACHE_TYPE);
aux_ctrl = (cache_type & 0x700) << (17-8);
aux_ctrl |= 0x7C400001;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a77450..11b3914 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1358,7 +1358,7 @@
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (gfp & GFP_ATOMIC)
+ if (!(gfp & __GFP_WAIT))
return __iommu_alloc_atomic(dev, size, handle);
/*
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 2b3a564..ef69152 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -264,6 +264,9 @@
note_page(st, addr, 3, pmd_val(*pmd));
else
walk_pte(st, pmd, addr);
+
+ if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+ note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
}
}
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 71c53ec..3bdfdda 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -12,6 +12,7 @@
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += ftrace.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -22,12 +23,14 @@
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -38,8 +41,8 @@
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
-generic-y += switch_to.h
generic-y += swab.h
+generic-y += switch_to.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
@@ -49,5 +52,3 @@
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3..453a179 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#ifdef CONFIG_SMP
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@
}
#define __my_cpu_offset __my_cpu_offset()
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd..aa3917c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
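The double logical invert added above normalizes each predicate to 0 or 1.
Without it, a flag bit that sits above bit 31 is silently lost when the masked
result is stored in an int. A minimal illustration, assuming a 64-bit long and
a high software bit as on arm64:

    #define PTE_WRITE  (1UL << 57)           /* bit above the low 32 bits  */
    unsigned long pteval = PTE_WRITE;
    int broken = pteval & PTE_WRITE;         /* truncates to 0 in an int   */
    int fixed  = !!(pteval & PTE_WRITE);     /* always exactly 0 or 1      */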
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217..a4654c6 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -14,6 +14,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1c0a9be..9cce009 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -33,7 +33,6 @@
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
-#include <linux/cpuidle.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -94,10 +93,8 @@
* This should do all the clock switching and wait for interrupt
* tricks
*/
- if (cpuidle_idle_call()) {
- cpu_do_idle();
- local_irq_enable();
- }
+ cpu_do_idle();
+ local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c3b6c63..38f0558 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,7 +48,11 @@
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at time of bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36..2c56012 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -694,6 +694,24 @@
.align 2
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed). The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
ENTRY(kvm_call_hyp)
hvc #0
ret
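The comment block above documents the Hyp call interface: the first argument
is the EL2 function pointer, followed by at most three register-passed
arguments, and a 0 pointer instead returns VBAR_EL2. A hedged usage sketch,
with symbols as used elsewhere in the KVM code:

    u64 kvm_call_hyp(void *hypfn, ...);

    kvm_call_hyp(__kvm_flush_vm_context);               /* run hypfn at EL2 */
    kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);   /* args in x0..x2   */
    unsigned long v = kvm_call_hyp(NULL);               /* returns VBAR_EL2 */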
@@ -737,7 +755,12 @@
pop x2, x3
pop x0, x1
- push lr, xzr
+ /* Check for __hyp_get_vectors */
+ cbnz x0, 1f
+ mrs x0, vbar_el2
+ b 2f
+
+1: push lr, xzr
/*
* Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@
blr lr
pop lr, xzr
- eret
+2: eret
el1_trap:
/*
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index c7c64a6..00a0f3c 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,22 +1,23 @@
-generic-y += clkdev.h
-generic-y += cputime.h
-generic-y += delay.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += futex.h
-generic-y += preempt.h
-generic-y += irq_regs.h
-generic-y += param.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += percpu.h
-generic-y += scatterlist.h
-generic-y += sections.h
-generic-y += topology.h
-generic-y += trace_clock.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += futex.h
+generic-y += hash.h
+generic-y += irq_regs.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
generic-y += vga.h
-generic-y += xor.h
-generic-y += hash.h
+generic-y += xor.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 359d36f..0d93b9a 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -10,6 +10,7 @@
generic-y += errno.h
generic-y += fb.h
generic-y += futex.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
@@ -17,14 +18,16 @@
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
generic-y += pgalloc.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
@@ -44,5 +47,3 @@
generic-y += unaligned.h
generic-y += user.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index d73bb85..8dbdce8 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -15,6 +15,7 @@
generic-y += fb.h
generic-y += fcntl.h
generic-y += futex.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += io.h
generic-y += ioctl.h
@@ -24,6 +25,7 @@
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h
@@ -34,6 +36,7 @@
generic-y += pgalloc.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += segment.h
@@ -56,5 +59,3 @@
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f..86648c0 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
#define _ASM_C6X_CACHE_H
#include <linux/irqflags.h>
+#include <linux/init.h>
/*
* Cache line size
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index f3fd876..056027f 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -9,8 +9,9 @@
generic-y += hash.h
generic-y += kvm_para.h
generic-y += linkage.h
+generic-y += mcs_spinlock.h
generic-y += module.h
+generic-y += preempt.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066c..053c17b 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@
* definition, which doesn't have the same semantics. We don't want to
* use -fno-builtin, so just hide the name ffs.
*/
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
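Converting ffs to a function-like macro keeps it from rewriting unrelated
identifiers: an object-like macro substitutes every bare 'ffs' token,
including variable and member names, while the function-like form expands
only at call sites. Sketch of the breakage being avoided (ffs_data
hypothetical):

    #define ffs kernel_ffs           /* object-like: hits EVERY 'ffs' token */
    struct ffs_data *ffs;            /* becomes '*kernel_ffs' -- breakage   */

    #define ffs(x) kernel_ffs(x)     /* function-like: call sites only      */
    int bit = ffs(mask);             /* expands to kernel_ffs(mask)         */
    struct ffs_data *ffs2;           /* left alone                          */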
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index bc42f14..babb933 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += clkdev.h
generic-y += exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 38ca45d..eadcc11 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -25,14 +25,16 @@
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
-generic-y += local64.h
generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += pci.h
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
@@ -45,8 +47,8 @@
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
-generic-y += statfs.h
generic-y += stat.h
+generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
@@ -55,4 +57,3 @@
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 283a831..0da4aa2 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,8 +1,9 @@
generic-y += clkdev.h
generic-y += exec.h
-generic-y += kvm_para.h
-generic-y += trace_clock.h
-generic-y += preempt.h
-generic-y += vtime.h
generic-y += hash.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
+generic-y += vtime.h
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index a2496e4..5cb55a1 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -77,7 +77,6 @@
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
-#define smt_capable() (smp_num_siblings > 1)
#endif
extern void arch_fix_phys_package_id(int num, u32 slot);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index da5b462..741b99c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -477,6 +477,9 @@
char *cp, vendor[100] = "unknown";
int i;
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
+
/*
* It's too early to be able to use the standard kernel command line
* support...
@@ -529,6 +532,8 @@
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
palo_phys = EFI_INVALID_TABLE_ADDR;
if (efi_config_init(arch_tables) != 0)
@@ -657,6 +662,8 @@
return;
}
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
/*
* Now that EFI is in virtual mode, we call the EFI functions more
* efficiently:
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf8..20e8a9b 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
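The flag swap above is subtle: on NUMA kernels of this vintage, GFP_THISNODE
is a composite, roughly

    #define GFP_THISNODE  (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)

so OR-ing it in also disabled retries and warnings for this allocation. The
uncached allocator wants only the bare node restriction, hence __GFP_THISNODE.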
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 932435a..5825a35 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,7 +1,8 @@
generic-y += clkdev.h
generic-y += exec.h
-generic-y += module.h
-generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += module.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index dbdd223..b2e3229 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -17,6 +17,7 @@
select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
+ select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 7cc8c36..c67c94a 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,4 @@
-
+generic-y += barrier.h
generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cputime.h
@@ -6,6 +6,7 @@
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
@@ -13,11 +14,13 @@
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += mutex.h
generic-y += percpu.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -31,5 +34,3 @@
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
deleted file mode 100644
index 15c5f77..0000000
--- a/arch/m68k/include/asm/barrier.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _M68K_BARRIER_H
-#define _M68K_BARRIER_H
-
-#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-
-#include <asm-generic/barrier.h>
-
-#endif /* _M68K_BARRIER_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 014f288..9d38b73 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 349
+#define NR_syscalls 351
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 625f321..b932dd4 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -354,5 +354,7 @@
#define __NR_process_vm_writev 346
#define __NR_kcmp 347
#define __NR_finit_module 348
+#define __NR_sched_setattr 349
+#define __NR_sched_getattr 350
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 3f04ea0..b6223dc4 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -369,4 +369,6 @@
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
+ .long sys_sched_setattr
+ .long sys_sched_getattr /* 350 */
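
Wiring a new syscall on m68k touches three places, as the hunks above
show: the uapi number, the NR_syscalls count, and the entry in the call
table. A stand-alone sanity sketch (hypothetical, not kernel code) of
the invariant that NR_syscalls is one past the highest wired number:

#include <assert.h>

#define __NR_sched_getattr 350	/* from the uapi hunk above */
#define NR_syscalls 351		/* from the unistd.h hunk above */

int main(void)
{
	assert(NR_syscalls == __NR_sched_getattr + 1);
	return 0;
}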
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index b716d80..c29ead8 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -13,6 +13,7 @@
generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -23,6 +24,7 @@
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
@@ -30,6 +32,7 @@
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += sembuf.h
@@ -52,5 +55,3 @@
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2b98bc7..1f590ab 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += hash.h
-generic-y += trace_clock.h
-generic-y += syscalls.h
+generic-y += mcs_spinlock.h
generic-y += preempt.h
+generic-y += syscalls.h
+generic-y += trace_clock.h
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dcae3a7..95fa1f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1776,12 +1776,12 @@
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
- default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
- range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
- default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
- range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
- default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
+ range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+ default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
range 11 64
default "11"
help
@@ -2353,9 +2353,8 @@
If unsure, say Y. Only embedded should say N here.
config MIPS_O32_FP64_SUPPORT
- bool "Support for O32 binaries using 64-bit FP"
+ bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
depends on 32BIT || MIPS32_O32
- default y
help
When this is enabled, the kernel will support use of 64-bit floating
point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@
of your kernel & potentially improve FP emulation performance by
saying N here.
- If unsure, say Y.
+ Although binutils currently supports use of this flag, the details
+ concerning its effect upon the O32 ABI in userland are still being
+ worked on. In order to avoid userland becoming dependent upon current
+ behaviour before the details have been finalised, this option should
+ be considered experimental and only enabled by those working upon
+ said details.
+
+ If unsure, say N.
config USE_OF
bool
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 9edc35f..acf9a2a 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -53,10 +53,8 @@
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 9969dba..25a59a2 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -52,10 +52,8 @@
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
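
Both Alchemy boards get the same conversion: the removed
strict_strtoul() is replaced by kstrtoul(), which returns 0 on success
and a negative errno on failure, so the missing-variable case and the
parse-failure case collapse into one condition. A user-space sketch of
the same pattern, with a stand-in that mimics kstrtoul()'s convention:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in with kstrtoul()'s convention: 0 on success, -errno on error */
static int parse_ul(const char *s, unsigned long *out)
{
	char *end;

	if (!s || !*s)
		return -EINVAL;
	errno = 0;
	*out = strtoul(s, &end, 0);
	return (errno || *end) ? -EINVAL : 0;
}

int main(void)
{
	unsigned long memsize;
	const char *memsize_str = getenv("memsize");	/* may be NULL */

	if (!memsize_str || parse_ul(memsize_str, &memsize))
		memsize = 0x04000000;	/* default, as on the boards above */
	printf("memsize = %#lx\n", memsize);
	return 0;
}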
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 6d612e2..cdd8246 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,3 +1,4 @@
+#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <bcm47xx_board.h>
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 6decb27..2bed73a 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -196,7 +196,7 @@
char nvram_var[10];
char buf[30];
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 32; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;
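
The loop bound doubles from 16 to 32 GPIO lines. The existing 10-byte
buffer still fits, since the longest generated name is "gpio31"; a
quick stand-alone check:

#include <stdio.h>

int main(void)
{
	char nvram_var[10];
	int n = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", 31);

	printf("%s (%d chars)\n", nvram_var, n);	/* gpio31 (6) */
	return 0;
}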
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 25fbfae..c2bb4f8 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -975,10 +975,6 @@
if (ciu > 1 || bit > 63)
return -EINVAL;
- /* These are the GPIO lines */
- if (ciu == 0 && bit >= 16 && bit < 32)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1007,6 +1003,10 @@
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
+ /* Don't map irq if it is reserved for GPIO. */
+ if (line == 0 && bit >= 16 && bit < 32)
+ return 0;
+
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1525,10 +1525,6 @@
ciu = intspec[0];
bit = intspec[1];
- /* Line 7 are the GPIO lines */
- if (ciu > 6 || bit > 63)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1570,8 +1566,14 @@
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
- /* Line 7 are the GPIO lines */
- if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ /*
+ * Don't map irq if it is reserved for GPIO.
+ * (Line 7 is used for the GPIO lines.)
+ */
+ if (line == 7)
+ return 0;
+
+ if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (octeon_irq_ciu2_is_edge(line, bit))
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 2d7f650..0543918 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -2,16 +2,17 @@
generic-y += cputime.h
generic-y += current.h
generic-y += emergency-restart.h
+generic-y += hash.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
+generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += ucontext.h
generic-y += xor.h
-generic-y += hash.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93..4225e99 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,7 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@@ -54,11 +55,21 @@
.endm
.macro local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
@@ -106,7 +117,7 @@
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -159,7 +170,7 @@
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 6b97495..58e50cb 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@
return 0;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index ce35c9a..992aaba 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -22,12 +22,12 @@
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
- "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -35,8 +35,8 @@
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [dst] "=&r" (dst), [error] "=r" (error)\
- : [src] "r" (src) \
+ : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+ : [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
@@ -44,12 +44,12 @@
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
- "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -57,8 +57,8 @@
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [error] "=r" (error) \
- : [dst] "r" (dst), [src] "r" (src)\
+ : [tmp_err] "=r" (error) \
+ : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
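
The switch to fixed operand names sidesteps a stringification pitfall:
the old templates built their asm operand names with STR() from the
macro arguments, which only yields a valid name when the argument is a
plain identifier. A small demonstration of what stringizing does to a
compound argument (my reading of the motivation for the rename):

#include <stdio.h>

#define STR(x) #x

int main(void)
{
	printf("%s\n", STR(dst));		/* "dst": a valid operand name */
	printf("%s\n", STR(regs->regs[4]));	/* not a valid operand name */
	return 0;
}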
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbf..f35b131 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,6 +13,7 @@
#ifndef __ASM_MIPS_SYSCALL_H
#define __ASM_MIPS_SYSCALL_H
+#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/elf-em.h>
#include <linux/kernel.h>
@@ -39,14 +40,14 @@
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_thread_flag(TIF_32BIT_REGS))
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@
default:
BUG();
}
+
+ unreachable();
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@
unsigned int i, unsigned int n,
unsigned long *args)
{
- unsigned long arg;
int ret;
while (n--)
- ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
/*
* No way to communicate an error because this is a void function.
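
The dropped "4 *" factor matters because C pointer arithmetic already
scales by the element size: (int *)usp + n addresses the n-th 32-bit
stack slot, while the old expression skipped four slots per argument.
A stand-alone illustration:

#include <stdio.h>

int main(void)
{
	int stack[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	int *usp = stack;
	int n = 1;

	printf("usp + n     -> %d\n", *(usp + n));	/* prints 1 */
	printf("usp + 4 * n -> %d\n", *(usp + 4 * n));	/* prints 4 */
	return 0;
}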
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 12609a1..20ea485 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -10,8 +10,4 @@
#include <topology.h>
-#ifdef CONFIG_SMP
-#define smt_capable() (smp_num_siblings > 1)
-#endif
-
#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 4d3b928..413d6c6 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
-#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b39ba25..f25181b 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -163,8 +163,8 @@
*/
enum cop1x_func {
lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
+ swxc1_op = 0x08, sdxc1_op = 0x09,
+ pfetch_op = 0x0f, madd_s_op = 0x20,
madd_d_op = 0x21, madd_e_op = 0x22,
msub_s_op = 0x28, msub_d_op = 0x29,
msub_e_op = 0x2a, nmadd_s_op = 0x30,
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba25..374ed74 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- ip += 4;
- safe_store_code(new_code2, ip, faulted);
+ safe_store_code(new_code2, ip + 4, faulted);
if (unlikely(faulted))
return -EFAULT;
- flush_icache_range(ip, ip + 8); /* original ip + 12 */
+ flush_icache_range(ip, ip + 8);
return 0;
}
#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb..73b0ddf 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,9 +35,9 @@
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -146,11 +146,11 @@
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
- EX lw t0, SC_FPC_CSR(a0)
+ EX lw t1, SC_FPC_CSR(a0)
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -191,7 +191,7 @@
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -199,7 +199,7 @@
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
- EX lw t0, SC32_FPC_CSR(a0)
+ EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -239,7 +239,7 @@
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc696..758fb3c 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba..9c1aca0 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 506925b..0b4e2e3 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1538,10 +1538,10 @@
break;
}
- case 0x7: /* 7 */
- if (MIPSInst_FUNC(ir) != pfetch_op) {
+ case 0x3:
+ if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
- }
+
/* ignore prefx operation */
break;
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 592ac04..84ac523 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -72,7 +72,7 @@
return 0;
}
-#ifdef CONFIG_MIPS_VPE_LOADER
+#ifdef CONFIG_MIPS_VPE_LOADER_CMP
int vpe_run(struct vpe *v)
{
struct vpe_notifications *n;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ca3e3a4..2242181 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -119,7 +119,7 @@
do_IRQ(MALTA_INT_BASE + irq);
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_MT
if (aprp_hook)
aprp_hook();
#endif
@@ -310,7 +310,7 @@
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
if (aprp_hook)
aprp_hook();
#endif
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d37be36..2b91b0e 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -150,6 +150,7 @@
msg.address_lo =
((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+ break;
case OCTEON_DMA_BAR_TYPE_BIG:
/* When using big bar, Bar 0 is based at 0 */
msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
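
The added break closes a switch fall-through: without it, the small
BAR case fell into the big BAR case and the address just computed was
overwritten. A minimal stand-alone reproduction of the bug class
(names hypothetical):

#include <stdio.h>

enum bar_type { BAR_SMALL, BAR_BIG };

int main(void)
{
	unsigned long addr = 0;

	switch (BAR_SMALL) {
	case BAR_SMALL:
		addr = 0x8000000;
		break;		/* without this, BAR_BIG clobbers addr */
	case BAR_BIG:
		addr = 0;
		break;
	}
	printf("%#lx\n", addr);	/* 0x8000000 */
	return 0;
}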
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 992e989..cbc6b9b 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -3,5 +3,6 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += hash.h
-generic-y += trace_clock.h
+generic-y += mcs_spinlock.h
generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 2e40f1c..480af0d 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -10,8 +10,8 @@
generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
-generic-y += cmpxchg.h
generic-y += cmpxchg-local.h
+generic-y += cmpxchg.h
generic-y += cputime.h
generic-y += current.h
generic-y += device.h
@@ -25,6 +25,7 @@
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -34,6 +35,7 @@
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h
@@ -41,6 +43,7 @@
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -53,11 +56,11 @@
generic-y += signal.h
generic-y += socket.h
generic-y += sockios.h
-generic-y += statfs.h
generic-y += stat.h
+generic-y += statfs.h
generic-y += string.h
-generic-y += switch_to.h
generic-y += swab.h
+generic-y += switch_to.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
@@ -68,5 +71,3 @@
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 752c981b..ecf25e6 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,9 +1,29 @@
+generic-y += auxvec.h
generic-y += barrier.h
-generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
- segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
- div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
- poll.h xor.h clkdev.h exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += preempt.h
+generic-y += segment.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 637fe03..60d5d17 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -32,17 +32,6 @@
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
-/* #define CONFIG_PARISC_TMPALIAS */
-
-#ifdef CONFIG_PARISC_TMPALIAS
-void clear_user_highpage(struct page *page, unsigned long vaddr);
-#define clear_user_highpage clear_user_highpage
-struct vm_area_struct;
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma);
-#define __HAVE_ARCH_COPY_USER_HIGHPAGE
-#endif
-
/*
* These are used to make use of C type-checking..
*/
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 3516e0b..64f2992 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -191,8 +191,4 @@
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 4270679..265ae51 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -828,13 +828,13 @@
#define __NR_finit_module (__NR_Linux + 333)
#define __NR_sched_setattr (__NR_Linux + 334)
#define __NR_sched_getattr (__NR_Linux + 335)
+#define __NR_utimes (__NR_Linux + 336)
-#define __NR_Linux_syscalls (__NR_sched_getattr + 1)
+#define __NR_Linux_syscalls (__NR_utimes + 1)
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_utimes /* utime */
#define HPUX_GATEWAY_ADDR 0xC0000004
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ac87a40..a6ffc77 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -581,67 +581,3 @@
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-
-#ifdef CONFIG_PARISC_TMPALIAS
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *vto;
- unsigned long flags;
-
- /* Clear using TMPALIAS region. The page doesn't need to
- be flushed but the kernel mapping needs to be purged. */
-
- vto = kmap_atomic(page);
-
- /* The PA-RISC 2.0 Architecture book states on page F-6:
- "Before a write-capable translation is enabled, *all*
- non-equivalently-aliased translations must be removed
- from the page table and purged from the TLB. (Note
- that the caches are not required to be flushed at this
- time.) Before any non-equivalent aliased translation
- is re-enabled, the virtual address range for the writeable
- page (the entire page) must be flushed from the cache,
- and the write-capable translation removed from the page
- table and purged from the TLB." */
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- purge_tlb_end(flags);
- preempt_disable();
- clear_user_page_asm(vto, vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *vfrom, *vto;
- unsigned long flags;
-
- /* Copy using TMPALIAS region. This has the advantage
- that the `from' page doesn't need to be flushed. However,
- the `to' page must be flushed in copy_user_page_asm since
- it can be used to bring in executable code. */
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- pdtlb_kernel(vfrom);
- purge_tlb_end(flags);
- preempt_disable();
- copy_user_page_asm(vto, vfrom, vaddr);
- flush_dcache_page_asm(__pa(vto), vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-#endif /* CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8fa3fbb..80e5dd2 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -431,6 +431,7 @@
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
+ ENTRY_COMP(utimes)
/* Nothing yet */
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 6c0a955..3fb1bc4 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,7 +1,8 @@
generic-y += clkdev.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
generic-y += rwsem.h
generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += vtime.h
-generic-y += hash.h
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 84fdf68..a613d2c 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -200,10 +200,11 @@
/*
* We can't access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit ABI
+ * can access 288 bytes in the 64bit big-endian ABI,
+ * or 512 bytes with the new ELFv2 little-endian ABI.
*/
if (!is_32bit_task())
- usp -= 288;
+ usp -= USER_REDZONE_SIZE;
return (void __user *) (usp - len);
}
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 40157e2..ed82142 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -816,8 +816,8 @@
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
-int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index becc08e..279b80f 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -28,11 +28,23 @@
#ifdef __powerpc64__
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE 512
+#define KERNEL_REDZONE_SIZE 288
+
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
- STACK_FRAME_OVERHEAD + 288)
+ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
/* Size of dummy stack frame allocated when calling signal handler. */
@@ -41,6 +53,8 @@
#else /* __powerpc64__ */
+#define USER_REDZONE_SIZE 0
+#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index d0b5fca..c920215 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -99,7 +99,6 @@
#ifdef CONFIG_SMP
#include <asm/cputable.h>
-#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
#ifdef CONFIG_PPC64
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 11c1d06..7a13f37 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -98,17 +98,19 @@
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;
- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 9b27b29..b0ded97 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -74,6 +74,7 @@
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
+ addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
return create_branch((unsigned int *)ip, addr, 0);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1..af064d2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1048,6 +1048,15 @@
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
+ /*
+ * Flush TM state out so we can copy it. __switch_to_tm() does this
+ * flush but it removes the checkpointed state from the current CPU and
+ * transitions the CPU out of TM mode. Hence we need to call
+ * tm_recheckpoint_new_task() (on the same task) to restore the
+ * checkpointed state back and the TM mode.
+ */
+ __switch_to_tm(src);
+ tm_recheckpoint_new_task(src);
*dst = *src;
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 1482327..d88736f 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index e35bf77..8d253c2 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -65,8 +65,8 @@
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
- /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
- char abigap[288];
+ /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
+ char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
static const char fmt32[] = KERN_INFO \
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e66d4ec..818dce3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1504,73 +1504,6 @@
1: addi r8,r8,16
.endr
- /* Save DEC */
- mfspr r5,SPRN_DEC
- mftb r6
- extsw r5,r5
- add r5,r5,r6
- std r5,VCPU_DEC_EXPIRES(r9)
-
-BEGIN_FTR_SECTION
- b 8f
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- /* Save POWER8-specific registers */
- mfspr r5, SPRN_IAMR
- mfspr r6, SPRN_PSPB
- mfspr r7, SPRN_FSCR
- std r5, VCPU_IAMR(r9)
- stw r6, VCPU_PSPB(r9)
- std r7, VCPU_FSCR(r9)
- mfspr r5, SPRN_IC
- mfspr r6, SPRN_VTB
- mfspr r7, SPRN_TAR
- std r5, VCPU_IC(r9)
- std r6, VCPU_VTB(r9)
- std r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-#endif
- mfspr r8, SPRN_EBBHR
- std r8, VCPU_EBBHR(r9)
- mfspr r5, SPRN_EBBRR
- mfspr r6, SPRN_BESCR
- mfspr r7, SPRN_CSIGR
- mfspr r8, SPRN_TACR
- std r5, VCPU_EBBRR(r9)
- std r6, VCPU_BESCR(r9)
- std r7, VCPU_CSIGR(r9)
- std r8, VCPU_TACR(r9)
- mfspr r5, SPRN_TCSCR
- mfspr r6, SPRN_ACOP
- mfspr r7, SPRN_PID
- mfspr r8, SPRN_WORT
- std r5, VCPU_TCSCR(r9)
- std r6, VCPU_ACOP(r9)
- stw r7, VCPU_GUEST_PID(r9)
- std r8, VCPU_WORT(r9)
-8:
-
- /* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
- mfspr r5,SPRN_AMR
- mfspr r6,SPRN_UAMOR
- std r5,VCPU_AMR(r9)
- std r6,VCPU_UAMOR(r9)
- li r6,0
- mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -2203,7 +2136,7 @@
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
- stw r6,VCPU_VRSAVE(r3)
+ stw r6,VCPU_VRSAVE(r31)
mtlr r30
mtmsrd r5
isync
@@ -2240,7 +2173,7 @@
bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- lwz r7,VCPU_VRSAVE(r4)
+ lwz r7,VCPU_VRSAVE(r31)
mtspr SPRN_VRSAVE,r7
mtlr r30
mr r4,r31
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47..e865d74 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@
area->nid = nid;
area->order = order;
- area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+ area->pages = alloc_pages_exact_node(area->nid,
+ GFP_KERNEL|__GFP_THISNODE,
area->order);
if (!area->pages) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4931838..4a0a64f 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -83,7 +83,6 @@
#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
-#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index f514743..253fefe 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -114,6 +114,7 @@
ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */
+
/**
* ioda_eeh_post_init - Chip dependent post initialization
* @hose: PCI controller
@@ -221,6 +222,22 @@
return ret;
}
+static void ioda_eeh_phb_diag(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+ long rc;
+
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (rc != OPAL_SUCCESS) {
+ pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
+ __func__, hose->global_number, rc);
+ return;
+ }
+
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
+}
+
/**
* ioda_eeh_get_state - Retrieve the state of PE
* @pe: EEH PE
@@ -272,6 +289,9 @@
result |= EEH_STATE_DMA_ACTIVE;
result |= EEH_STATE_MMIO_ENABLED;
result |= EEH_STATE_DMA_ENABLED;
+ } else if (!(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
}
return result;
@@ -315,6 +335,15 @@
__func__, fstate, hose->global_number, pe_no);
}
+ /* Dump PHB diag-data for frozen PE */
+ if (result != EEH_STATE_NOT_SUPPORT &&
+ (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
+ (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
+ !(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
+ }
+
return result;
}
@@ -530,42 +559,6 @@
}
/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: EEH PE
- * @severity: Severity level of the log
- * @drv_log: buffer to store the log
- * @len: space of the log buffer
- *
- * The function is used to retrieve error log from P7IOC.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
-{
- s64 ret;
- unsigned long flags;
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
-
- spin_lock_irqsave(&phb->lock, flags);
-
- ret = opal_pci_get_phb_diag_data2(phb->opal_id,
- phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
- if (ret) {
- spin_unlock_irqrestore(&phb->lock, flags);
- pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
- __func__, hose->global_number, pe->addr, ret);
- return -EIO;
- }
-
- /* The PHB diag-data is always indicative */
- pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-
- spin_unlock_irqrestore(&phb->lock, flags);
-
- return 0;
-}
-
-/**
* ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
* @pe: EEH PE
*
@@ -646,22 +639,6 @@
}
}
-static void ioda_eeh_phb_diag(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- long rc;
-
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
- PNV_PCI_DIAG_BUF_SIZE);
- if (rc != OPAL_SUCCESS) {
- pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
- __func__, hose->global_number, rc);
- return;
- }
-
- pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-}
-
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
struct eeh_pe **pe)
{
@@ -835,6 +812,20 @@
}
/*
+ * The EEH core will try to recover from a fenced PHB or a
+ * frozen PE. For a frozen PE, the EEH core enables the IO
+ * path before collecting logs, but that destroys the state
+ * at the error site. So we have to dump the log in
+ * advance here.
+ */
+ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+ ret == EEH_NEXT_ERR_FENCED_PHB) &&
+ !((*pe)->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
+ }
+
+ /*
* If we have no errors on the specific PHB or only
* informative error there, we continue poking it.
* Otherwise, we need actions to be taken by upper
@@ -852,7 +843,6 @@
.set_option = ioda_eeh_set_option,
.get_state = ioda_eeh_get_state,
.reset = ioda_eeh_reset,
- .get_log = ioda_eeh_get_log,
.configure_bridge = ioda_eeh_configure_bridge,
.next_error = ioda_eeh_next_error
};
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4fbf276..4cd2ea6 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -71,11 +71,11 @@
}
}
-static u64 opal_scom_unmangle(u64 reg)
+static u64 opal_scom_unmangle(u64 addr)
{
/*
* XSCOM indirect addresses have the top bit set. Additionally
- * the reset of the top 3 nibbles is always 0.
+ * the rest of the top 3 nibbles is always 0.
*
* Because the debugfs interface uses signed offsets and shifts
* the address left by 3, we basically cannot use the top 4 bits
@@ -86,10 +86,13 @@
* conversion here. To leave room for further xscom address
* expansion, we only clear out the top byte
*
+ * For in-kernel use, we also support the real indirect bit, so
+ * we test for any of the top 5 bits
+ *
*/
- if (reg & (1ull << 59))
- reg = (reg & ~(0xffull << 56)) | (1ull << 63);
- return reg;
+ if (addr & (0x1full << 59))
+ addr = (addr & ~(0xffull << 56)) | (1ull << 63);
+ return addr;
}
static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
@@ -98,8 +101,8 @@
int64_t rc;
__be64 v;
- reg = opal_scom_unmangle(reg);
- rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+ reg = opal_scom_unmangle(m->addr + reg);
+ rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
*value = be64_to_cpu(v);
return opal_xscom_err_xlate(rc);
}
@@ -109,8 +112,8 @@
struct opal_scom_map *m = map;
int64_t rc;
- reg = opal_scom_unmangle(reg);
- rc = opal_xscom_write(m->chip, m->addr + reg, value);
+ reg = opal_scom_unmangle(m->addr + reg);
+ rc = opal_xscom_write(m->chip, reg, value);
return opal_xscom_err_xlate(rc);
}
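
For reference, the widened unmangle rule in stand-alone form, using
the same masks as the hunk above (behaviour on inputs beyond the
comment's description is my extrapolation):

#include <stdint.h>
#include <stdio.h>

static uint64_t unmangle(uint64_t addr)
{
	/* any of the top 5 bits below the sign bit set? */
	if (addr & (0x1full << 59))
		addr = (addr & ~(0xffull << 56)) | (1ull << 63);
	return addr;
}

int main(void)
{
	/* debugfs-style mangled address: bit 59 set */
	printf("%016llx\n", (unsigned long long)unmangle(1ull << 59));
	return 0;	/* prints 8000000000000000 */
}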
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 95633d7..8518817 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -134,57 +134,72 @@
pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+ if (data->brdgCtl)
+ pr_info(" brdgCtl: %08x\n",
+ data->brdgCtl);
+ if (data->portStatusReg || data->rootCmplxStatus ||
+ data->busAgentStatus)
+ pr_info(" UtlSts: %08x %08x %08x\n",
+ data->portStatusReg, data->rootCmplxStatus,
+ data->busAgentStatus);
+ if (data->deviceStatus || data->slotStatus ||
+ data->linkStatus || data->devCmdStatus ||
+ data->devSecStatus)
+ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+ data->deviceStatus, data->slotStatus,
+ data->linkStatus, data->devCmdStatus,
+ data->devSecStatus);
+ if (data->rootErrorStatus || data->uncorrErrorStatus ||
+ data->corrErrorStatus)
+ pr_info(" RootErrSts: %08x %08x %08x\n",
+ data->rootErrorStatus, data->uncorrErrorStatus,
+ data->corrErrorStatus);
+ if (data->tlpHdr1 || data->tlpHdr2 ||
+ data->tlpHdr3 || data->tlpHdr4)
+ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+ data->tlpHdr1, data->tlpHdr2,
+ data->tlpHdr3, data->tlpHdr4);
+ if (data->sourceId || data->errorClass ||
+ data->correlator)
+ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+ data->sourceId, data->errorClass,
+ data->correlator);
+ if (data->p7iocPlssr || data->p7iocCsr)
+ pr_info(" PhbSts: %016llx %016llx\n",
+ data->p7iocPlssr, data->p7iocCsr);
+ if (data->lemFir || data->lemErrorMask ||
+ data->lemWOF)
+ pr_info(" Lem: %016llx %016llx %016llx\n",
+ data->lemFir, data->lemErrorMask,
+ data->lemWOF);
+ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+ data->phbErrorLog0 || data->phbErrorLog1)
+ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+ data->phbErrorStatus, data->phbFirstErrorStatus,
+ data->phbErrorLog0, data->phbErrorLog1);
+ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+ data->mmioErrorLog0 || data->mmioErrorLog1)
+ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+ data->mmioErrorStatus, data->mmioFirstErrorStatus,
+ data->mmioErrorLog0, data->mmioErrorLog1);
+ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+ data->dma0ErrorLog0 || data->dma0ErrorLog1)
+ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+ data->dma0ErrorLog0, data->dma0ErrorLog1);
+ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+ data->dma1ErrorLog0 || data->dma1ErrorLog1)
+ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+ data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+ i, data->pestA[i], data->pestB[i]);
}
}
@@ -197,62 +212,77 @@
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
-
- pr_info(" nFir: %016llx\n", data->nFir);
- pr_info(" nFirMask: %016llx\n", data->nFirMask);
- pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
- pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
- pr_info(" PhbCsr: %016llx\n", data->phbCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+ if (data->brdgCtl)
+ pr_info(" brdgCtl: %08x\n",
+ data->brdgCtl);
+ if (data->portStatusReg || data->rootCmplxStatus ||
+ data->busAgentStatus)
+ pr_info(" UtlSts: %08x %08x %08x\n",
+ data->portStatusReg, data->rootCmplxStatus,
+ data->busAgentStatus);
+ if (data->deviceStatus || data->slotStatus ||
+ data->linkStatus || data->devCmdStatus ||
+ data->devSecStatus)
+ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+ data->deviceStatus, data->slotStatus,
+ data->linkStatus, data->devCmdStatus,
+ data->devSecStatus);
+ if (data->rootErrorStatus || data->uncorrErrorStatus ||
+ data->corrErrorStatus)
+ pr_info(" RootErrSts: %08x %08x %08x\n",
+ data->rootErrorStatus, data->uncorrErrorStatus,
+ data->corrErrorStatus);
+ if (data->tlpHdr1 || data->tlpHdr2 ||
+ data->tlpHdr3 || data->tlpHdr4)
+ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+ data->tlpHdr1, data->tlpHdr2,
+ data->tlpHdr3, data->tlpHdr4);
+ if (data->sourceId || data->errorClass ||
+ data->correlator)
+ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+ data->sourceId, data->errorClass,
+ data->correlator);
+ if (data->nFir || data->nFirMask ||
+ data->nFirWOF)
+ pr_info(" nFir: %016llx %016llx %016llx\n",
+ data->nFir, data->nFirMask,
+ data->nFirWOF);
+ if (data->phbPlssr || data->phbCsr)
+ pr_info(" PhbSts: %016llx %016llx\n",
+ data->phbPlssr, data->phbCsr);
+ if (data->lemFir || data->lemErrorMask ||
+ data->lemWOF)
+ pr_info(" Lem: %016llx %016llx %016llx\n",
+ data->lemFir, data->lemErrorMask,
+ data->lemWOF);
+ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+ data->phbErrorLog0 || data->phbErrorLog1)
+ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+ data->phbErrorStatus, data->phbFirstErrorStatus,
+ data->phbErrorLog0, data->phbErrorLog1);
+ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+ data->mmioErrorLog0 || data->mmioErrorLog1)
+ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+ data->mmioErrorStatus, data->mmioFirstErrorStatus,
+ data->mmioErrorLog0, data->mmioErrorLog1);
+ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+ data->dma0ErrorLog0 || data->dma0ErrorLog1)
+ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+ data->dma0ErrorLog0, data->dma0ErrorLog1);
+ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+ data->dma1ErrorLog0 || data->dma1ErrorLog1)
+ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+ data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+ i, data->pestA[i], data->pestB[i]);
}
}
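
Both dump routines adopt the same logging pattern: related registers
are grouped onto one line, and a group that is all zero is skipped,
which keeps the diag output short on lightly faulted PHBs. The pattern
in miniature (user-space sketch, names hypothetical):

#include <stdio.h>

static void dump_group(const char *tag, unsigned a, unsigned b, unsigned c)
{
	if (a || b || c)
		printf("  %-12s %08x %08x %08x\n", tag, a, b, c);
}

int main(void)
{
	dump_group("UtlSts:", 0, 0, 0);		/* suppressed */
	dump_group("RootErrSts:", 1, 0, 2);	/* printed */
	return 0;
}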
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 110f4fb..81a7a0a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -26,7 +26,6 @@
#include <linux/of_fdt.h>
#include <linux/interrupt.h>
#include <linux/bug.h>
-#include <linux/cpuidle.h>
#include <linux/pci.h>
#include <asm/machdep.h>
@@ -225,16 +224,6 @@
return 1;
}
-void powernv_idle(void)
-{
- /* Hook to cpuidle framework if available, else
- * call on default platform idle code
- */
- if (cpuidle_idle_call()) {
- power7_idle();
- }
-}
-
define_machine(powernv) {
.name = "PowerNV",
.probe = pnv_probe,
@@ -244,7 +233,7 @@
.show_cpuinfo = pnv_show_cpuinfo,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
- .power_save = powernv_idle,
+ .power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
.dma_set_mask = pnv_dma_set_mask,
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 82789e7..0ea99e3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -35,12 +35,7 @@
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@@ -93,15 +88,20 @@
static void rtas_stop_self(void)
{
- struct rtas_args *args = &rtas_stop_self_args;
+ struct rtas_args args = {
+ .token = cpu_to_be32(rtas_stop_self_token),
+ .nargs = 0,
+ .nret = 1,
+ .rets = &args.args[0],
+ };
local_irq_disable();
- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(args));
+ enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
@@ -392,10 +392,10 @@
}
}
- rtas_stop_self_args.token = rtas_token("stop-self");
+ rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 972df0ff..2db8cc6 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -39,7 +39,6 @@
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
-#include <linux/cpuidle.h>
#include <linux/of.h>
#include <linux/kexec.h>
@@ -356,29 +355,24 @@
static void pseries_lpar_idle(void)
{
- /* This would call on the cpuidle framework, and the back-end pseries
- * driver to go to idle states
+ /*
+ * Default handler to go into low thread priority and possibly
+ * low power mode by ceding the processor to the hypervisor
*/
- if (cpuidle_idle_call()) {
- /* On error, execute default handler
- * to go into low thread priority and possibly
- * low power mode by cedeing processor to hypervisor
- */
- /* Indicate to hypervisor that we are idle. */
- get_lppaca()->idle = 1;
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- */
- cede_processor();
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ cede_processor();
- get_lppaca()->idle = 0;
- }
+ get_lppaca()->idle = 0;
}
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index cbe56154..953f17c8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -117,6 +117,7 @@
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 8386a4a..57892a8 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 5d7e8cf..d350ed9 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,7 +8,11 @@
#include <linux/thread_info.h>
#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
-#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
+
+#define __SC_DELOUSE(t,v) ({ \
+ BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+ (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
#define PSW32_MASK_PER 0x40000000UL
#define PSW32_MASK_DAT 0x04000000UL
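
__SC_DELOUSE masks a 64-bit register value down to a 31-bit compat
pointer; the new BUILD_BUG_ON additionally rejects any non-pointer
argument wider than 4 bytes, which the old macro would have silently
truncated. The masking in stand-alone form:

#include <stdio.h>

int main(void)
{
	unsigned long long reg = 0xdeadbeef80001000ULL;
	/* s390 compat pointers are 31-bit: drop everything above bit 30 */
	void *ptr = (void *)(unsigned long)(reg & 0x7fffffff);

	printf("%p\n", ptr);	/* 0x1000 */
	return 0;
}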
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index db02052..ca38139 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -86,48 +86,51 @@
#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
-asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_chown16, const char __user *, filename,
+ u16, user, u16, group)
{
return sys_chown(filename, low2highuid(user), low2highgid(group));
}
-asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_lchown16, const char __user *,
+ filename, u16, user, u16, group)
{
return sys_lchown(filename, low2highuid(user), low2highgid(group));
}
-asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group)
{
return sys_fchown(fd, low2highuid(user), low2highgid(group));
}
-asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
{
return sys_setregid(low2highgid(rgid), low2highgid(egid));
}
-asmlinkage long sys32_setgid16(u16 gid)
+COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
{
return sys_setgid((gid_t)gid);
}
-asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
{
return sys_setreuid(low2highuid(ruid), low2highuid(euid));
}
-asmlinkage long sys32_setuid16(u16 uid)
+COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
{
return sys_setuid((uid_t)uid);
}
-asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
{
return sys_setresuid(low2highuid(ruid), low2highuid(euid),
- low2highuid(suid));
+ low2highuid(suid));
}
-asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
+COMPAT_SYSCALL_DEFINE3(s390_getresuid16, u16 __user *, ruidp,
+ u16 __user *, euidp, u16 __user *, suidp)
{
const struct cred *cred = current_cred();
int retval;
@@ -144,13 +147,14 @@
return retval;
}
-asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+COMPAT_SYSCALL_DEFINE3(s390_setresgid16, u16, rgid, u16, egid, u16, sgid)
{
return sys_setresgid(low2highgid(rgid), low2highgid(egid),
- low2highgid(sgid));
+ low2highgid(sgid));
}
-asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
+COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
+ u16 __user *, egidp, u16 __user *, sgidp)
{
const struct cred *cred = current_cred();
int retval;
@@ -167,12 +171,12 @@
return retval;
}
-asmlinkage long sys32_setfsuid16(u16 uid)
+COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
{
return sys_setfsuid((uid_t)uid);
}
-asmlinkage long sys32_setfsgid16(u16 gid)
+COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
{
return sys_setfsgid((gid_t)gid);
}
@@ -215,7 +219,7 @@
return 0;
}
-asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
+COMPAT_SYSCALL_DEFINE2(s390_getgroups16, int, gidsetsize, u16 __user *, grouplist)
{
const struct cred *cred = current_cred();
int i;
@@ -240,7 +244,7 @@
return i;
}
-asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
+COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplist)
{
struct group_info *group_info;
int retval;
@@ -265,22 +269,22 @@
return retval;
}
-asmlinkage long sys32_getuid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getuid16)
{
return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
}
-asmlinkage long sys32_geteuid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_geteuid16)
{
return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
}
-asmlinkage long sys32_getgid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getgid16)
{
return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
}
-asmlinkage long sys32_getegid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getegid16)
{
return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
}
@@ -295,41 +299,35 @@
}
#endif
-asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
+COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low)
{
- if ((int)high < 0)
- return -EINVAL;
- else
- return sys_truncate(path, (high << 32) | low);
+ return sys_truncate(path, (unsigned long)high << 32 | low);
}
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low)
{
- if ((int)high < 0)
- return -EINVAL;
- else
- return sys_ftruncate(fd, (high << 32) | low);
+ return sys_ftruncate(fd, (unsigned long)high << 32 | low);
}
-asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
- size_t count, u32 poshi, u32 poslo)
+COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf,
+ compat_size_t, count, u32, high, u32, low)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
- return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+ return sys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low);
}
-asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
- size_t count, u32 poshi, u32 poslo)
+COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf,
+ compat_size_t, count, u32, high, u32, low)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
- return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+ return sys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low);
}
-asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
+COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count)
{
- return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
+ return sys_readahead(fd, (unsigned long)high << 32 | low, count);
}
struct stat64_emu31 {
@@ -381,7 +379,7 @@
return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
-asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_stat(filename, &stat);
@@ -390,7 +388,7 @@
return ret;
}
-asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_lstat(filename, &stat);
@@ -399,7 +397,7 @@
return ret;
}
-asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_fstat(fd, &stat);
@@ -408,8 +406,8 @@
return ret;
}
-asmlinkage long sys32_fstatat64(unsigned int dfd, const char __user *filename,
- struct stat64_emu31 __user* statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename,
+ struct stat64_emu31 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
@@ -435,7 +433,7 @@
compat_ulong_t offset;
};
-asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg)
{
struct mmap_arg_struct_emu31 a;
@@ -447,7 +445,7 @@
a.offset >> PAGE_SHIFT);
}
-asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg)
{
struct mmap_arg_struct_emu31 a;
@@ -456,7 +454,7 @@
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
-asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
+COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
@@ -464,7 +462,7 @@
return sys_read(fd, buf, count);
}
-asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t count)
+COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
@@ -478,14 +476,13 @@
* because the 31 bit values differ from the 64 bit values.
*/
-asmlinkage long
-sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
+COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise)
{
if (advise == 4)
advise = POSIX_FADV_DONTNEED;
else if (advise == 5)
advise = POSIX_FADV_NOREUSE;
- return sys_fadvise64(fd, offset, len, advise);
+ return sys_fadvise64(fd, (unsigned long)high << 32 | low, len, advise);
}
struct fadvise64_64_args {
@@ -495,8 +492,7 @@
int advice;
};
-asmlinkage long
-sys32_fadvise64_64(struct fadvise64_64_args __user *args)
+COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
{
struct fadvise64_64_args a;
@@ -508,3 +504,17 @@
a.advice = POSIX_FADV_NOREUSE;
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
+
+COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow,
+ u32, nhigh, u32, nlow, unsigned int, flags)
+{
+ return sys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow,
+ ((u64)nhigh << 32) + nlow, flags);
+}
+
+COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow,
+ u32, lenhigh, u32, lenlow)
+{
+ return sys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow,
+ ((u64)lenhigh << 32) + lenlow);
+}
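
Worth noting about the conversions in this file: the old wrappers took the offset halves as unsigned long and had to reject "negative" high words by hand, while declaring them as u32 and widening explicitly makes the recombination correct for the whole 64-bit range, which is why the (int)high < 0 checks could simply be deleted. The pattern, as a standalone program (illustration only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Rebuild a 64-bit file offset from the two 32-bit register halves a
	 * 31-bit caller passes in, as done by s390_truncate64() etc. above. */
	static uint64_t join64(uint32_t high, uint32_t low)
	{
		/* The cast must precede the shift: shifting a 32-bit value
		 * left by 32 bits would be undefined behaviour. */
		return (uint64_t)high << 32 | low;
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)join64(0x1, 0x80000000u));
		/* prints 0x180000000 */
		return 0;
	}
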
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1bfda3e..39ddfdb 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -76,46 +76,43 @@
struct mmap_arg_struct_emu31;
struct fadvise64_64_args;
-long sys32_chown16(const char __user * filename, u16 user, u16 group);
-long sys32_lchown16(const char __user * filename, u16 user, u16 group);
-long sys32_fchown16(unsigned int fd, u16 user, u16 group);
-long sys32_setregid16(u16 rgid, u16 egid);
-long sys32_setgid16(u16 gid);
-long sys32_setreuid16(u16 ruid, u16 euid);
-long sys32_setuid16(u16 uid);
-long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
-long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
-long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
-long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
-long sys32_setfsuid16(u16 uid);
-long sys32_setfsgid16(u16 gid);
-long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
-long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
-long sys32_getuid16(void);
-long sys32_geteuid16(void);
-long sys32_getgid16(void);
-long sys32_getegid16(void);
-long sys32_truncate64(const char __user * path, unsigned long high,
- unsigned long low);
-long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
-long sys32_init_module(void __user *umod, unsigned long len,
- const char __user *uargs);
-long sys32_delete_module(const char __user *name_user, unsigned int flags);
-long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
- u32 poshi, u32 poslo);
-long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
- size_t count, u32 poshi, u32 poslo);
-compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
-long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf);
-long sys32_lstat64(const char __user * filename,
- struct stat64_emu31 __user * statbuf);
-long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
-long sys32_fstatat64(unsigned int dfd, const char __user *filename,
- struct stat64_emu31 __user* statbuf, int flag);
-unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
-long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
-long sys32_read(unsigned int fd, char __user * buf, size_t count);
-long sys32_write(unsigned int fd, const char __user * buf, size_t count);
-long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
-long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
+long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group);
+long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group);
+long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group);
+long compat_sys_s390_setregid16(u16 rgid, u16 egid);
+long compat_sys_s390_setgid16(u16 gid);
+long compat_sys_s390_setreuid16(u16 ruid, u16 euid);
+long compat_sys_s390_setuid16(u16 uid);
+long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid);
+long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
+long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid);
+long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
+long compat_sys_s390_setfsuid16(u16 uid);
+long compat_sys_s390_setfsgid16(u16 gid);
+long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist);
+long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist);
+long compat_sys_s390_getuid16(void);
+long compat_sys_s390_geteuid16(void);
+long compat_sys_s390_getgid16(void);
+long compat_sys_s390_getegid16(void);
+long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low);
+long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low);
+long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low);
+long compat_sys_s390_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, u32 high, u32 low);
+long compat_sys_s390_readahead(int fd, u32 high, u32 low, s32 count);
+long compat_sys_s390_stat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_lstat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag);
+long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg);
+long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg);
+long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count);
+long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count);
+long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise);
+long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags);
+long compat_sys_s390_fallocate(int fd, int mode, u32 offhigh, u32 offlow, u32 lenhigh, u32 lenlow);
+long compat_sys_sigreturn(void);
+long compat_sys_rt_sigreturn(void);
+
#endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 8b84bc3..7df5ed9 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -241,7 +241,7 @@
return 0;
}
-asmlinkage long sys32_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
@@ -260,7 +260,7 @@
return 0;
}
-asmlinkage long sys32_rt_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
deleted file mode 100644
index 59c8efc..0000000
--- a/arch/s390/kernel/compat_wrapper.S
+++ /dev/null
@@ -1,1425 +0,0 @@
-/*
-* wrapper for 31 bit compatible system calls.
-*
-* Copyright IBM Corp. 2000, 2006
-* Author(s): Gerhard Tonn (ton@de.ibm.com),
-* Thomas Spatzier (tspat@de.ibm.com)
-*/
-
-#include <linux/linkage.h>
-
-ENTRY(sys32_exit_wrapper)
- lgfr %r2,%r2 # int
- jg sys_exit # branch to sys_exit
-
-ENTRY(sys32_read_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- jg sys32_read # branch to sys_read
-
-ENTRY(sys32_write_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # size_t
- jg sys32_write # branch to system call
-
-ENTRY(sys32_close_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_close # branch to system call
-
-ENTRY(sys32_creat_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- jg sys_creat # branch to system call
-
-ENTRY(sys32_link_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- jg sys_link # branch to system call
-
-ENTRY(sys32_unlink_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_unlink # branch to system call
-
-ENTRY(sys32_chdir_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_chdir # branch to system call
-
-ENTRY(sys32_time_wrapper)
- llgtr %r2,%r2 # int *
- jg compat_sys_time # branch to system call
-
-ENTRY(sys32_mknod_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- llgfr %r4,%r4 # dev
- jg sys_mknod # branch to system call
-
-ENTRY(sys32_chmod_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # mode_t
- jg sys_chmod # branch to system call
-
-ENTRY(sys32_lchown16_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_uid_emu31_t
- jg sys32_lchown16 # branch to system call
-
-#sys32_getpid_wrapper # void
-
-ENTRY(sys32_mount_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # char *
- llgfr %r5,%r5 # unsigned long
- llgtr %r6,%r6 # void *
- jg compat_sys_mount # branch to system call
-
-ENTRY(sys32_oldumount_wrapper)
- llgtr %r2,%r2 # char *
- jg sys_oldumount # branch to system call
-
-ENTRY(sys32_setuid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- jg sys32_setuid16 # branch to system call
-
-#sys32_getuid16_wrapper # void
-
-ENTRY(sys32_ptrace_wrapper)
- lgfr %r2,%r2 # long
- lgfr %r3,%r3 # long
- llgtr %r4,%r4 # long
- llgfr %r5,%r5 # long
- jg compat_sys_ptrace # branch to system call
-
-ENTRY(sys32_alarm_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_alarm # branch to system call
-
-ENTRY(compat_sys_utime_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct compat_utimbuf *
- jg compat_sys_utime # branch to system call
-
-ENTRY(sys32_access_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- jg sys_access # branch to system call
-
-ENTRY(sys32_nice_wrapper)
- lgfr %r2,%r2 # int
- jg sys_nice # branch to system call
-
-#sys32_sync_wrapper # void
-
-ENTRY(sys32_kill_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_kill # branch to system call
-
-ENTRY(sys32_rename_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- jg sys_rename # branch to system call
-
-ENTRY(sys32_mkdir_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- jg sys_mkdir # branch to system call
-
-ENTRY(sys32_rmdir_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_rmdir # branch to system call
-
-ENTRY(sys32_dup_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_dup # branch to system call
-
-ENTRY(sys32_pipe_wrapper)
- llgtr %r2,%r2 # u32 *
- jg sys_pipe # branch to system call
-
-ENTRY(compat_sys_times_wrapper)
- llgtr %r2,%r2 # struct compat_tms *
- jg compat_sys_times # branch to system call
-
-ENTRY(sys32_brk_wrapper)
- llgtr %r2,%r2 # unsigned long
- jg sys_brk # branch to system call
-
-ENTRY(sys32_setgid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- jg sys32_setgid16 # branch to system call
-
-#sys32_getgid16_wrapper # void
-
-ENTRY(sys32_signal_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # __sighandler_t
- jg sys_signal
-
-#sys32_geteuid16_wrapper # void
-
-#sys32_getegid16_wrapper # void
-
-ENTRY(sys32_acct_wrapper)
- llgtr %r2,%r2 # char *
- jg sys_acct # branch to system call
-
-ENTRY(sys32_umount_wrapper)
- llgtr %r2,%r2 # char *
- lgfr %r3,%r3 # int
- jg sys_umount # branch to system call
-
-ENTRY(compat_sys_ioctl_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- llgfr %r4,%r4 # unsigned int
- jg compat_sys_ioctl # branch to system call
-
-ENTRY(compat_sys_fcntl_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- llgfr %r4,%r4 # unsigned long
- jg compat_sys_fcntl # branch to system call
-
-ENTRY(sys32_setpgid_wrapper)
- lgfr %r2,%r2 # pid_t
- lgfr %r3,%r3 # pid_t
- jg sys_setpgid # branch to system call
-
-ENTRY(sys32_umask_wrapper)
- lgfr %r2,%r2 # int
- jg sys_umask # branch to system call
-
-ENTRY(sys32_chroot_wrapper)
- llgtr %r2,%r2 # char *
- jg sys_chroot # branch to system call
-
-ENTRY(sys32_ustat_wrapper)
- llgfr %r2,%r2 # dev_t
- llgtr %r3,%r3 # struct ustat *
- jg compat_sys_ustat
-
-ENTRY(sys32_dup2_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- jg sys_dup2 # branch to system call
-
-#sys32_getppid_wrapper # void
-
-#sys32_getpgrp_wrapper # void
-
-#sys32_setsid_wrapper # void
-
-ENTRY(sys32_setreuid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- jg sys32_setreuid16 # branch to system call
-
-ENTRY(sys32_setregid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- llgfr %r3,%r3 # __kernel_old_gid_emu31_t
- jg sys32_setregid16 # branch to system call
-
-ENTRY(sys_sigsuspend_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- llgfr %r4,%r4 # old_sigset_t
- jg sys_sigsuspend
-
-ENTRY(compat_sys_sigpending_wrapper)
- llgtr %r2,%r2 # compat_old_sigset_t *
- jg compat_sys_sigpending # branch to system call
-
-ENTRY(sys32_sethostname_wrapper)
- llgtr %r2,%r2 # char *
- lgfr %r3,%r3 # int
- jg sys_sethostname # branch to system call
-
-ENTRY(compat_sys_setrlimit_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # struct rlimit_emu31 *
- jg compat_sys_setrlimit # branch to system call
-
-ENTRY(compat_sys_old_getrlimit_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # struct rlimit_emu31 *
- jg compat_sys_old_getrlimit # branch to system call
-
-ENTRY(compat_sys_getrlimit_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # struct rlimit_emu31 *
- jg compat_sys_getrlimit # branch to system call
-
-ENTRY(sys32_mmap2_wrapper)
- llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
- jg sys32_mmap2 # branch to system call
-
-ENTRY(compat_sys_gettimeofday_wrapper)
- llgtr %r2,%r2 # struct timeval_emu31 *
- llgtr %r3,%r3 # struct timezone *
- jg compat_sys_gettimeofday # branch to system call
-
-ENTRY(compat_sys_settimeofday_wrapper)
- llgtr %r2,%r2 # struct timeval_emu31 *
- llgtr %r3,%r3 # struct timezone *
- jg compat_sys_settimeofday # branch to system call
-
-ENTRY(sys32_getgroups16_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
- jg sys32_getgroups16 # branch to system call
-
-ENTRY(sys32_setgroups16_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
- jg sys32_setgroups16 # branch to system call
-
-ENTRY(sys32_symlink_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- jg sys_symlink # branch to system call
-
-ENTRY(sys32_readlink_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # char *
- lgfr %r4,%r4 # int
- jg sys_readlink # branch to system call
-
-ENTRY(sys32_uselib_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_uselib # branch to system call
-
-ENTRY(sys32_swapon_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- jg sys_swapon # branch to system call
-
-ENTRY(sys32_reboot_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- llgfr %r4,%r4 # unsigned int
- llgtr %r5,%r5 # void *
- jg sys_reboot # branch to system call
-
-ENTRY(old32_readdir_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # void *
- llgfr %r4,%r4 # unsigned int
- jg compat_sys_old_readdir # branch to system call
-
-ENTRY(old32_mmap_wrapper)
- llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
- jg old32_mmap # branch to system call
-
-ENTRY(sys32_munmap_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- jg sys_munmap # branch to system call
-
-ENTRY(sys32_fchmod_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # mode_t
- jg sys_fchmod # branch to system call
-
-ENTRY(sys32_fchown16_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # compat_uid_t
- llgfr %r4,%r4 # compat_uid_t
- jg sys32_fchown16 # branch to system call
-
-ENTRY(sys32_getpriority_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_getpriority # branch to system call
-
-ENTRY(sys32_setpriority_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- jg sys_setpriority # branch to system call
-
-ENTRY(compat_sys_statfs_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct compat_statfs *
- jg compat_sys_statfs # branch to system call
-
-ENTRY(compat_sys_fstatfs_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # struct compat_statfs *
- jg compat_sys_fstatfs # branch to system call
-
-ENTRY(compat_sys_socketcall_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # u32 *
- jg compat_sys_socketcall # branch to system call
-
-ENTRY(sys32_syslog_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # char *
- lgfr %r4,%r4 # int
- jg sys_syslog # branch to system call
-
-ENTRY(compat_sys_newstat_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct stat_emu31 *
- jg compat_sys_newstat # branch to system call
-
-ENTRY(compat_sys_newlstat_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct stat_emu31 *
- jg compat_sys_newlstat # branch to system call
-
-ENTRY(compat_sys_newfstat_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # struct stat_emu31 *
- jg compat_sys_newfstat # branch to system call
-
-#sys32_vhangup_wrapper # void
-
-ENTRY(sys32_swapoff_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_swapoff # branch to system call
-
-ENTRY(compat_sys_sysinfo_wrapper)
- llgtr %r2,%r2 # struct sysinfo_emu31 *
- jg compat_sys_sysinfo # branch to system call
-
-ENTRY(sys32_fsync_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_fsync # branch to system call
-
-#sys32_sigreturn_wrapper # done in sigreturn_glue
-
-#sys32_clone_wrapper # done in clone_glue
-
-ENTRY(sys32_setdomainname_wrapper)
- llgtr %r2,%r2 # char *
- lgfr %r3,%r3 # int
- jg sys_setdomainname # branch to system call
-
-ENTRY(sys32_newuname_wrapper)
- llgtr %r2,%r2 # struct new_utsname *
- jg sys_newuname # branch to system call
-
-ENTRY(compat_sys_adjtimex_wrapper)
- llgtr %r2,%r2 # struct compat_timex *
- jg compat_sys_adjtimex # branch to system call
-
-ENTRY(sys32_mprotect_wrapper)
- llgtr %r2,%r2 # unsigned long (actually pointer
- llgfr %r3,%r3 # size_t
- llgfr %r4,%r4 # unsigned long
- jg sys_mprotect # branch to system call
-
-ENTRY(sys_init_module_wrapper)
- llgtr %r2,%r2 # void *
- llgfr %r3,%r3 # unsigned long
- llgtr %r4,%r4 # char *
- jg sys_init_module # branch to system call
-
-ENTRY(sys_delete_module_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # unsigned int
- jg sys_delete_module # branch to system call
-
-ENTRY(sys32_quotactl_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # qid_t
- llgtr %r5,%r5 # caddr_t
- jg sys_quotactl # branch to system call
-
-ENTRY(sys32_getpgid_wrapper)
- lgfr %r2,%r2 # pid_t
- jg sys_getpgid # branch to system call
-
-ENTRY(sys32_fchdir_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_fchdir # branch to system call
-
-ENTRY(sys32_bdflush_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # long
- jg sys_bdflush # branch to system call
-
-ENTRY(sys32_sysfs_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- jg sys_sysfs # branch to system call
-
-ENTRY(sys32_personality_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_s390_personality # branch to system call
-
-ENTRY(sys32_setfsuid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- jg sys32_setfsuid16 # branch to system call
-
-ENTRY(sys32_setfsgid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- jg sys32_setfsgid16 # branch to system call
-
-ENTRY(sys32_llseek_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- llgtr %r5,%r5 # loff_t *
- llgfr %r6,%r6 # unsigned int
- jg sys_llseek # branch to system call
-
-ENTRY(sys32_getdents_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # void *
- llgfr %r4,%r4 # unsigned int
- jg compat_sys_getdents # branch to system call
-
-ENTRY(compat_sys_select_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # compat_fd_set *
- llgtr %r4,%r4 # compat_fd_set *
- llgtr %r5,%r5 # compat_fd_set *
- llgtr %r6,%r6 # struct compat_timeval *
- jg compat_sys_select # branch to system call
-
-ENTRY(sys32_flock_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- jg sys_flock # branch to system call
-
-ENTRY(sys32_msync_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- lgfr %r4,%r4 # int
- jg sys_msync # branch to system call
-
-ENTRY(compat_sys_readv_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const struct compat_iovec *
- llgfr %r4,%r4 # unsigned long
- jg compat_sys_readv # branch to system call
-
-ENTRY(compat_sys_writev_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const struct compat_iovec *
- llgfr %r4,%r4 # unsigned long
- jg compat_sys_writev # branch to system call
-
-ENTRY(sys32_getsid_wrapper)
- lgfr %r2,%r2 # pid_t
- jg sys_getsid # branch to system call
-
-ENTRY(sys32_fdatasync_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_fdatasync # branch to system call
-
-ENTRY(sys32_mlock_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- jg sys_mlock # branch to system call
-
-ENTRY(sys32_munlock_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- jg sys_munlock # branch to system call
-
-ENTRY(sys32_mlockall_wrapper)
- lgfr %r2,%r2 # int
- jg sys_mlockall # branch to system call
-
-#sys32_munlockall_wrapper # void
-
-ENTRY(sys32_sched_setparam_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # struct sched_param *
- jg sys_sched_setparam # branch to system call
-
-ENTRY(sys32_sched_getparam_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # struct sched_param *
- jg sys_sched_getparam # branch to system call
-
-ENTRY(sys32_sched_setscheduler_wrapper)
- lgfr %r2,%r2 # pid_t
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # struct sched_param *
- jg sys_sched_setscheduler # branch to system call
-
-ENTRY(sys32_sched_getscheduler_wrapper)
- lgfr %r2,%r2 # pid_t
- jg sys_sched_getscheduler # branch to system call
-
-#sys32_sched_yield_wrapper # void
-
-ENTRY(sys32_sched_get_priority_max_wrapper)
- lgfr %r2,%r2 # int
- jg sys_sched_get_priority_max # branch to system call
-
-ENTRY(sys32_sched_get_priority_min_wrapper)
- lgfr %r2,%r2 # int
- jg sys_sched_get_priority_min # branch to system call
-
-ENTRY(compat_sys_nanosleep_wrapper)
- llgtr %r2,%r2 # struct compat_timespec *
- llgtr %r3,%r3 # struct compat_timespec *
- jg compat_sys_nanosleep # branch to system call
-
-ENTRY(sys32_mremap_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- llgfr %r5,%r5 # unsigned long
- llgfr %r6,%r6 # unsigned long
- jg sys_mremap # branch to system call
-
-ENTRY(sys32_setresuid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_uid_emu31_t
- jg sys32_setresuid16 # branch to system call
-
-ENTRY(sys32_getresuid16_wrapper)
- llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
- llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
- llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
- jg sys32_getresuid16 # branch to system call
-
-ENTRY(sys32_poll_wrapper)
- llgtr %r2,%r2 # struct pollfd *
- llgfr %r3,%r3 # unsigned int
- lgfr %r4,%r4 # int
- jg sys_poll # branch to system call
-
-ENTRY(sys32_setresgid16_wrapper)
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- llgfr %r3,%r3 # __kernel_old_gid_emu31_t
- llgfr %r4,%r4 # __kernel_old_gid_emu31_t
- jg sys32_setresgid16 # branch to system call
-
-ENTRY(sys32_getresgid16_wrapper)
- llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
- llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
- llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
- jg sys32_getresgid16 # branch to system call
-
-ENTRY(sys32_prctl_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- llgfr %r5,%r5 # unsigned long
- llgfr %r6,%r6 # unsigned long
- jg sys_prctl # branch to system call
-
-#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
-
-ENTRY(sys32_pread64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- llgfr %r5,%r5 # u32
- llgfr %r6,%r6 # u32
- jg sys32_pread64 # branch to system call
-
-ENTRY(sys32_pwrite64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # size_t
- llgfr %r5,%r5 # u32
- llgfr %r6,%r6 # u32
- jg sys32_pwrite64 # branch to system call
-
-ENTRY(sys32_chown16_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_gid_emu31_t
- jg sys32_chown16 # branch to system call
-
-ENTRY(sys32_getcwd_wrapper)
- llgtr %r2,%r2 # char *
- llgfr %r3,%r3 # unsigned long
- jg sys_getcwd # branch to system call
-
-ENTRY(sys32_capget_wrapper)
- llgtr %r2,%r2 # cap_user_header_t
- llgtr %r3,%r3 # cap_user_data_t
- jg sys_capget # branch to system call
-
-ENTRY(sys32_capset_wrapper)
- llgtr %r2,%r2 # cap_user_header_t
- llgtr %r3,%r3 # const cap_user_data_t
- jg sys_capset # branch to system call
-
-#sys32_vfork_wrapper # done in vfork_glue
-
-ENTRY(sys32_truncate64_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- jg sys32_truncate64 # branch to system call
-
-ENTRY(sys32_ftruncate64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- jg sys32_ftruncate64 # branch to system call
-
-ENTRY(sys32_lchown_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # uid_t
- llgfr %r4,%r4 # gid_t
- jg sys_lchown # branch to system call
-
-#sys32_getuid_wrapper # void
-#sys32_getgid_wrapper # void
-#sys32_geteuid_wrapper # void
-#sys32_getegid_wrapper # void
-
-ENTRY(sys32_setreuid_wrapper)
- llgfr %r2,%r2 # uid_t
- llgfr %r3,%r3 # uid_t
- jg sys_setreuid # branch to system call
-
-ENTRY(sys32_setregid_wrapper)
- llgfr %r2,%r2 # gid_t
- llgfr %r3,%r3 # gid_t
- jg sys_setregid # branch to system call
-
-ENTRY(sys32_getgroups_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # gid_t *
- jg sys_getgroups # branch to system call
-
-ENTRY(sys32_setgroups_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # gid_t *
- jg sys_setgroups # branch to system call
-
-ENTRY(sys32_fchown_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # uid_t
- llgfr %r4,%r4 # gid_t
- jg sys_fchown # branch to system call
-
-ENTRY(sys32_setresuid_wrapper)
- llgfr %r2,%r2 # uid_t
- llgfr %r3,%r3 # uid_t
- llgfr %r4,%r4 # uid_t
- jg sys_setresuid # branch to system call
-
-ENTRY(sys32_getresuid_wrapper)
- llgtr %r2,%r2 # uid_t *
- llgtr %r3,%r3 # uid_t *
- llgtr %r4,%r4 # uid_t *
- jg sys_getresuid # branch to system call
-
-ENTRY(sys32_setresgid_wrapper)
- llgfr %r2,%r2 # gid_t
- llgfr %r3,%r3 # gid_t
- llgfr %r4,%r4 # gid_t
- jg sys_setresgid # branch to system call
-
-ENTRY(sys32_getresgid_wrapper)
- llgtr %r2,%r2 # gid_t *
- llgtr %r3,%r3 # gid_t *
- llgtr %r4,%r4 # gid_t *
- jg sys_getresgid # branch to system call
-
-ENTRY(sys32_chown_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # uid_t
- llgfr %r4,%r4 # gid_t
- jg sys_chown # branch to system call
-
-ENTRY(sys32_setuid_wrapper)
- llgfr %r2,%r2 # uid_t
- jg sys_setuid # branch to system call
-
-ENTRY(sys32_setgid_wrapper)
- llgfr %r2,%r2 # gid_t
- jg sys_setgid # branch to system call
-
-ENTRY(sys32_setfsuid_wrapper)
- llgfr %r2,%r2 # uid_t
- jg sys_setfsuid # branch to system call
-
-ENTRY(sys32_setfsgid_wrapper)
- llgfr %r2,%r2 # gid_t
- jg sys_setfsgid # branch to system call
-
-ENTRY(sys32_pivot_root_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- jg sys_pivot_root # branch to system call
-
-ENTRY(sys32_mincore_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- llgtr %r4,%r4 # unsigned char *
- jg sys_mincore # branch to system call
-
-ENTRY(sys32_madvise_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
- lgfr %r4,%r4 # int
- jg sys_madvise # branch to system call
-
-ENTRY(sys32_getdents64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # void *
- llgfr %r4,%r4 # unsigned int
- jg sys_getdents64 # branch to system call
-
-ENTRY(compat_sys_fcntl64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- llgfr %r4,%r4 # unsigned long
- jg compat_sys_fcntl64 # branch to system call
-
-ENTRY(sys32_stat64_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct stat64 *
- jg sys32_stat64 # branch to system call
-
-ENTRY(sys32_lstat64_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct stat64 *
- jg sys32_lstat64 # branch to system call
-
-ENTRY(sys32_stime_wrapper)
- llgtr %r2,%r2 # long *
- jg compat_sys_stime # branch to system call
-
-ENTRY(sys32_fstat64_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgtr %r3,%r3 # struct stat64 *
- jg sys32_fstat64 # branch to system call
-
-ENTRY(sys32_setxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- lgfr %r6,%r6 # int
- jg sys_setxattr
-
-ENTRY(sys32_lsetxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- lgfr %r6,%r6 # int
- jg sys_lsetxattr
-
-ENTRY(sys32_fsetxattr_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- lgfr %r6,%r6 # int
- jg sys_fsetxattr
-
-ENTRY(sys32_getxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- jg sys_getxattr
-
-ENTRY(sys32_lgetxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- jg sys_lgetxattr
-
-ENTRY(sys32_fgetxattr_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # void *
- llgfr %r5,%r5 # size_t
- jg sys_fgetxattr
-
-ENTRY(sys32_listxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- jg sys_listxattr
-
-ENTRY(sys32_llistxattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- jg sys_llistxattr
-
-ENTRY(sys32_flistxattr_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- jg sys_flistxattr
-
-ENTRY(sys32_removexattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- jg sys_removexattr
-
-ENTRY(sys32_lremovexattr_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # char *
- jg sys_lremovexattr
-
-ENTRY(sys32_fremovexattr_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # char *
- jg sys_fremovexattr
-
-ENTRY(sys32_sched_setaffinity_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # unsigned int
- llgtr %r4,%r4 # unsigned long *
- jg compat_sys_sched_setaffinity
-
-ENTRY(sys32_sched_getaffinity_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # unsigned int
- llgtr %r4,%r4 # unsigned long *
- jg compat_sys_sched_getaffinity
-
-ENTRY(sys32_exit_group_wrapper)
- lgfr %r2,%r2 # int
- jg sys_exit_group # branch to system call
-
-ENTRY(sys32_set_tid_address_wrapper)
- llgtr %r2,%r2 # int *
- jg sys_set_tid_address # branch to system call
-
-ENTRY(sys_epoll_create_wrapper)
- lgfr %r2,%r2 # int
- jg sys_epoll_create # branch to system call
-
-ENTRY(sys_epoll_ctl_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # struct epoll_event *
- jg sys_epoll_ctl # branch to system call
-
-ENTRY(sys_epoll_wait_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct epoll_event *
- lgfr %r4,%r4 # int
- lgfr %r5,%r5 # int
- jg sys_epoll_wait # branch to system call
-
-ENTRY(sys32_fadvise64_wrapper)
- lgfr %r2,%r2 # int
- sllg %r3,%r3,32 # get high word of 64bit loff_t
- or %r3,%r4 # get low word of 64bit loff_t
- llgfr %r4,%r5 # size_t (unsigned long)
- lgfr %r5,%r6 # int
- jg sys32_fadvise64
-
-ENTRY(sys32_fadvise64_64_wrapper)
- llgtr %r2,%r2 # struct fadvise64_64_args *
- jg sys32_fadvise64_64
-
-ENTRY(sys32_clock_settime_wrapper)
- lgfr %r2,%r2 # clockid_t (int)
- llgtr %r3,%r3 # struct compat_timespec *
- jg compat_sys_clock_settime
-
-ENTRY(sys32_clock_gettime_wrapper)
- lgfr %r2,%r2 # clockid_t (int)
- llgtr %r3,%r3 # struct compat_timespec *
- jg compat_sys_clock_gettime
-
-ENTRY(sys32_clock_getres_wrapper)
- lgfr %r2,%r2 # clockid_t (int)
- llgtr %r3,%r3 # struct compat_timespec *
- jg compat_sys_clock_getres
-
-ENTRY(sys32_clock_nanosleep_wrapper)
- lgfr %r2,%r2 # clockid_t (int)
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # struct compat_timespec *
- llgtr %r5,%r5 # struct compat_timespec *
- jg compat_sys_clock_nanosleep
-
-ENTRY(sys32_timer_create_wrapper)
- lgfr %r2,%r2 # timer_t (int)
- llgtr %r3,%r3 # struct compat_sigevent *
- llgtr %r4,%r4 # timer_t *
- jg compat_sys_timer_create
-
-ENTRY(sys32_timer_settime_wrapper)
- lgfr %r2,%r2 # timer_t (int)
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # struct compat_itimerspec *
- llgtr %r5,%r5 # struct compat_itimerspec *
- jg compat_sys_timer_settime
-
-ENTRY(sys32_timer_gettime_wrapper)
- lgfr %r2,%r2 # timer_t (int)
- llgtr %r3,%r3 # struct compat_itimerspec *
- jg compat_sys_timer_gettime
-
-ENTRY(sys32_timer_getoverrun_wrapper)
- lgfr %r2,%r2 # timer_t (int)
- jg sys_timer_getoverrun
-
-ENTRY(sys32_timer_delete_wrapper)
- lgfr %r2,%r2 # timer_t (int)
- jg sys_timer_delete
-
-ENTRY(sys32_io_setup_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # u32 *
- jg compat_sys_io_setup
-
-ENTRY(sys32_io_destroy_wrapper)
- llgfr %r2,%r2 # (aio_context_t) u32
- jg sys_io_destroy
-
-ENTRY(sys32_io_getevents_wrapper)
- llgfr %r2,%r2 # (aio_context_t) u32
- lgfr %r3,%r3 # long
- lgfr %r4,%r4 # long
- llgtr %r5,%r5 # struct io_event *
- llgtr %r6,%r6 # struct compat_timespec *
- jg compat_sys_io_getevents
-
-ENTRY(sys32_io_submit_wrapper)
- llgfr %r2,%r2 # (aio_context_t) u32
- lgfr %r3,%r3 # long
- llgtr %r4,%r4 # struct iocb **
- jg compat_sys_io_submit
-
-ENTRY(sys32_io_cancel_wrapper)
- llgfr %r2,%r2 # (aio_context_t) u32
- llgtr %r3,%r3 # struct iocb *
- llgtr %r4,%r4 # struct io_event *
- jg sys_io_cancel
-
-ENTRY(compat_sys_statfs64_wrapper)
- llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # compat_size_t
- llgtr %r4,%r4 # struct compat_statfs64 *
- jg compat_sys_statfs64
-
-ENTRY(compat_sys_fstatfs64_wrapper)
- llgfr %r2,%r2 # unsigned int fd
- llgfr %r3,%r3 # compat_size_t
- llgtr %r4,%r4 # struct compat_statfs64 *
- jg compat_sys_fstatfs64
-
-ENTRY(compat_sys_mq_open_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- llgfr %r4,%r4 # mode_t
- llgtr %r5,%r5 # struct compat_mq_attr *
- jg compat_sys_mq_open
-
-ENTRY(sys32_mq_unlink_wrapper)
- llgtr %r2,%r2 # const char *
- jg sys_mq_unlink
-
-ENTRY(compat_sys_mq_timedsend_wrapper)
- lgfr %r2,%r2 # mqd_t
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # size_t
- llgfr %r5,%r5 # unsigned int
- llgtr %r6,%r6 # const struct compat_timespec *
- jg compat_sys_mq_timedsend
-
-ENTRY(compat_sys_mq_timedreceive_wrapper)
- lgfr %r2,%r2 # mqd_t
- llgtr %r3,%r3 # char *
- llgfr %r4,%r4 # size_t
- llgtr %r5,%r5 # unsigned int *
- llgtr %r6,%r6 # const struct compat_timespec *
- jg compat_sys_mq_timedreceive
-
-ENTRY(compat_sys_mq_notify_wrapper)
- lgfr %r2,%r2 # mqd_t
- llgtr %r3,%r3 # struct compat_sigevent *
- jg compat_sys_mq_notify
-
-ENTRY(compat_sys_mq_getsetattr_wrapper)
- lgfr %r2,%r2 # mqd_t
- llgtr %r3,%r3 # struct compat_mq_attr *
- llgtr %r4,%r4 # struct compat_mq_attr *
- jg compat_sys_mq_getsetattr
-
-ENTRY(compat_sys_add_key_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- llgtr %r4,%r4 # const void *
- llgfr %r5,%r5 # size_t
- llgfr %r6,%r6 # (key_serial_t) u32
- jg sys_add_key
-
-ENTRY(compat_sys_request_key_wrapper)
- llgtr %r2,%r2 # const char *
- llgtr %r3,%r3 # const char *
- llgtr %r4,%r4 # const void *
- llgfr %r5,%r5 # (key_serial_t) u32
- jg sys_request_key
-
-ENTRY(sys32_remap_file_pages_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # unsigned long
- llgfr %r4,%r4 # unsigned long
- llgfr %r5,%r5 # unsigned long
- llgfr %r6,%r6 # unsigned long
- jg sys_remap_file_pages
-
-ENTRY(compat_sys_kexec_load_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # unsigned long
- llgtr %r4,%r4 # struct kexec_segment *
- llgfr %r5,%r5 # unsigned long
- jg compat_sys_kexec_load
-
-ENTRY(sys_ioprio_set_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- jg sys_ioprio_set
-
-ENTRY(sys_ioprio_get_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_ioprio_get
-
-ENTRY(sys_inotify_add_watch_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # u32
- jg sys_inotify_add_watch
-
-ENTRY(sys_inotify_rm_watch_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # u32
- jg sys_inotify_rm_watch
-
-ENTRY(sys_mkdirat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- jg sys_mkdirat
-
-ENTRY(sys_mknodat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- llgfr %r5,%r5 # unsigned int
- jg sys_mknodat
-
-ENTRY(sys_fchownat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # uid_t
- llgfr %r5,%r5 # gid_t
- lgfr %r6,%r6 # int
- jg sys_fchownat
-
-ENTRY(compat_sys_futimesat_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # struct timeval *
- jg compat_sys_futimesat
-
-ENTRY(sys32_fstatat64_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # struct stat64 *
- lgfr %r5,%r5 # int
- jg sys32_fstatat64
-
-ENTRY(sys_unlinkat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- jg sys_unlinkat
-
-ENTRY(sys_renameat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # const char *
- jg sys_renameat
-
-ENTRY(sys_linkat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # const char *
- lgfr %r6,%r6 # int
- jg sys_linkat
-
-ENTRY(sys_symlinkat_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # const char *
- jg sys_symlinkat
-
-ENTRY(sys_readlinkat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- llgtr %r4,%r4 # char *
- lgfr %r5,%r5 # int
- jg sys_readlinkat
-
-ENTRY(sys_fchmodat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- llgfr %r4,%r4 # mode_t
- jg sys_fchmodat
-
-ENTRY(sys_faccessat_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- jg sys_faccessat
-
-ENTRY(compat_sys_pselect6_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # fd_set *
- llgtr %r4,%r4 # fd_set *
- llgtr %r5,%r5 # fd_set *
- llgtr %r6,%r6 # struct timespec *
- llgt %r0,164(%r15) # void *
- stg %r0,160(%r15)
- jg compat_sys_pselect6
-
-ENTRY(compat_sys_ppoll_wrapper)
- llgtr %r2,%r2 # struct pollfd *
- llgfr %r3,%r3 # unsigned int
- llgtr %r4,%r4 # struct timespec *
- llgtr %r5,%r5 # const sigset_t *
- llgfr %r6,%r6 # size_t
- jg compat_sys_ppoll
-
-ENTRY(sys_unshare_wrapper)
- llgfr %r2,%r2 # unsigned long
- jg sys_unshare
-
-ENTRY(sys_splice_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # loff_t *
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # loff_t *
- llgfr %r6,%r6 # size_t
- llgf %r0,164(%r15) # unsigned int
- stg %r0,160(%r15)
- jg sys_splice
-
-ENTRY(sys_sync_file_range_wrapper)
- lgfr %r2,%r2 # int
- sllg %r3,%r3,32 # get high word of 64bit loff_t
- or %r3,%r4 # get low word of 64bit loff_t
- sllg %r4,%r5,32 # get high word of 64bit loff_t
- or %r4,%r6 # get low word of 64bit loff_t
- llgf %r5,164(%r15) # unsigned int
- jg sys_sync_file_range
-
-ENTRY(sys_tee_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- llgfr %r4,%r4 # size_t
- llgfr %r5,%r5 # unsigned int
- jg sys_tee
-
-ENTRY(sys_getcpu_wrapper)
- llgtr %r2,%r2 # unsigned *
- llgtr %r3,%r3 # unsigned *
- llgtr %r4,%r4 # struct getcpu_cache *
- jg sys_getcpu
-
-ENTRY(compat_sys_utimes_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # struct compat_timeval *
- jg compat_sys_utimes
-
-ENTRY(compat_sys_utimensat_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # char *
- llgtr %r4,%r4 # struct compat_timespec *
- lgfr %r5,%r5 # int
- jg compat_sys_utimensat
-
-ENTRY(sys_eventfd_wrapper)
- llgfr %r2,%r2 # unsigned int
- jg sys_eventfd
-
-ENTRY(sys_fallocate_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- sllg %r4,%r4,32 # get high word of 64bit loff_t
- lr %r4,%r5 # get low word of 64bit loff_t
- sllg %r5,%r6,32 # get high word of 64bit loff_t
- l %r5,164(%r15) # get low word of 64bit loff_t
- jg sys_fallocate
-
-ENTRY(sys_timerfd_create_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_timerfd_create
-
-ENTRY(sys_eventfd2_wrapper)
- llgfr %r2,%r2 # unsigned int
- lgfr %r3,%r3 # int
- jg sys_eventfd2
-
-ENTRY(sys_inotify_init1_wrapper)
- lgfr %r2,%r2 # int
- jg sys_inotify_init1
-
-ENTRY(sys_pipe2_wrapper)
- llgtr %r2,%r2 # u32 *
- lgfr %r3,%r3 # int
- jg sys_pipe2 # branch to system call
-
-ENTRY(sys_dup3_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- lgfr %r4,%r4 # int
- jg sys_dup3 # branch to system call
-
-ENTRY(sys_epoll_create1_wrapper)
- lgfr %r2,%r2 # int
- jg sys_epoll_create1 # branch to system call
-
-ENTRY(sys32_readahead_wrapper)
- lgfr %r2,%r2 # int
- llgfr %r3,%r3 # u32
- llgfr %r4,%r4 # u32
- lgfr %r5,%r5 # s32
- jg sys32_readahead # branch to system call
-
-ENTRY(sys_tkill_wrapper)
- lgfr %r2,%r2 # pid_t
- lgfr %r3,%r3 # int
- jg sys_tkill # branch to system call
-
-ENTRY(sys_tgkill_wrapper)
- lgfr %r2,%r2 # pid_t
- lgfr %r3,%r3 # pid_t
- lgfr %r4,%r4 # int
- jg sys_tgkill # branch to system call
-
-ENTRY(compat_sys_keyctl_wrapper)
- llgfr %r2,%r2 # u32
- llgfr %r3,%r3 # u32
- llgfr %r4,%r4 # u32
- llgfr %r5,%r5 # u32
- llgfr %r6,%r6 # u32
- jg compat_sys_keyctl # branch to system call
-
-ENTRY(sys_perf_event_open_wrapper)
- llgtr %r2,%r2 # const struct perf_event_attr *
- lgfr %r3,%r3 # pid_t
- lgfr %r4,%r4 # int
- lgfr %r5,%r5 # int
- llgfr %r6,%r6 # unsigned long
- jg sys_perf_event_open # branch to system call
-
-ENTRY(sys_clone_wrapper)
- llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # unsigned long
- llgtr %r4,%r4 # int *
- llgtr %r5,%r5 # int *
- jg sys_clone # branch to system call
-
-ENTRY(sys32_execve_wrapper)
- llgtr %r2,%r2 # char *
- llgtr %r3,%r3 # compat_uptr_t *
- llgtr %r4,%r4 # compat_uptr_t *
- jg compat_sys_execve # branch to system call
-
-ENTRY(sys_fanotify_init_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
- jg sys_fanotify_init # branch to system call
-
-ENTRY(sys_prlimit64_wrapper)
- lgfr %r2,%r2 # pid_t
- llgfr %r3,%r3 # unsigned int
- llgtr %r4,%r4 # const struct rlimit64 __user *
- llgtr %r5,%r5 # struct rlimit64 __user *
- jg sys_prlimit64 # branch to system call
-
-ENTRY(sys_name_to_handle_at_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char __user *
- llgtr %r4,%r4 # struct file_handle __user *
- llgtr %r5,%r5 # int __user *
- lgfr %r6,%r6 # int
- jg sys_name_to_handle_at
-
-ENTRY(compat_sys_clock_adjtime_wrapper)
- lgfr %r2,%r2 # clockid_t (int)
- llgtr %r3,%r3 # struct compat_timex __user *
- jg compat_sys_clock_adjtime
-
-ENTRY(sys_syncfs_wrapper)
- lgfr %r2,%r2 # int
- jg sys_syncfs
-
-ENTRY(sys_setns_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_setns
-
-ENTRY(compat_sys_process_vm_readv_wrapper)
- lgfr %r2,%r2 # compat_pid_t
- llgtr %r3,%r3 # struct compat_iovec __user *
- llgfr %r4,%r4 # unsigned long
- llgtr %r5,%r5 # struct compat_iovec __user *
- llgfr %r6,%r6 # unsigned long
- llgf %r0,164(%r15) # unsigned long
- stg %r0,160(%r15)
- jg compat_sys_process_vm_readv
-
-ENTRY(compat_sys_process_vm_writev_wrapper)
- lgfr %r2,%r2 # compat_pid_t
- llgtr %r3,%r3 # struct compat_iovec __user *
- llgfr %r4,%r4 # unsigned long
- llgtr %r5,%r5 # struct compat_iovec __user *
- llgfr %r6,%r6 # unsigned long
- llgf %r0,164(%r15) # unsigned long
- stg %r0,160(%r15)
- jg compat_sys_process_vm_writev
-
-ENTRY(sys_s390_runtime_instr_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- jg sys_s390_runtime_instr
-
-ENTRY(sys_kcmp_wrapper)
- lgfr %r2,%r2 # pid_t
- lgfr %r3,%r3 # pid_t
- lgfr %r4,%r4 # int
- llgfr %r5,%r5 # unsigned long
- llgfr %r6,%r6 # unsigned long
- jg sys_kcmp
-
-ENTRY(sys_finit_module_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const char __user *
- lgfr %r4,%r4 # int
- jg sys_finit_module
-
-ENTRY(sys_sched_setattr_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # struct sched_attr __user *
- jg sys_sched_setattr
-
-ENTRY(sys_sched_getattr_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # const char __user *
- llgfr %r3,%r3 # unsigned int
- jg sys_sched_getattr
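
The deleted wrappers above consist almost entirely of three s390 load instructions that widen 31-bit user register contents into 64-bit kernel registers before branching to the real system call. Their effect, expressed in C for reference (an illustration; the mnemonics are from the z/Architecture instruction set):

	#include <stdint.h>

	/* lgfr: load, sign-extending 32 -> 64 (int, pid_t, ...) */
	static long lgfr(int32_t x)            { return (long)x; }

	/* llgfr: load logical, zero-extending 32 -> 64 (unsigned int, size_t) */
	static unsigned long llgfr(uint32_t x) { return (unsigned long)x; }

	/* llgtr: load logical thirty-one bits - clears the top 33 bits,
	 * matching 31-bit compat user pointers */
	static unsigned long llgtr(uint32_t x) { return x & 0x7fffffffUL; }
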
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
new file mode 100644
index 0000000..824c39d
--- /dev/null
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -0,0 +1,215 @@
+/*
+ * Compat system call wrappers.
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include "entry.h"
+
+#define COMPAT_SYSCALL_WRAP1(name, ...) \
+ COMPAT_SYSCALL_WRAPx(1, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP2(name, ...) \
+ COMPAT_SYSCALL_WRAPx(2, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP3(name, ...) \
+ COMPAT_SYSCALL_WRAPx(3, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP4(name, ...) \
+ COMPAT_SYSCALL_WRAPx(4, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP5(name, ...) \
+ COMPAT_SYSCALL_WRAPx(5, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP6(name, ...) \
+ COMPAT_SYSCALL_WRAPx(6, _##name, __VA_ARGS__)
+
+#define __SC_COMPAT_TYPE(t, a) \
+ __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
+
+#define __SC_COMPAT_CAST(t, a) \
+({ \
+ long __ReS = a; \
+ \
+ BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \
+ !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t)); \
+ if (__TYPE_IS_L(t)) \
+ __ReS = (s32)a; \
+ if (__TYPE_IS_UL(t)) \
+ __ReS = (u32)a; \
+ if (__TYPE_IS_PTR(t)) \
+ __ReS = a & 0x7fffffff; \
+ (t)__ReS; \
+})
+
+/*
+ * The COMPAT_SYSCALL_WRAP macro generates system call wrappers to be used by
+ * compat tasks. These wrappers will only be used for system calls where only
+ * the system call arguments need sign or zero extension or zeroing of the upper
+ * 33 bits of pointers.
+ * Note: since the wrapper function will afterwards call a system call which
+ * again performs zero and sign extension for all system call arguments with
+ * a size of less than eight bytes, these compat wrappers only touch those
+ * system call arguments with a size of eight bytes ((unsigned) long and
+ * pointers). Zero and sign extension for e.g. int parameters will be done by
+ * the regular system call wrappers.
+ */
+#define COMPAT_SYSCALL_WRAPx(x, name, ...) \
+ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
+ asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \
+ { \
+ return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \
+ }
+
+COMPAT_SYSCALL_WRAP1(exit, int, error_code);
+COMPAT_SYSCALL_WRAP1(close, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
+COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
+COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename);
+COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
+COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
+COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
+COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds);
+COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
+COMPAT_SYSCALL_WRAP1(nice, int, increment);
+COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
+COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
+COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes);
+COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
+COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
+COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
+COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
+COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid);
+COMPAT_SYSCALL_WRAP1(umask, int, mask);
+COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
+COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd);
+COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
+COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
+COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
+COMPAT_SYSCALL_WRAP3(readlink, const char __user *, path, char __user *, buf, int, bufsiz);
+COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library);
+COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
+COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
+COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
+COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode);
+COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who);
+COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval);
+COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
+COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
+COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
+COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
+COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
+COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
+COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
+COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
+COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
+COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality);
+COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
+COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd);
+COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
+COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
+COMPAT_SYSCALL_WRAP1(mlockall, int, flags);
+COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy);
+COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy);
+COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
+COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
+COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
+COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size);
+COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
+COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
+COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid);
+COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid);
+COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
+COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
+COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid);
+COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
+COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid);
+COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
+COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid);
+COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid);
+COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid);
+COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid);
+COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
+COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
+COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
+COMPAT_SYSCALL_WRAP5(setxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP5(lsetxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP5(fsetxattr, int, fd, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count);
+COMPAT_SYSCALL_WRAP4(getxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP4(lgetxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP3(listxattr, const char __user *, path, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP3(llistxattr, const char __user *, path, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
+COMPAT_SYSCALL_WRAP1(exit_group, int, error_code);
+COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
+COMPAT_SYSCALL_WRAP1(epoll_create, int, size);
+COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
+COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
+COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id);
+COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id);
+COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
+COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
+COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
+COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
+COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
+COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
+COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio);
+COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who);
+COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
+COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd);
+COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
+COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
+COMPAT_SYSCALL_WRAP3(unlinkat, int, dfd, const char __user *, pathname, int, flag);
+COMPAT_SYSCALL_WRAP4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname);
+COMPAT_SYSCALL_WRAP5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags);
+COMPAT_SYSCALL_WRAP3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname);
+COMPAT_SYSCALL_WRAP4(readlinkat, int, dfd, const char __user *, path, char __user *, buf, int, bufsiz);
+COMPAT_SYSCALL_WRAP3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode);
+COMPAT_SYSCALL_WRAP3(faccessat, int, dfd, const char __user *, filename, int, mode);
+COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags);
+COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
+COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count);
+COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags);
+COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags);
+COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags);
+COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
+COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags);
+COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
+COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
+COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val);
+COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
+COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
+COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
+COMPAT_SYSCALL_WRAP1(syncfs, int, fd);
+COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype);
+COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum);
+COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
+COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
+COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
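For illustration, a minimal hand-expanded sketch of what one of the two-argument
wrappers above boils down to, assuming __SC_COMPAT_TYPE and __SC_COMPAT_CAST
behave as the comment at the top of the file describes (compat-sized parameter
types, zeroing of the upper 33 bits of pointer arguments). The u32 parameter
types and the explicit casts are stand-ins, not the literal preprocessor output:

asmlinkage long sys_creat(const char __user *pathname, umode_t mode);

/* Rough equivalent of COMPAT_SYSCALL_WRAP2(creat, ...) after expansion. */
asmlinkage long compat_sys_creat(u32 pathname, u32 mode)
{
	/* zero-extend to 64 bit and clear bit 31: the upper 33 bits become 0 */
	return sys_creat((const char __user *)(unsigned long)(pathname & 0x7fffffffU),
			 (umode_t)mode);
}

The int-sized mode argument is simply passed through; per the comment above, its
sign/zero extension is handled by the regular system call wrapper.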
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index cb533f7..6ac7819 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -67,9 +67,7 @@
struct fadvise64_64_args;
struct old_sigaction;
-long sys_sigreturn(void);
-long sys_rt_sigreturn(void);
-long sys32_sigreturn(void);
-long sys32_rt_sigreturn(void);
+long sys_s390_personality(unsigned int personality);
+long sys_s390_runtime_instr(int command, int signum);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 1439921..542ef48 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -9,349 +9,349 @@
#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
+SYSCALL(sys_exit,sys_exit,compat_sys_exit)
SYSCALL(sys_fork,sys_fork,sys_fork)
-SYSCALL(sys_read,sys_read,sys32_read_wrapper)
-SYSCALL(sys_write,sys_write,sys32_write_wrapper)
+SYSCALL(sys_read,sys_read,compat_sys_s390_read)
+SYSCALL(sys_write,sys_write,compat_sys_s390_write)
SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,sys_close,sys32_close_wrapper)
+SYSCALL(sys_close,sys_close,compat_sys_close)
SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
-SYSCALL(sys_link,sys_link,sys32_link_wrapper)
-SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
-SYSCALL(sys_execve,sys_execve,sys32_execve_wrapper)
-SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
-SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
-SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
-SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */
-SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/
+SYSCALL(sys_creat,sys_creat,compat_sys_creat)
+SYSCALL(sys_link,sys_link,compat_sys_link)
+SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */
+SYSCALL(sys_execve,sys_execve,compat_sys_execve)
+SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir)
+SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */
+SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod)
+SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */
+SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
NI_SYSCALL /* old break syscall holder */
NI_SYSCALL /* old stat syscall holder */
SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
-SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
-SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
-SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/
-SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/
-SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */
-SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
-SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
+SYSCALL(sys_mount,sys_mount,compat_sys_mount)
+SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount)
+SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
+SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
+SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
+SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace)
+SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm)
NI_SYSCALL /* old fstat syscall */
SYSCALL(sys_pause,sys_pause,sys_pause)
-SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper) /* 30 */
+SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */
NI_SYSCALL /* old stty syscall */
NI_SYSCALL /* old gtty syscall */
-SYSCALL(sys_access,sys_access,sys32_access_wrapper)
-SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
+SYSCALL(sys_access,sys_access,compat_sys_access)
+SYSCALL(sys_nice,sys_nice,compat_sys_nice)
NI_SYSCALL /* 35 old ftime syscall */
SYSCALL(sys_sync,sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
-SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
-SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
-SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper) /* 40 */
-SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
-SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
-SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
+SYSCALL(sys_kill,sys_kill,compat_sys_kill)
+SYSCALL(sys_rename,sys_rename,compat_sys_rename)
+SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir)
+SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */
+SYSCALL(sys_dup,sys_dup,compat_sys_dup)
+SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe)
+SYSCALL(sys_times,sys_times,compat_sys_times)
NI_SYSCALL /* old prof syscall */
-SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper) /* 45 */
-SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper) /* old setgid16 syscall*/
-SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/
-SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
-SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */
-SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16) /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
-SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
+SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */
+SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
+SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
+SYSCALL(sys_signal,sys_signal,compat_sys_signal)
+SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
+SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
+SYSCALL(sys_acct,sys_acct,compat_sys_acct)
+SYSCALL(sys_umount,sys_umount,compat_sys_umount)
NI_SYSCALL /* old lock syscall */
-SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
-SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper) /* 55 */
+SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl)
+SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */
NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
+SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid)
NI_SYSCALL /* old ulimit syscall */
NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper) /* 60 */
-SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
-SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
-SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
+SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */
+SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot)
+SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat)
+SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2)
SYSCALL(sys_getppid,sys_getppid,sys_getppid)
SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
SYSCALL(sys_setsid,sys_setsid,sys_setsid)
SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
NI_SYSCALL /* old sgetmask syscall*/
NI_SYSCALL /* old ssetmask syscall*/
-SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */
-SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,sys_sigsuspend,sys_sigsuspend_wrapper)
-SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
-SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
-SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */
-SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
+SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
+SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
+SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend)
+SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending)
+SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname)
+SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */
+SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit)
SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday_wrapper)
-SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday_wrapper)
-SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */
-SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */
+SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday)
+SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday)
+SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
+SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
NI_SYSCALL /* old select syscall */
-SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
+SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink)
NI_SYSCALL /* old lstat syscall */
-SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper) /* 85 */
-SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
-SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
-SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
-SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
-SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */
-SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
+SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */
+SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
+SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
+SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
-SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
-SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
+SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */
+SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */
NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
-SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
+SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
+SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
NI_SYSCALL /* old uname syscall */
SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
NI_SYSCALL /* old "idle" system call */
NI_SYSCALL /* vm86old for i386 */
SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
+SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */
+SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
-SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
-SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
-SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
+SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */
+SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
-SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
+SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */
SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper)
-SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper)
+SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
-SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
-SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
-SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
-SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
+SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */
+SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper) /* 140 */
-SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
-SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
-SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
-SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
-SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
-SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
+SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
+SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */
+SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,sys_select,compat_sys_select)
+SYSCALL(sys_flock,sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */
+SYSCALL(sys_writev,sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */
-SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
-SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
+SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */
+SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
+SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
+SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
-SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
-SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old getresuid16 syscall */
+SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
+SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
+SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
NI_SYSCALL /* for vm86 */
NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
+SYSCALL(sys_poll,sys_poll,compat_sys_poll)
NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
+SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
+SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
+SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
-SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
-SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
-SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
+SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */
+SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
+SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */
SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
NI_SYSCALL /* streams1 */
NI_SYSCALL /* streams2 */
SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
-SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
-SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
-SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper) /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
-SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
-SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
+SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
+SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
SYSCALL(sys_getuid,sys_getuid,sys_getuid)
SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
-SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
-SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper) /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
-SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
-SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
-SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
-SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper) /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
-SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
-SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
-SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
-SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
-SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
-SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
-SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
-SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
-SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper)
+SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */
+SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */
+SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
-SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
-SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper) /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
-SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
-SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
+SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
+SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */
+SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper)
+SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
SYSCALL(sys_futex,sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper)
+SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
+SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
-SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
-SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper) /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
-SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
-SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
-SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
-SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
-SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
-SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
-SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */
+SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
+SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */
+SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
+SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
NI_SYSCALL /* 268 sys_mbind */
NI_SYSCALL /* 269 sys_get_mempolicy */
NI_SYSCALL /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper) /* 280 */
+SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
+SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper)
+SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
+SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
NI_SYSCALL /* 287 sys_migrate_pages */
SYSCALL(sys_openat,sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
-SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
-SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
-SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
-SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */
-SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
-SYSCALL(sys_symlinkat,sys_symlinkat,sys_symlinkat_wrapper)
-SYSCALL(sys_readlinkat,sys_readlinkat,sys_readlinkat_wrapper)
-SYSCALL(sys_fchmodat,sys_fchmodat,sys_fchmodat_wrapper)
-SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper) /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper)
-SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper)
+SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */
+SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */
+SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
-SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
+SYSCALL(sys_splice,sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,sys_tee,compat_sys_tee)
SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
NI_SYSCALL /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
+SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
+SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
+SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */
SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper)
+SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
-SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
+SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
+SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
-SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
-SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
-SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
-SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
-SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
-SYSCALL(sys_sched_setattr,sys_sched_setattr,sys_sched_setattr_wrapper) /* 345 */
-SYSCALL(sys_sched_getattr,sys_sched_getattr,sys_sched_getattr_wrapper)
+SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
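The three columns of each SYSCALL() entry above are the 31-bit native entry
point, the 64-bit entry point, and the compat entry used when a 31-bit binary
runs on a 64-bit kernel, which is why this patch only rewires the third column.
As a sketch of the assumed build machinery (an assumption, not a quote of the
actual entry code), the tables can be pictured as the result of including this
file twice under different macro definitions:

/* Sketch only: assumed shape of the syscall table generation. */
#define SYSCALL(esa, esame, emu)	.long esame	/* 64-bit native table */
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#define SYSCALL(esa, esame, emu)	.long emu	/* compat table */
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL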
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 60c11a6..f91c0311 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -206,11 +206,13 @@
zdev->dma_table = NULL;
}
-static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
- int size)
+static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
+ unsigned long start, int size)
{
- unsigned long boundary_size = 0x1000000;
+ unsigned long boundary_size;
+ boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, 0, boundary_size, 0);
}
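The hunk above stops passing a hard-coded 0x1000000-page boundary to
iommu_area_alloc() and instead derives it from the device's DMA segment
boundary mask. A small self-contained sketch of the arithmetic, assuming the
common default mask of 0xffffffff (segments must not cross 4 GiB) and 4 KiB
pages, i.e. PAGE_SHIFT == 12; the helper name is hypothetical:

#include <linux/kernel.h>	/* ALIGN() */
#include <asm/page.h>		/* PAGE_SIZE, PAGE_SHIFT */

/*
 * Convert a DMA segment boundary mask into a boundary expressed in
 * pages, the unit iommu_area_alloc() expects:
 *
 *   ALIGN(0xffffffff + 1, 4096) >> 12 == 0x100000 pages
 */
static unsigned long seg_boundary_in_pages(unsigned long boundary_mask)
{
	return ALIGN(boundary_mask + 1, PAGE_SIZE) >> PAGE_SHIFT;
}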
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 146b9d5..4630cf2 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,10 +1,11 @@
header-y +=
+
generic-y += barrier.h
generic-y += clkdev.h
generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
generic-y += trace_clock.h
generic-y += xor.h
-generic-y += preempt.h
-
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 0cd7198..c19e47d 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -8,18 +8,21 @@
generic-y += errno.h
generic-y += exec.h
generic-y += fcntl.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mman.h
+generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
generic-y += poll.h
-generic-y += mman.h
-generic-y += msgbuf.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
@@ -34,5 +37,3 @@
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h
index 673515b..aa1b2b9 100644
--- a/arch/sh/include/cpu-sh2/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
#define SH_CACHE_ASSOC 8
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR 0xffffffec
+#define SH_CCR 0xffffffec
#define CCR_CACHE_CE 0x01 /* Cache enable */
#define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h
index defb0ba..b27ce92 100644
--- a/arch/sh/include/cpu-sh2a/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xfffc1000 /* CCR1 */
-#define CCR2 0xfffc1004
+#define SH_CCR 0xfffc1000 /* CCR1 */
+#define SH_CCR2 0xfffc1004
/*
* Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h
index bee2d81..29700fd 100644
--- a/arch/sh/include/cpu-sh3/cpu/cache.h
+++ b/arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xffffffec /* Address of Cache Control Register */
+#define SH_CCR 0xffffffec /* Address of Cache Control Register */
#define CCR_CACHE_CE 0x01 /* Cache Enable */
#define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h
index 7bfb9e8..92c4cd1 100644
--- a/arch/sh/include/cpu-sh4/cpu/cache.h
+++ b/arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xff00001c /* Address of Cache Control Register */
+#define SH_CCR 0xff00001c /* Address of Cache Control Register */
#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
#define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ecf83cd..0d7360d 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@
unsigned long ccr, flags;
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@
l2_cache_init();
- __raw_writel(flags, CCR);
+ __raw_writel(flags, SH_CCR);
back_to_cached();
}
#else
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 2ea4483..be616ee 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -16,7 +16,6 @@
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
-#include <linux/cpuidle.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
@@ -40,8 +39,7 @@
void arch_cpu_idle(void)
{
- if (cpuidle_idle_call())
- sh_idle();
+ sh_idle();
}
void __init select_idle_routine(void)
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 1157251..777e50f 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@
*/
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
if ((ccr & CCR_CACHE_ENABLE) == 0) {
back_to_cached();
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf71..a74259f 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@
local_irq_save(flags);
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_INVALIDATE;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
back_to_cached();
local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3..ee87d08 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@
/* If there are too many pages then just blow the cache */
if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = begin; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@
/* I-Cache invalidate */
/* If there are too many pages then just blow the cache */
if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = start; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e52928..51d8f7f 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@
jump_to_uncached();
/* Flush I-cache */
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_ICI;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
/*
* back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee..24c58b7 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@
{
unsigned int ccr;
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@
ccr |= CCR_CACHE_IBE;
#endif
- writel_uncached(ccr, CCR);
+ writel_uncached(ccr, SH_CCR);
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a..097c2cd 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@
{
unsigned int cache_disabled = 0;
-#ifdef CCR
- cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+ cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif
compute_alias(&boot_cpu_data.icache);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c51efdc..7d8b7e9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -27,7 +27,7 @@
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 4b60a0c..a458218 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,15 +6,16 @@
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += linkage.h
-generic-y += local64.h
-generic-y += mutex.h
+generic-y += hash.h
generic-y += irq_regs.h
+generic-y += linkage.h
generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += module.h
+generic-y += mutex.h
+generic-y += preempt.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index dd3bef4..0571039 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -32,7 +32,6 @@
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
extern cpumask_t cpu_core_map[NR_CPUS];
-extern int sparc64_multi_core;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 1754390..a2d10fc 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -42,8 +42,6 @@
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
-#define mc_capable() (sparc64_multi_core)
-#define smt_capable() (sparc64_multi_core)
#endif /* CONFIG_SMP */
extern cpumask_t cpu_core_map[NR_CPUS];
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index b90bf23..a1a4400 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -896,10 +896,6 @@
mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
-#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
-#endif
-
hp = mdesc_grab();
set_core_ids(hp);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 32a280e..d7b4967 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -58,9 +58,12 @@
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -82,7 +85,6 @@
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 6b39125..9a690d3 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -555,9 +555,6 @@
cpu_data(cpuid).core_id = portid + 1;
cpu_data(cpuid).proc_id = portid;
-#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
-#endif
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b085311..9781048 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -53,8 +53,6 @@
#include "cpumap.h"
-int sparc64_multi_core __read_mostly;
-
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 87729ff..33a17e7 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 869023a..cfbe53c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@@ -62,6 +63,7 @@
static pgd_t *srmmu_swapper_pg_dir;
const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+EXPORT_SYMBOL(sparc32_cachetlb_ops);
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 3b3a360..f5d506f 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -273,7 +273,7 @@
prom_halt();
}
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
unsigned long size = 8192 << i;
const char *name = tsb_cache_names[i];
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 3793c75..0aa5675 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -11,6 +11,7 @@
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -18,12 +19,14 @@
generic-y += irq_regs.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
@@ -38,5 +41,3 @@
generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 88a330d..a5e4b60 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,8 +1,28 @@
-generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
-generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
-generic-y += switch_to.h clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
-generic-y += hash.h
generic-y += barrier.h
+generic-y += bug.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += sections.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 3ef4f9d..1e5fb87 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -16,6 +16,7 @@
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -24,6 +25,7 @@
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h
@@ -32,6 +34,7 @@
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -60,5 +63,3 @@
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0af5250d..8453fe1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1585,6 +1585,20 @@
See Documentation/efi-stub.txt for more information.
+config EFI_MIXED
+ bool "EFI mixed-mode support"
+ depends on EFI_STUB && X86_64
+ ---help---
+	  Enabling this feature allows a 64-bit kernel to be booted
+	  on 32-bit firmware, provided that your CPU supports 64-bit
+	  mode.
+
+	  Note that it is not possible to boot a mixed-mode enabled
+	  kernel via the EFI boot stub; a bootloader that supports
+	  the EFI handover protocol must be used.
+
+ If unsure, say N.
+
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca..f3aaf23 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@
def_bool y
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-config X86_OOSTORE
- def_bool y
- depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
#
# P6_NOPs are a relatively minor optimization that require a family >=
# 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 321a52c..61bd2ad 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -81,6 +81,15 @@
kernel.
If in doubt, say "N"
+config EFI_PGT_DUMP
+ bool "Dump the EFI pagetable"
+ depends on EFI && X86_PTDUMP
+ ---help---
+ Enable this if you want to dump the EFI page table before
+ enabling virtual mode. This can be used to debug miscellaneous
+ issues with the mapping of the EFI runtime regions into that
+ table.
+
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
default y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index eeda43a..3b9348a 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -82,8 +82,8 @@
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
- # Don't autogenerate MMX or SSE instructions
- KBUILD_CFLAGS += -mno-mmx -mno-sse
+ # Don't autogenerate traditional x87, MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
@@ -152,6 +152,7 @@
# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
+asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
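The new as-instr probe defines CONFIG_AS_CRC32 only when binutils can assemble
the SSE4.2 mnemonic, so C code can guard inline asm on it. A hedged sketch of
the kind of user this enables (crc32_u32 is a hypothetical helper name, and a
runtime CPUID check is still the caller's job):

	#ifdef CONFIG_AS_CRC32
	/* Hardware CRC32C of one 32-bit word; compiled in only when the
	 * assembler understands the instruction. */
	static inline u32 crc32_u32(u32 crc, u32 val)
	{
		asm ("crc32l %1,%0" : "+r" (crc) : "rm" (val));
		return crc;
	}
	#endif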
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 878df7e..abb9eba 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -80,7 +80,7 @@
$(obj)/voffset.h: vmlinux FORCE
$(call if_changed,voffset)
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 90a21f4..4dbf967 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -111,7 +111,7 @@
};
#define MEM_AVOID_MAX 5
-struct mem_vector mem_avoid[MEM_AVOID_MAX];
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
@@ -180,7 +180,7 @@
}
/* Does this memory vector overlap a known avoided area? */
-bool mem_avoid_overlap(struct mem_vector *img)
+static bool mem_avoid_overlap(struct mem_vector *img)
{
int i;
@@ -192,8 +192,9 @@
return false;
}
-unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
-unsigned long slot_max = 0;
+static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+ CONFIG_PHYSICAL_ALIGN];
+static unsigned long slot_max;
static void slots_append(unsigned long addr)
{
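(Worth noting in passing: once slot_max gains static storage duration the
explicit "= 0" initializer becomes redundant, which is why it disappears in
the hunk above; objects in BSS are zero-initialized by definition.)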
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index a7677ba..1e61461 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -19,11 +19,273 @@
static efi_system_table_t *sys_table;
+static struct efi_config *efi_early;
+
+#define efi_call_early(f, ...) \
+	efi_early->call(efi_early->f, __VA_ARGS__)
+
+#define BOOT_SERVICES(bits) \
+static void setup_boot_services##bits(struct efi_config *c) \
+{ \
+ efi_system_table_##bits##_t *table; \
+ efi_boot_services_##bits##_t *bt; \
+ \
+ table = (typeof(table))sys_table; \
+ \
+ c->text_output = table->con_out; \
+ \
+ bt = (typeof(bt))(unsigned long)(table->boottime); \
+ \
+ c->allocate_pool = bt->allocate_pool; \
+ c->allocate_pages = bt->allocate_pages; \
+ c->get_memory_map = bt->get_memory_map; \
+ c->free_pool = bt->free_pool; \
+ c->free_pages = bt->free_pages; \
+ c->locate_handle = bt->locate_handle; \
+ c->handle_protocol = bt->handle_protocol; \
+ c->exit_boot_services = bt->exit_boot_services; \
+}
+BOOT_SERVICES(32);
+BOOT_SERVICES(64);
+
+static void efi_printk(efi_system_table_t *, char *);
+static void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+
+static efi_status_t
+__file_size32(void *__fh, efi_char16_t *filename_16,
+ void **handle, u64 *file_sz)
+{
+ efi_file_handle_32_t *h, *fh = __fh;
+ efi_file_info_t *info;
+ efi_status_t status;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ u32 info_sz;
+
+ status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to open file: ");
+ efi_char16_printk(sys_table, filename_16);
+ efi_printk(sys_table, "\n");
+ return status;
+ }
+
+ *handle = h;
+
+ info_sz = 0;
+ status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+ &info_sz, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ efi_printk(sys_table, "Failed to get file info size\n");
+ return status;
+ }
+
+grow:
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ info_sz, (void **)&info);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to alloc mem for file info\n");
+ return status;
+ }
+
+ status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+ &info_sz, info);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_early(free_pool, info);
+ goto grow;
+ }
+
+ *file_sz = info->file_size;
+ efi_call_early(free_pool, info);
+
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table, "Failed to get initrd info\n");
+
+ return status;
+}
+
+static efi_status_t
+__file_size64(void *__fh, efi_char16_t *filename_16,
+ void **handle, u64 *file_sz)
+{
+ efi_file_handle_64_t *h, *fh = __fh;
+ efi_file_info_t *info;
+ efi_status_t status;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ u32 info_sz;
+
+ status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to open file: ");
+ efi_char16_printk(sys_table, filename_16);
+ efi_printk(sys_table, "\n");
+ return status;
+ }
+
+ *handle = h;
+
+ info_sz = 0;
+ status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+ &info_sz, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ efi_printk(sys_table, "Failed to get file info size\n");
+ return status;
+ }
+
+grow:
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ info_sz, (void **)&info);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to alloc mem for file info\n");
+ return status;
+ }
+
+ status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+ &info_sz, info);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_early(free_pool, info);
+ goto grow;
+ }
+
+ *file_sz = info->file_size;
+ efi_call_early(free_pool, info);
+
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table, "Failed to get initrd info\n");
+
+ return status;
+}
+
+static efi_status_t
+efi_file_size(efi_system_table_t *sys_table, void *__fh,
+ efi_char16_t *filename_16, void **handle, u64 *file_sz)
+{
+ if (efi_early->is64)
+ return __file_size64(__fh, filename_16, handle, file_sz);
+
+ return __file_size32(__fh, filename_16, handle, file_sz);
+}
+
+static inline efi_status_t
+efi_file_read(void *__fh, void *handle, unsigned long *size, void *addr)
+{
+ unsigned long func;
+
+ if (efi_early->is64) {
+ efi_file_handle_64_t *fh = __fh;
+
+ func = (unsigned long)fh->read;
+ return efi_early->call(func, handle, size, addr);
+ } else {
+ efi_file_handle_32_t *fh = __fh;
+
+ func = (unsigned long)fh->read;
+ return efi_early->call(func, handle, size, addr);
+ }
+}
+
+static inline efi_status_t efi_file_close(void *__fh, void *handle)
+{
+ if (efi_early->is64) {
+ efi_file_handle_64_t *fh = __fh;
+
+ return efi_early->call((unsigned long)fh->close, handle);
+ } else {
+ efi_file_handle_32_t *fh = __fh;
+
+ return efi_early->call((unsigned long)fh->close, handle);
+ }
+}
+
+static inline efi_status_t __open_volume32(void *__image, void **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_loaded_image_32_t *image = __image;
+ efi_file_handle_32_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)image->device_handle;
+ unsigned long func;
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ func = (unsigned long)io->open_volume;
+ status = efi_early->call(func, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table, "Failed to open volume\n");
+
+ *__fh = fh;
+ return status;
+}
+
+static inline efi_status_t __open_volume64(void *__image, void **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_loaded_image_64_t *image = __image;
+ efi_file_handle_64_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)image->device_handle;
+ unsigned long func;
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ func = (unsigned long)io->open_volume;
+ status = efi_early->call(func, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table, "Failed to open volume\n");
+
+ *__fh = fh;
+ return status;
+}
+
+static inline efi_status_t
+efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
+{
+ if (efi_early->is64)
+ return __open_volume64(__image, __fh);
+
+ return __open_volume32(__image, __fh);
+}
+
+static void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
+{
+ unsigned long output_string;
+ size_t offset;
+
+ if (efi_early->is64) {
+ struct efi_simple_text_output_protocol_64 *out;
+ u64 *func;
+
+ offset = offsetof(typeof(*out), output_string);
+ output_string = efi_early->text_output + offset;
+ func = (u64 *)output_string;
+
+ efi_early->call(*func, efi_early->text_output, str);
+ } else {
+ struct efi_simple_text_output_protocol_32 *out;
+ u32 *func;
+
+ offset = offsetof(typeof(*out), output_string);
+ output_string = efi_early->text_output + offset;
+ func = (u32 *)output_string;
+
+ efi_early->call(*func, efi_early->text_output, str);
+ }
+}
#include "../../../../drivers/firmware/efi/efi-stub-helper.c"
-
-
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
{
u8 first, len;
@@ -47,48 +309,87 @@
*size = len;
}
-static efi_status_t setup_efi_pci(struct boot_params *params)
+static efi_status_t
+__setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
{
- efi_pci_io_protocol *pci;
+ struct pci_setup_rom *rom = NULL;
efi_status_t status;
- void **pci_handle;
+ unsigned long size;
+ uint64_t attributes;
+
+ status = efi_early->call(pci->attributes, pci,
+ EfiPciIoAttributeOperationGet, 0, 0,
+ &attributes);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (!pci->romimage || !pci->romsize)
+ return EFI_INVALID_PARAMETER;
+
+ size = pci->romsize + sizeof(*rom);
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ memset(rom, 0, sizeof(*rom));
+
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
+ *__rom = rom;
+
+ status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+ PCI_VENDOR_ID, 1, &(rom->vendor));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+ PCI_DEVICE_ID, 1, &(rom->devid));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_early->call(pci->get_location, pci, &(rom->segment),
+ &(rom->bus), &(rom->device), &(rom->function));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, pci->romimage, pci->romsize);
+ return status;
+
+free_struct:
+ efi_call_early(free_pool, rom);
+ return status;
+}
+
+static efi_status_t
+setup_efi_pci32(struct boot_params *params, void **pci_handle,
+ unsigned long size)
+{
+ efi_pci_io_protocol_32 *pci = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long nr_pci, size = 0;
- int i;
+ u32 *handles = (u32 *)(unsigned long)pci_handle;
+ efi_status_t status;
+ unsigned long nr_pci;
struct setup_data *data;
+ int i;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, size, &pci_handle);
-
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &size, pci_handle);
- }
-
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- nr_pci = size / sizeof(void *);
+ nr_pci = size / sizeof(u32);
for (i = 0; i < nr_pci; i++) {
- void *h = pci_handle[i];
- uint64_t attributes;
- struct pci_setup_rom *rom;
+ struct pci_setup_rom *rom = NULL;
+ u32 h = handles[i];
- status = efi_call_phys3(sys_table->boottime->handle_protocol,
- h, &pci_proto, &pci);
+ status = efi_call_early(handle_protocol, h,
+ &pci_proto, (void **)&pci);
if (status != EFI_SUCCESS)
continue;
@@ -96,57 +397,10 @@
if (!pci)
continue;
-#ifdef CONFIG_X86_64
- status = efi_call_phys4(pci->attributes, pci,
- EfiPciIoAttributeOperationGet, 0,
- &attributes);
-#else
- status = efi_call_phys5(pci->attributes, pci,
- EfiPciIoAttributeOperationGet, 0, 0,
- &attributes);
-#endif
+ status = __setup_efi_pci32(pci, &rom);
if (status != EFI_SUCCESS)
continue;
- if (!pci->romimage || !pci->romsize)
- continue;
-
- size = pci->romsize + sizeof(*rom);
-
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, size, &rom);
-
- if (status != EFI_SUCCESS)
- continue;
-
- rom->data.type = SETUP_PCI;
- rom->data.len = size - sizeof(struct setup_data);
- rom->data.next = 0;
- rom->pcilen = pci->romsize;
-
- status = efi_call_phys5(pci->pci.read, pci,
- EfiPciIoWidthUint16, PCI_VENDOR_ID,
- 1, &(rom->vendor));
-
- if (status != EFI_SUCCESS)
- goto free_struct;
-
- status = efi_call_phys5(pci->pci.read, pci,
- EfiPciIoWidthUint16, PCI_DEVICE_ID,
- 1, &(rom->devid));
-
- if (status != EFI_SUCCESS)
- goto free_struct;
-
- status = efi_call_phys5(pci->get_location, pci,
- &(rom->segment), &(rom->bus),
- &(rom->device), &(rom->function));
-
- if (status != EFI_SUCCESS)
- goto free_struct;
-
- memcpy(rom->romdata, pci->romimage, pci->romsize);
-
if (data)
data->next = (unsigned long)rom;
else
@@ -154,105 +408,155 @@
data = (struct setup_data *)rom;
- continue;
- free_struct:
- efi_call_phys1(sys_table->boottime->free_pool, rom);
}
-free_handle:
- efi_call_phys1(sys_table->boottime->free_pool, pci_handle);
return status;
}
-/*
- * See if we have Graphics Output Protocol
- */
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
+static efi_status_t
+__setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
{
- struct efi_graphics_output_protocol *gop, *first_gop;
- struct efi_pixel_bitmask pixel_info;
- unsigned long nr_gops;
+ struct pci_setup_rom *rom;
efi_status_t status;
- void **gop_handle;
- u16 width, height;
- u32 fb_base, fb_size;
- u32 pixels_per_scan_line;
- int pixel_format;
- int i;
+ unsigned long size;
+ uint64_t attributes;
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, size, &gop_handle);
+ status = efi_early->call(pci->attributes, pci,
+ EfiPciIoAttributeOperationGet, 0,
+ &attributes);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, proto,
- NULL, &size, gop_handle);
+ if (!pci->romimage || !pci->romsize)
+ return EFI_INVALID_PARAMETER;
+
+ size = pci->romsize + sizeof(*rom);
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return status;
- first_gop = NULL;
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
+ *__rom = rom;
- nr_gops = size / sizeof(void *);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_mode_info *info;
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
- void *dummy;
- void *h = gop_handle[i];
+ status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+ PCI_VENDOR_ID, 1, &(rom->vendor));
- status = efi_call_phys3(sys_table->boottime->handle_protocol,
- h, proto, &gop);
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+ PCI_DEVICE_ID, 1, &(rom->devid));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_early->call(pci->get_location, pci, &(rom->segment),
+ &(rom->bus), &(rom->device), &(rom->function));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, pci->romimage, pci->romsize);
+ return status;
+
+free_struct:
+ efi_call_early(free_pool, rom);
+ return status;
+
+
+static efi_status_t
+setup_efi_pci64(struct boot_params *params, void **pci_handle,
+ unsigned long size)
+{
+ efi_pci_io_protocol_64 *pci = NULL;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ u64 *handles = (u64 *)(unsigned long)pci_handle;
+ efi_status_t status;
+ unsigned long nr_pci;
+ struct setup_data *data;
+ int i;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ nr_pci = size / sizeof(u64);
+ for (i = 0; i < nr_pci; i++) {
+ struct pci_setup_rom *rom = NULL;
+ u64 h = handles[i];
+
+ status = efi_call_early(handle_protocol, h,
+ &pci_proto, (void **)&pci);
+
if (status != EFI_SUCCESS)
continue;
- status = efi_call_phys3(sys_table->boottime->handle_protocol,
- h, &conout_proto, &dummy);
+ if (!pci)
+ continue;
- if (status == EFI_SUCCESS)
- conout_found = true;
+ status = __setup_efi_pci64(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
- status = efi_call_phys4(gop->query_mode, gop,
- gop->mode->mode, &size, &info);
- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- fb_base = gop->mode->frame_buffer_base;
- fb_size = gop->mode->frame_buffer_size;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
- first_gop = gop;
- if (conout_found)
- break;
- }
+ data = (struct setup_data *)rom;
}
- /* Did we find any GOPs? */
- if (!first_gop)
+ return status;
+}
+
+static efi_status_t setup_efi_pci(struct boot_params *params)
+{
+ efi_status_t status;
+ void **pci_handle = NULL;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long size = 0;
+
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_call_early(allocate_pool,
+ EFI_LOADER_DATA,
+ size, (void **)&pci_handle);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &size, pci_handle);
+ }
+
+ if (status != EFI_SUCCESS)
goto free_handle;
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
+ if (efi_early->is64)
+ status = setup_efi_pci64(params, pci_handle, size);
+ else
+ status = setup_efi_pci32(params, pci_handle, size);
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
- si->pages = 1;
+free_handle:
+ efi_call_early(free_pool, pci_handle);
+ return status;
+}
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+ struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->lfb_depth = 32;
si->lfb_linelength = pixels_per_scan_line * 4;
@@ -297,62 +601,274 @@
si->rsvd_size = 0;
si->rsvd_pos = 0;
}
+}
+
+static efi_status_t
+__gop_query32(struct efi_graphics_output_protocol_32 *gop32,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u32 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_32 *mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop32->mode;
+ mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+
+ status = efi_early->call(gop32->query_mode, gop32,
+ mode->mode, size, info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop32(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status;
+ u32 *handles = (u32 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop32 = NULL;
+
+ nr_gops = size / sizeof(u32);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ u32 h = handles[i];
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop32);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query32(gop32, &info, &size, &fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop32;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+ return status;
+}
-free_handle:
- efi_call_phys1(sys_table->boottime->free_pool, gop_handle);
+static efi_status_t
+__gop_query64(struct efi_graphics_output_protocol_64 *gop64,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u32 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_64 *mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop64->mode;
+ mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+
+ status = efi_early->call(gop64->query_mode, gop64,
+ mode->mode, size, info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop64(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status;
+ u64 *handles = (u64 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop64 = NULL;
+
+ nr_gops = size / sizeof(u64);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ u64 h = handles[i];
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop64);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query64(gop64, &info, &size, &fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop64;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
return status;
}
/*
- * See if we have Universal Graphics Adapter (UGA) protocol
+ * See if we have Graphics Output Protocol
*/
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size)
{
- struct efi_uga_draw_protocol *uga, *first_uga;
- unsigned long nr_ugas;
efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- int i;
+ void **gop_handle = NULL;
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, size, &uga_handle);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&gop_handle);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, uga_proto,
- NULL, &size, uga_handle);
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ proto, NULL, &size, gop_handle);
if (status != EFI_SUCCESS)
goto free_handle;
- first_uga = NULL;
+ if (efi_early->is64)
+ status = setup_gop64(si, proto, size, gop_handle);
+ else
+ status = setup_gop32(si, proto, size, gop_handle);
- nr_ugas = size / sizeof(void *);
+free_handle:
+ efi_call_early(free_pool, gop_handle);
+ return status;
+}
+
+static efi_status_t
+setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+{
+ struct efi_uga_draw_protocol *uga = NULL, *first_uga;
+ efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ unsigned long nr_ugas;
+	u32 *handles = (u32 *)uga_handle;
+ efi_status_t status;
+ int i;
+
+ first_uga = NULL;
+ nr_ugas = size / sizeof(u32);
for (i = 0; i < nr_ugas; i++) {
efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- void *handle = uga_handle[i];
u32 w, h, depth, refresh;
void *pciio;
+ u32 handle = handles[i];
- status = efi_call_phys3(sys_table->boottime->handle_protocol,
- handle, uga_proto, &uga);
+ status = efi_call_early(handle_protocol, handle,
+ &uga_proto, (void **)&uga);
if (status != EFI_SUCCESS)
continue;
- efi_call_phys3(sys_table->boottime->handle_protocol,
- handle, &pciio_proto, &pciio);
+ efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
- status = efi_call_phys5(uga->get_mode, uga, &w, &h,
- &depth, &refresh);
+ status = efi_early->call((unsigned long)uga->get_mode, uga,
+ &w, &h, &depth, &refresh);
if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
+ *width = w;
+ *height = h;
/*
* Once we've found a UGA supporting PCIIO,
@@ -365,7 +881,84 @@
}
}
- if (!first_uga)
+ return status;
+}
+
+static efi_status_t
+setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+{
+ struct efi_uga_draw_protocol *uga = NULL, *first_uga;
+ efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ unsigned long nr_ugas;
+	u64 *handles = (u64 *)uga_handle;
+ efi_status_t status;
+ int i;
+
+ first_uga = NULL;
+ nr_ugas = size / sizeof(u64);
+ for (i = 0; i < nr_ugas; i++) {
+ efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ u32 w, h, depth, refresh;
+ void *pciio;
+ u64 handle = handles[i];
+
+ status = efi_call_early(handle_protocol, handle,
+ &uga_proto, (void **)&uga);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
+
+ status = efi_early->call((unsigned long)uga->get_mode, uga,
+ &w, &h, &depth, &refresh);
+ if (status == EFI_SUCCESS && (!first_uga || pciio)) {
+ *width = w;
+ *height = h;
+
+ /*
+ * Once we've found a UGA supporting PCIIO,
+ * don't bother looking any further.
+ */
+ if (pciio)
+ break;
+
+ first_uga = uga;
+ }
+ }
+
+ return status;
+}
+
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
+static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
+ unsigned long size)
+{
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ height = 0;
+ width = 0;
+
+ if (efi_early->is64)
+ status = setup_uga64(uga_handle, size, &width, &height);
+ else
+ status = setup_uga32(uga_handle, size, &width, &height);
+
+ if (!width && !height)
goto free_handle;
/* EFI framebuffer */
@@ -384,9 +977,8 @@
si->rsvd_size = 8;
si->rsvd_pos = 24;
-
free_handle:
- efi_call_phys1(sys_table->boottime->free_pool, uga_handle);
+ efi_call_early(free_pool, uga_handle);
return status;
}
@@ -404,29 +996,28 @@
memset(si, 0, sizeof(*si));
size = 0;
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, &graphics_proto,
- NULL, &size, gop_handle);
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ &graphics_proto, NULL, &size, gop_handle);
if (status == EFI_BUFFER_TOO_SMALL)
status = setup_gop(si, &graphics_proto, size);
if (status != EFI_SUCCESS) {
size = 0;
- status = efi_call_phys5(sys_table->boottime->locate_handle,
- EFI_LOCATE_BY_PROTOCOL, &uga_proto,
- NULL, &size, uga_handle);
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ &uga_proto, NULL, &size, uga_handle);
if (status == EFI_BUFFER_TOO_SMALL)
setup_uga(si, &uga_proto, size);
}
}
-
/*
* Because the x86 boot code expects to be passed a boot_params we
* need to create one ourselves (usually the bootloader would create
* one for us).
*/
-struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
+struct boot_params *make_boot_params(struct efi_config *c)
{
struct boot_params *boot_params;
struct sys_desc_table *sdt;
@@ -434,7 +1025,7 @@
struct setup_header *hdr;
struct efi_info *efi;
efi_loaded_image_t *image;
- void *options;
+ void *options, *handle;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
efi_status_t status;
@@ -445,14 +1036,21 @@
unsigned long ramdisk_addr;
unsigned long ramdisk_size;
- sys_table = _table;
+ efi_early = c;
+ sys_table = (efi_system_table_t *)(unsigned long)efi_early->table;
+ handle = (void *)(unsigned long)efi_early->image_handle;
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
return NULL;
- status = efi_call_phys3(sys_table->boottime->handle_protocol,
- handle, &proto, (void *)&image);
+ if (efi_early->is64)
+ setup_boot_services64(efi_early);
+ else
+ setup_boot_services32(efi_early);
+
+ status = efi_call_early(handle_protocol, handle,
+ &proto, (void *)&image);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
return NULL;
@@ -641,14 +1239,13 @@
sizeof(struct e820entry) * nr_desc;
if (*e820ext) {
- efi_call_phys1(sys_table->boottime->free_pool, *e820ext);
+ efi_call_early(free_pool, *e820ext);
*e820ext = NULL;
*e820ext_size = 0;
}
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, size, e820ext);
-
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)e820ext);
if (status == EFI_SUCCESS)
*e820ext_size = size;
@@ -656,12 +1253,13 @@
}
static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle)
+ void *handle, bool is64)
{
struct efi_info *efi = &boot_params->efi_info;
unsigned long map_sz, key, desc_size;
efi_memory_desc_t *mem_map;
struct setup_data *e820ext;
+ const char *signature;
__u32 e820ext_size;
__u32 nr_desc, prev_nr_desc;
efi_status_t status;
@@ -691,11 +1289,13 @@
if (status != EFI_SUCCESS)
goto free_mem_map;
- efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+ efi_call_early(free_pool, mem_map);
goto get_map; /* Allocated memory, get map again */
}
- memcpy(&efi->efi_loader_signature, EFI_LOADER_SIGNATURE, sizeof(__u32));
+ signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+
efi->efi_systab = (unsigned long)sys_table;
efi->efi_memdesc_size = desc_size;
efi->efi_memdesc_version = desc_version;
@@ -708,8 +1308,7 @@
#endif
/* Might as well exit boot services now */
- status = efi_call_phys2(sys_table->boottime->exit_boot_services,
- handle, key);
+ status = efi_call_early(exit_boot_services, handle, key);
if (status != EFI_SUCCESS) {
/*
* ExitBootServices() will fail if any of the event
@@ -722,7 +1321,7 @@
goto free_mem_map;
called_exit = true;
- efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+ efi_call_early(free_pool, mem_map);
goto get_map;
}
@@ -736,23 +1335,31 @@
return EFI_SUCCESS;
free_mem_map:
- efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+ efi_call_early(free_pool, mem_map);
return status;
}
-
/*
* On success we return a pointer to a boot_params structure, and NULL
* on failure.
*/
-struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
+struct boot_params *efi_main(struct efi_config *c,
struct boot_params *boot_params)
{
- struct desc_ptr *gdt;
+ struct desc_ptr *gdt = NULL;
efi_loaded_image_t *image;
struct setup_header *hdr = &boot_params->hdr;
efi_status_t status;
struct desc_struct *desc;
+ void *handle;
+ efi_system_table_t *_table;
+ bool is64;
+
+ efi_early = c;
+
+ _table = (efi_system_table_t *)(unsigned long)efi_early->table;
+ handle = (void *)(unsigned long)efi_early->image_handle;
+ is64 = efi_early->is64;
sys_table = _table;
@@ -760,13 +1367,17 @@
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
+ if (is64)
+ setup_boot_services64(efi_early);
+ else
+ setup_boot_services32(efi_early);
+
setup_graphics(boot_params);
setup_efi_pci(boot_params);
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, sizeof(*gdt),
- (void **)&gdt);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
goto fail;
@@ -797,7 +1408,7 @@
hdr->code32_start = bzimage_addr;
}
- status = exit_boot(boot_params, handle);
+ status = exit_boot(boot_params, handle, is64);
if (status != EFI_SUCCESS)
goto fail;
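Every firmware service in the rewritten stub now funnels through the
efi_early->call trampoline, so a typical call site reduces to the pattern
below (a sketch distilled from the hunks above, not a new API):

	void *buf = NULL;
	efi_status_t status;

	/* efi_call_early(f, ...) expands to efi_early->call(efi_early->f, ...),
	 * so the same C works whether the firmware is 32-bit or 64-bit. */
	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &buf);
	if (status != EFI_SUCCESS)
		return status;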
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index 81b6b65..c88c31e 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -37,6 +37,24 @@
u32 pixels_per_scan_line;
} __packed;
+struct efi_graphics_output_protocol_mode_32 {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+ u32 max_mode;
+ u32 mode;
+ u64 info;
+ u64 size_of_info;
+ u64 frame_buffer_base;
+ u64 frame_buffer_size;
+} __packed;
+
struct efi_graphics_output_protocol_mode {
u32 max_mode;
u32 mode;
@@ -46,6 +64,20 @@
unsigned long frame_buffer_size;
} __packed;
+struct efi_graphics_output_protocol_32 {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+ u64 query_mode;
+ u64 set_mode;
+ u64 blt;
+ u64 mode;
+};
+
struct efi_graphics_output_protocol {
void *query_mode;
unsigned long set_mode;
@@ -53,10 +85,38 @@
struct efi_graphics_output_protocol_mode *mode;
};
+struct efi_uga_draw_protocol_32 {
+ u32 get_mode;
+ u32 set_mode;
+ u32 blt;
+};
+
+struct efi_uga_draw_protocol_64 {
+ u64 get_mode;
+ u64 set_mode;
+ u64 blt;
+};
+
struct efi_uga_draw_protocol {
void *get_mode;
void *set_mode;
void *blt;
};
+struct efi_config {
+ u64 image_handle;
+ u64 table;
+ u64 allocate_pool;
+ u64 allocate_pages;
+ u64 get_memory_map;
+ u64 free_pool;
+ u64 free_pages;
+ u64 locate_handle;
+ u64 handle_protocol;
+ u64 exit_boot_services;
+ u64 text_output;
+ efi_status_t (*call)(unsigned long, ...);
+ bool is64;
+} __packed;
+
#endif /* BOOT_COMPRESSED_EBOOT_H */
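Note that struct efi_config is __packed and its layout is mirrored by hand in
assembly: the ".fill 11,8,0" blocks in head_32.S/head_64.S cover the eleven
u64 slots (88 bytes), and the relocation fixups hard-code 88 as the offset of
the call pointer. A compile-time guard along these lines would catch drift
(a hypothetical addition, not part of this series):

	#include <stddef.h>

	_Static_assert(offsetof(struct efi_config, call) == 88,
		       "efi_config layout no longer matches the asm fixups");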
diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
index cedc60d..7ff3632 100644
--- a/arch/x86/boot/compressed/efi_stub_64.S
+++ b/arch/x86/boot/compressed/efi_stub_64.S
@@ -1 +1,30 @@
+#include <asm/segment.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+
#include "../../platform/efi/efi_stub_64.S"
+
+#ifdef CONFIG_EFI_MIXED
+ .code64
+ .text
+ENTRY(efi64_thunk)
+ push %rbp
+ push %rbx
+
+ subq $16, %rsp
+ leaq efi_exit32(%rip), %rax
+ movl %eax, 8(%rsp)
+ leaq efi_gdt64(%rip), %rax
+ movl %eax, 4(%rsp)
+ movl %eax, 2(%rax) /* Fixup the gdt base address */
+ leaq efi32_boot_gdt(%rip), %rax
+ movl %eax, (%rsp)
+
+ call __efi64_thunk
+
+ addq $16, %rsp
+ pop %rbx
+ pop %rbp
+ ret
+ENDPROC(efi64_thunk)
+#endif /* CONFIG_EFI_MIXED */
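For readers untangling efi64_thunk: the 16-byte scratch frame it builds is
consumed by __efi64_thunk, and the stores above lay it out as

	(%rsp)		32-bit pointer to efi32_boot_gdt
	4(%rsp)		32-bit pointer to efi_gdt64 (base address fixed up in place)
	8(%rsp)		32-bit pointer to efi_exit32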
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 9116aac..de9d420 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -42,26 +42,53 @@
ENTRY(efi_pe_entry)
add $0x4, %esp
+ call 1f
+1: popl %esi
+ subl $1b, %esi
+
+ popl %ecx
+ movl %ecx, efi32_config(%esi) /* Handle */
+ popl %ecx
+ movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
+
+ /* Relocate efi_config->call() */
+ leal efi32_config(%esi), %eax
+ add %esi, 88(%eax)
+ pushl %eax
+
call make_boot_params
cmpl $0, %eax
- je 1f
- movl 0x4(%esp), %esi
- movl (%esp), %ecx
+ je fail
+ popl %ecx
pushl %eax
- pushl %esi
pushl %ecx
- sub $0x4, %esp
+ jmp 2f /* Skip efi_config initialization */
-ENTRY(efi_stub_entry)
+ENTRY(efi32_stub_entry)
add $0x4, %esp
+ popl %ecx
+ popl %edx
+
+ call 1f
+1: popl %esi
+ subl $1b, %esi
+
+ movl %ecx, efi32_config(%esi) /* Handle */
+ movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
+
+ /* Relocate efi_config->call() */
+ leal efi32_config(%esi), %eax
+ add %esi, 88(%eax)
+ pushl %eax
+2:
call efi_main
cmpl $0, %eax
movl %eax, %esi
jne 2f
-1:
+fail:
/* EFI init failed, so hang. */
hlt
- jmp 1b
+ jmp fail
2:
call 3f
3:
@@ -202,6 +229,15 @@
xorl %ebx, %ebx
jmp *%eax
+#ifdef CONFIG_EFI_STUB
+ .data
+efi32_config:
+ .fill 11,8,0
+ .long efi_call_phys
+ .long 0
+ .byte 0
+#endif
+
/*
* Stack and heap for uncompression
*/
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c5c1ae0..57e58a5 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -113,7 +113,8 @@
lgdt gdt(%ebp)
/* Enable PAE mode */
- movl $(X86_CR4_PAE), %eax
+ movl %cr4, %eax
+ orl $X86_CR4_PAE, %eax
movl %eax, %cr4
/*
@@ -178,6 +179,13 @@
*/
pushl $__KERNEL_CS
leal startup_64(%ebp), %eax
+#ifdef CONFIG_EFI_MIXED
+ movl efi32_config(%ebp), %ebx
+ cmp $0, %ebx
+ jz 1f
+ leal handover_entry(%ebp), %eax
+1:
+#endif
pushl %eax
/* Enter paged protected Mode, activating Long Mode */
@@ -188,6 +196,30 @@
lret
ENDPROC(startup_32)
+#ifdef CONFIG_EFI_MIXED
+ .org 0x190
+ENTRY(efi32_stub_entry)
+ add $0x4, %esp /* Discard return address */
+ popl %ecx
+ popl %edx
+ popl %esi
+
+ leal (BP_scratch+4)(%esi), %esp
+ call 1f
+1: pop %ebp
+ subl $1b, %ebp
+
+ movl %ecx, efi32_config(%ebp)
+ movl %edx, efi32_config+8(%ebp)
+ sgdtl efi32_boot_gdt(%ebp)
+
+ leal efi32_config(%ebp), %eax
+ movl %eax, efi_config(%ebp)
+
+ jmp startup_32
+ENDPROC(efi32_stub_entry)
+#endif
+
.code64
.org 0x200
ENTRY(startup_64)
@@ -209,26 +241,48 @@
jmp preferred_addr
ENTRY(efi_pe_entry)
- mov %rcx, %rdi
- mov %rdx, %rsi
- pushq %rdi
- pushq %rsi
+ movq %rcx, efi64_config(%rip) /* Handle */
+ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
+
+ leaq efi64_config(%rip), %rax
+ movq %rax, efi_config(%rip)
+
+ call 1f
+1: popq %rbp
+ subq $1b, %rbp
+
+ /*
+ * Relocate efi_config->call().
+ */
+ addq %rbp, efi64_config+88(%rip)
+
+ movq %rax, %rdi
call make_boot_params
cmpq $0,%rax
- je 1f
- mov %rax, %rdx
- popq %rsi
- popq %rdi
+ je fail
+ mov %rax, %rsi
+ jmp 2f /* Skip the relocation */
-ENTRY(efi_stub_entry)
+handover_entry:
+ call 1f
+1: popq %rbp
+ subq $1b, %rbp
+
+ /*
+ * Relocate efi_config->call().
+ */
+ movq efi_config(%rip), %rax
+ addq %rbp, 88(%rax)
+2:
+ movq efi_config(%rip), %rdi
call efi_main
movq %rax,%rsi
cmpq $0,%rax
jne 2f
-1:
+fail:
/* EFI init failed, so hang. */
hlt
- jmp 1b
+ jmp fail
2:
call 3f
3:
@@ -307,6 +361,20 @@
leaq relocated(%rbx), %rax
jmp *%rax
+#ifdef CONFIG_EFI_STUB
+ .org 0x390
+ENTRY(efi64_stub_entry)
+ movq %rdi, efi64_config(%rip) /* Handle */
+ movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
+
+ leaq efi64_config(%rip), %rax
+ movq %rax, efi_config(%rip)
+
+ movq %rdx, %rsi
+ jmp handover_entry
+ENDPROC(efi64_stub_entry)
+#endif
+
.text
relocated:
@@ -372,6 +440,25 @@
.quad 0x0000000000000000 /* TS continued */
gdt_end:
+#ifdef CONFIG_EFI_STUB
+efi_config:
+ .quad 0
+
+#ifdef CONFIG_EFI_MIXED
+ .global efi32_config
+efi32_config:
+ .fill 11,8,0
+ .quad efi64_thunk
+ .byte 0
+#endif
+
+ .global efi64_config
+efi64_config:
+ .fill 11,8,0
+ .quad efi_call6
+ .byte 1
+#endif /* CONFIG_EFI_STUB */
+
/*
* Stack and heap for uncompression
*/
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 100a9a1..f0d0b20 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -67,6 +67,13 @@
cpu_vendor[2] == A32('M', 'x', '8', '6');
}
+static int is_intel(void)
+{
+ return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
+ cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
+ cpu_vendor[2] == A32('n', 't', 'e', 'l');
+}
+
/* Returns a bitmask of which words we have error bits in */
static int check_cpuflags(void)
{
@@ -153,6 +160,19 @@
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
err = check_cpuflags();
+ } else if (err == 0x01 &&
+ !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
+ is_intel() && cpu.level == 6 &&
+ (cpu.model == 9 || cpu.model == 13)) {
+ /* PAE is disabled on this Pentium M but can be forced */
+ if (cmdline_find_option_bool("forcepae")) {
+ puts("WARNING: Forcing PAE in CPU flags\n");
+ set_bit(X86_FEATURE_PAE, cpu.flags);
+ err = check_cpuflags();
+		} else {
+ puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
+ }
}
if (err_flags_ptr)
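In practice this gives owners of the affected Pentium M steppings an escape
hatch: a PAE kernel that previously refused to boot can be forced along from
the bootloader, e.g. (illustrative command line):

	linux /boot/vmlinuz root=/dev/sda1 forcepae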
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index ec3b8ba..0ca9a5c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -283,7 +283,7 @@
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x020c # header version number (>= 0x0105)
+ .word 0x020d # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -350,7 +350,7 @@
# can be located anywhere in
# low memory 0x10000 or higher.
-ramdisk_max: .long 0x7fffffff
+initrd_addr_max: .long 0x7fffffff
# (Header version 0x0203 or later)
# The highest safe address for
# the contents of an initrd
@@ -375,7 +375,8 @@
# define XLF0 0
#endif
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
+ !defined(CONFIG_EFI_MIXED)
/* kernel/boot_param/ramdisk could be loaded above 4g */
# define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
#else
@@ -383,10 +384,14 @@
#endif
#ifdef CONFIG_EFI_STUB
-# ifdef CONFIG_X86_64
-# define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */
+# ifdef CONFIG_EFI_MIXED
+# define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
# else
-# define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */
+# ifdef CONFIG_X86_64
+# define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */
+# else
+# define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */
+# endif
# endif
#else
# define XLF23 0
@@ -426,13 +431,7 @@
#define INIT_SIZE VO_INIT_SIZE
#endif
init_size: .long INIT_SIZE # kernel initialization size
-handover_offset:
-#ifdef CONFIG_EFI_STUB
- .long 0x30 # offset to the handover
- # protocol entry point
-#else
- .long 0
-#endif
+handover_offset: .long 0 # Filled in by build.c
# End of setup header #####################################################
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 8e15b22..1a2f212 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -53,7 +53,8 @@
#define PECOFF_RELOC_RESERVE 0x20
-unsigned long efi_stub_entry;
+unsigned long efi32_stub_entry;
+unsigned long efi64_stub_entry;
unsigned long efi_pe_entry;
unsigned long startup_64;
@@ -219,6 +220,52 @@
update_pecoff_section_header(".text", text_start, text_sz);
}
+static int reserve_pecoff_reloc_section(int c)
+{
+ /* Reserve 0x20 bytes for .reloc section */
+ memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+ return PECOFF_RELOC_RESERVE;
+}
+
+static void efi_stub_defaults(void)
+{
+ /* Defaults for old kernel */
+#ifdef CONFIG_X86_32
+ efi_pe_entry = 0x10;
+#else
+ efi_pe_entry = 0x210;
+ startup_64 = 0x200;
+#endif
+}
+
+static void efi_stub_entry_update(void)
+{
+ unsigned long addr = efi32_stub_entry;
+
+#ifdef CONFIG_X86_64
+ /* Yes, this is really how we defined it :( */
+ addr = efi64_stub_entry - 0x200;
+#endif
+
+#ifdef CONFIG_EFI_MIXED
+ if (efi32_stub_entry != addr)
+ die("32-bit and 64-bit EFI entry points do not match\n");
+#endif
+ put_unaligned_le32(addr, &buf[0x264]);
+}
+
+#else
+
+static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
+static inline void update_pecoff_text(unsigned int text_start,
+ unsigned int file_sz) {}
+static inline void efi_stub_defaults(void) {}
+static inline void efi_stub_entry_update(void) {}
+
+static inline int reserve_pecoff_reloc_section(int c)
+{
+ return 0;
+}
#endif /* CONFIG_EFI_STUB */
@@ -250,7 +297,8 @@
p = (char *)buf;
while (p && *p) {
- PARSE_ZOFS(p, efi_stub_entry);
+ PARSE_ZOFS(p, efi32_stub_entry);
+ PARSE_ZOFS(p, efi64_stub_entry);
PARSE_ZOFS(p, efi_pe_entry);
PARSE_ZOFS(p, startup_64);
@@ -271,15 +319,7 @@
void *kernel;
u32 crc = 0xffffffffUL;
- /* Defaults for old kernel */
-#ifdef CONFIG_X86_32
- efi_pe_entry = 0x10;
- efi_stub_entry = 0x30;
-#else
- efi_pe_entry = 0x210;
- efi_stub_entry = 0x230;
- startup_64 = 0x200;
-#endif
+ efi_stub_defaults();
if (argc != 5)
usage();
@@ -302,11 +342,7 @@
die("Boot block hasn't got boot flag (0xAA55)");
fclose(file);
-#ifdef CONFIG_EFI_STUB
- /* Reserve 0x20 bytes for .reloc section */
- memset(buf+c, 0, PECOFF_RELOC_RESERVE);
- c += PECOFF_RELOC_RESERVE;
-#endif
+ c += reserve_pecoff_reloc_section(c);
/* Pad unused space with zeros */
setup_sectors = (c + 511) / 512;
@@ -315,9 +351,7 @@
i = setup_sectors*512;
memset(buf+c, 0, i-c);
-#ifdef CONFIG_EFI_STUB
update_pecoff_setup_and_reloc(i);
-#endif
/* Set the default root device */
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
@@ -342,14 +376,9 @@
buf[0x1f1] = setup_sectors-1;
put_unaligned_le32(sys_size, &buf[0x1f4]);
-#ifdef CONFIG_EFI_STUB
update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
-#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
- efi_stub_entry -= 0x200;
-#endif
- put_unaligned_le32(efi_stub_entry, &buf[0x264]);
-#endif
+ efi_stub_entry_update();
crc = partial_crc32(buf, i, crc);
if (fwrite(buf, 1, i, dest) != i)
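It is worth spelling out why the die() check in efi_stub_entry_update() can
ever pass: head_64.S places efi32_stub_entry at .org 0x190 and
efi64_stub_entry at .org 0x390, and the handover protocol enters a 64-bit
kernel at the stored offset plus 0x200, so 0x390 - 0x200 = 0x190 and both
entry points collapse to the single handover_offset value written at
buf[0x264].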
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 7f66985..a8fee07 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -5,3 +5,4 @@
genhdr-y += unistd_x32.h
generic-y += clkdev.h
+generic-y += mcs_spinlock.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1d2091a..19b0eba 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -93,9 +93,6 @@
return 0;
}
#endif
-extern void xapic_wait_icr_idle(void);
-extern u32 safe_xapic_wait_icr_idle(void);
-extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
static inline void native_apic_mem_write(u32 reg, u32 v)
@@ -184,7 +181,6 @@
extern int x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
-extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
u64 msr;
@@ -221,7 +217,6 @@
{
}
-#define nox2apic 0
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
@@ -351,7 +346,7 @@
int trampoline_phys_low;
int trampoline_phys_high;
- void (*wait_for_init_deassert)(atomic_t *deassert);
+ bool wait_for_init_deassert;
void (*smp_callin_clear_local_apic)(void);
void (*inquire_remote_apic)(int apicid);
@@ -517,13 +512,6 @@
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
-static inline void default_wait_for_init_deassert(atomic_t *deassert)
-{
- while (!atomic_read(deassert))
- cpu_relax();
- return;
-}
-
extern void generic_bigsmp_probe(void);
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a4890..69bbb48 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
#else
# define smp_rmb() barrier()
#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
+#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
@@ -100,7 +96,7 @@
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For either of these options x86 doesn't have a strong TSO memory
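
Note: with CONFIG_X86_OOSTORE gone, smp_wmb() no longer has a variant
that emits a real fence. x86's TSO memory model already keeps stores
ordered with respect to other stores, so a compiler barrier suffices.
For illustration, the generic barrier() used here is just an empty asm
with a memory clobber:

	#define barrier() __asm__ __volatile__("" : : : "memory")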
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e099f95..bc507d7 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -217,9 +217,14 @@
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3d6b9f8..0869434 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -19,9 +19,11 @@
*/
#define EFI_OLD_MEMMAP EFI_ARCH_1
+#define EFI32_LOADER_SIGNATURE "EL32"
+#define EFI64_LOADER_SIGNATURE "EL64"
+
#ifdef CONFIG_X86_32
-#define EFI_LOADER_SIGNATURE "EL32"
extern unsigned long asmlinkage efi_call_phys(void *, ...);
@@ -57,8 +59,6 @@
#else /* !CONFIG_X86_32 */
-#define EFI_LOADER_SIGNATURE "EL64"
-
extern u64 efi_call0(void *fp);
extern u64 efi_call1(void *fp, u64 arg1);
extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
@@ -119,7 +119,6 @@
#endif /* CONFIG_X86_32 */
extern int add_efi_memmap;
-extern unsigned long x86_efi_facility;
extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
@@ -130,10 +129,13 @@
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
-extern void efi_setup_page_tables(void);
+extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
+extern void __init efi_dump_pagetable(void);
+extern void __init efi_apply_memmap_quirks(void);
struct efi_setup_data {
u64 fw_vendor;
@@ -152,8 +154,40 @@
return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}
+static inline bool efi_runtime_supported(void)
+{
+ if (efi_is_native())
+ return true;
+
+ if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
+ return true;
+
+ return false;
+}
+
extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
+
+#ifdef CONFIG_EFI_MIXED
+extern void efi_thunk_runtime_setup(void);
+extern efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map);
+#else
+static inline void efi_thunk_runtime_setup(void) {}
+static inline efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map)
+{
+ return EFI_SUCCESS;
+}
+#endif /* CONFIG_EFI_MIXED */
#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb..91d9c69 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@
static inline void flush_write_buffers(void)
{
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e139b13..de36f22 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -214,6 +214,8 @@
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
+int msr_set_bit(u32 msr, u8 bit);
+int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
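
Note: given how the callers later in this series test the return value
(e.g. "> 0" in intel.c and amd.c), the new helpers follow a three-way
convention: negative on MSR access error, 0 when the bit was already in
the requested state, and positive when it was actually flipped. A hedged
sketch of what msr_set_bit() plausibly does internally (the real
implementation is not part of this diff; only the declarations are):

	static int msr_set_bit_sketch(u32 msr, u8 bit)
	{
		u64 val;

		if (rdmsrl_safe(msr, &val))
			return -EIO;		/* MSR not readable */
		if (val & BIT_64(bit))
			return 0;		/* already set, no write */
		if (wrmsrl_safe(msr, val | BIT_64(bit)))
			return -EIO;		/* write faulted */
		return 1;			/* bit was flipped */
	}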
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 86f9301..5f2fc44 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_NMI_H
#define _ASM_X86_NMI_H
+#include <linux/irq_work.h>
#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -38,6 +39,8 @@
struct nmiaction {
struct list_head list;
nmi_handler_t handler;
+ u64 max_duration;
+ struct irq_work irq_work;
unsigned long flags;
const char *name;
};
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad..b459ddf 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -15,9 +15,10 @@
: (prot))
#ifndef __ASSEMBLY__
-
#include <asm/x86_init.h>
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -445,20 +446,10 @@
return a.pte == b.pte;
}
-static inline int pteval_present(pteval_t pteval)
-{
- /*
- * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
- * way clearly states that the intent is that protnone and numa
- * hinting ptes are considered present for the purposes of
- * pagetable operations like zapping, protection changes, gup etc.
- */
- return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
static inline int pte_present(pte_t a)
{
- return pteval_present(pte_flags(a));
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 1aa9ccd..708f19f 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -382,9 +382,13 @@
* as a pte too.
*/
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags);
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+ unsigned numpages);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 645cad2..e820c08 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -191,6 +191,14 @@
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
+static inline void clflushopt(volatile void *__p)
+{
+ alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+ ".byte 0x66; clflush %P0",
+ X86_FEATURE_CLFLUSHOPT,
+ "+m" (*(volatile char __force *)__p));
+}
+
#define nop() asm volatile ("nop")
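
Note: the alternative_io() form means the instruction is patched at
boot: CPUs without X86_FEATURE_CLFLUSHOPT execute the original clflush
(padded with a DS prefix so both encodings have equal length), while
capable CPUs get the 0x66-prefixed clflushopt encoding. A hedged usage
sketch, flushing a buffer one cache line at a time; the loop is
illustrative, though boot_cpu_data.x86_clflush_size is the existing
cache-line-size field:

	static void flush_range(void *addr, size_t len)
	{
		unsigned long clsize = boot_cpu_data.x86_clflush_size;
		char *p = (char *)((unsigned long)addr & ~(clsize - 1));
		char *end = (char *)addr + len;

		/* clflushopt is weakly ordered; a real caller would
		 * typically fence before relying on the flushes. */
		for (; p < end; p += clsize)
			clflushopt(p);
	}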
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156de..0f62f54 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
# define LOCK_PTR_REG "D"
#endif
-#if defined(CONFIG_X86_32) && \
- (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e..b28097e 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif
@@ -133,12 +134,6 @@
struct pci_bus;
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
-#ifdef CONFIG_SMP
-#define mc_capable() ((boot_cpu_data.x86_max_cores > 1) && \
- (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
-#define smt_capable() (smp_num_siblings > 1)
-#endif
-
#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 57ae63c..94605c0 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -66,6 +66,6 @@
extern void tsc_restore_sched_clock_state(void);
/* MSR based TSC calibration for Intel Atom SoC platforms */
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
+unsigned long try_msr_calibrate_tsc(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index c2a4813..3f556c6 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -23,6 +23,9 @@
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_COMPAT_SYS_GETDENTS64
+# define __ARCH_WANT_COMPAT_SYS_PREADV64
+# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# endif
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 5547389..6c1d741 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -6,11 +6,14 @@
#define XSTATE_CPUID 0x0000000d
-#define XSTATE_FP 0x1
-#define XSTATE_SSE 0x2
-#define XSTATE_YMM 0x4
-#define XSTATE_BNDREGS 0x8
-#define XSTATE_BNDCSR 0x10
+#define XSTATE_FP 0x1
+#define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
+#define XSTATE_BNDREGS 0x8
+#define XSTATE_BNDCSR 0x10
+#define XSTATE_OPMASK 0x20
+#define XSTATE_ZMM_Hi256 0x40
+#define XSTATE_Hi16_ZMM 0x80
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
@@ -23,7 +26,8 @@
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/* Supported features which support lazy state saving */
-#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
+ | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Supported features which require eager state saving */
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
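
Note: a quick arithmetic check of the new lazy-save mask: XSTATE_LAZY is
now FP|SSE|YMM|OPMASK|ZMM_Hi256|Hi16_ZMM, i.e. 0x1|0x2|0x4|0x20|0x40|0x80
= 0xe7, folding the three AVX-512 state components in alongside the
legacy and AVX state. An illustrative compile-time check:

	#define XSTATE_LAZY_SKETCH (0x1 | 0x2 | 0x4 | 0x20 | 0x40 | 0x80)
	_Static_assert(XSTATE_LAZY_SKETCH == 0xe7, "lazy mask mismatch");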
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c19fc60..4924f4b 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -368,33 +368,58 @@
#define THERM_LOG_THRESHOLD1 (1 << 9)
/* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
-#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
-#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
-#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34)
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT 1
+#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT 7
+#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
+#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2)
-#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
-#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT 3
+#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT 10
+#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT 13
+#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
#define MSR_IA32_TSC_DEADLINE 0x000006E0
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 1dac942..9f46f2b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -613,10 +613,10 @@
int nid;
nid = acpi_get_node(handle);
- if (nid == -1 || !node_online(nid))
- return;
- set_apicid_to_node(physid, nid);
- numa_set_node(cpu, nid);
+ if (nid != -1) {
+ set_apicid_to_node(physid, nid);
+ numa_set_node(cpu, nid);
+ }
#endif
}
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index fd972a3..9fa8aa0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,7 +18,6 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/bitops.h>
-#include <linux/ioport.h>
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
@@ -54,18 +53,6 @@
int fix_aperture __initdata = 1;
-static struct resource gart_resource = {
- .name = "GART",
- .flags = IORESOURCE_MEM,
-};
-
-static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
-{
- gart_resource.start = aper_base;
- gart_resource.end = aper_base + aper_size - 1;
- insert_resource(&iomem_resource, &gart_resource);
-}
-
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -96,7 +83,6 @@
memblock_reserve(addr, aper_size);
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
- insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);
@@ -444,12 +430,8 @@
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base) {
- unsigned long n = (32 * 1024 * 1024) << last_aper_order;
-
- insert_aperture_resource((u32)last_aper_base, n);
+ if (last_aper_base)
return 1;
- }
return 0;
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 7f26c9a..53e2053 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -133,6 +133,10 @@
* +1=force-enable
*/
static int force_enable_local_apic __initdata;
+
+/* Control whether x2APIC mode is enabled or not */
+static bool nox2apic __initdata;
+
/*
* APIC command line parameters
*/
@@ -162,8 +166,7 @@
/* x2apic enabled before OS handover */
int x2apic_preenabled;
static int x2apic_disabled;
-static int nox2apic;
-static __init int setup_nox2apic(char *str)
+static int __init setup_nox2apic(char *str)
{
if (x2apic_enabled()) {
int apicid = native_apic_msr_read(APIC_ID);
@@ -178,7 +181,7 @@
} else
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
- nox2apic = 1;
+ nox2apic = true;
return 0;
}
@@ -283,8 +286,12 @@
void native_apic_icr_write(u32 low, u32 id)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
apic_write(APIC_ICR, low);
+ local_irq_restore(flags);
}
u64 native_apic_icr_read(void)
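
Note: the local_irq_save()/local_irq_restore() pair closes a small but
real race: the 64-bit ICR is programmed as two 32-bit writes, and an
interrupt handler that sends an IPI between them would reprogram
APIC_ICR2 and redirect the first IPI to the wrong destination. Sketch of
the hazardous interleaving being prevented (comments only; no new API is
assumed):

	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); /* dest set  */
	/* <-- an IRQ here could rewrite ICR2 for its own IPI */
	apic_write(APIC_ICR, low);                      /* IPI fires */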
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 2c621a6..7c1b294 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -198,7 +198,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
@@ -314,7 +314,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 191ce75..8c7c982 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -172,8 +172,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
-
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 3e67f9e..a5b45df 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -248,7 +248,7 @@
.wakeup_secondary_cpu = numachip_wakeup_secondary,
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL, /* REMRD not supported */
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d50e364..e4840aa 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -199,8 +199,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = default_wait_for_init_deassert,
-
+ .wait_for_init_deassert = true,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index c552247..6f8f8b3 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -394,12 +394,6 @@
WARN(1, "Command failed, status = %x\n", mip_status);
}
-static void es7000_wait_for_init_deassert(atomic_t *deassert)
-{
- while (!atomic_read(deassert))
- cpu_relax();
-}
-
static unsigned int es7000_get_apic_id(unsigned long x)
{
return (x >> 24) & 0xFF;
@@ -658,8 +652,7 @@
.trampoline_phys_low = 0x467,
.trampoline_phys_high = 0x469,
- .wait_for_init_deassert = NULL,
-
+ .wait_for_init_deassert = false,
/* Nothing to do for most platforms, since cleared by the INIT cycle: */
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
@@ -722,8 +715,7 @@
.trampoline_phys_low = 0x467,
.trampoline_phys_high = 0x469,
- .wait_for_init_deassert = es7000_wait_for_init_deassert,
-
+ .wait_for_init_deassert = true,
/* Nothing to do for most platforms, since cleared by the INIT cycle: */
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 1e42e8f..030ea1c 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -505,8 +505,7 @@
.trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
/* We don't do anything here because we use NMI's to boot instead */
- .wait_for_init_deassert = NULL,
-
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
.inquire_remote_apic = NULL,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index eb35ef9..cceb352 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -119,8 +119,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = default_wait_for_init_deassert,
-
+ .wait_for_init_deassert = true,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 00146f9..b656128 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -532,8 +532,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = default_wait_for_init_deassert,
-
+ .wait_for_init_deassert = true,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index cac85ee..e66766b 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -279,7 +279,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index de231e3..6d600eb 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -133,7 +133,7 @@
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d263b13..7834389 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -396,7 +396,7 @@
.wakeup_secondary_cpu = uv_wakeup_secondary,
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
- .wait_for_init_deassert = NULL,
+ .wait_for_init_deassert = false,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL,
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c67ffa6..ce8b8ff 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -218,7 +218,7 @@
*/
WARN_ONCE(1, "WARNING: This combination of AMD"
" processors is not suitable for SMP.\n");
- add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
}
static void init_amd_k7(struct cpuinfo_x86 *c)
@@ -233,9 +233,7 @@
if (c->x86_model >= 6 && c->x86_model <= 10) {
if (!cpu_has(c, X86_FEATURE_XMM)) {
printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
- rdmsr(MSR_K7_HWCR, l, h);
- l &= ~0x00008000;
- wrmsr(MSR_K7_HWCR, l, h);
+ msr_clear_bit(MSR_K7_HWCR, 15);
set_cpu_cap(c, X86_FEATURE_XMM);
}
}
@@ -509,14 +507,8 @@
#endif
/* F16h erratum 793, CVE-2013-6885 */
- if (c->x86 == 0x16 && c->x86_model <= 0xf) {
- u64 val;
-
- rdmsrl(MSR_AMD64_LS_CFG, val);
- if (!(val & BIT(15)))
- wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
- }
-
+ if (c->x86 == 0x16 && c->x86_model <= 0xf)
+ msr_set_bit(MSR_AMD64_LS_CFG, 15);
}
static const int amd_erratum_383[];
@@ -536,11 +528,8 @@
* Errata 63 for SH-B3 steppings
* Errata 122 for all steppings (F+ have it disabled by default)
*/
- if (c->x86 == 0xf) {
- rdmsrl(MSR_K7_HWCR, value);
- value |= 1 << 6;
- wrmsrl(MSR_K7_HWCR, value);
- }
+ if (c->x86 == 0xf)
+ msr_set_bit(MSR_K7_HWCR, 6);
#endif
early_init_amd(c);
@@ -623,14 +612,11 @@
(c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
- if (!rdmsrl_safe(0xc0011005, &value)) {
- value |= 1ULL << 54;
- wrmsrl_safe(0xc0011005, value);
+ if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
- if (value & (1ULL << 54)) {
+ if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- printk(KERN_INFO FW_INFO "CPU: Re-enabling "
- "disabled Topology Extensions Support\n");
+ pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}
@@ -709,19 +695,12 @@
* Disable GART TLB Walk Errors on Fam10h. We do this here
* because this is always needed when GART is enabled, even in a
* kernel which has no MCE support built in.
- * BIOS should disable GartTlbWlk Errors themself. If
- * it doesn't do it here as suggested by the BKDG.
+ * BIOS should disable GartTlbWlk Errors already. If
+ * it doesn't, do it here as suggested by the BKDG.
*
* Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
*/
- u64 mask;
- int err;
-
- err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
- if (err == 0) {
- mask |= (1 << 10);
- wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
- }
+ msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
/*
* On family 10h BIOS may not have properly enabled WC+ support,
@@ -733,10 +712,7 @@
* NOTE: we want to use the _safe accessors so as not to #GP kvm
* guests on older kvm hosts.
*/
-
- rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
- value &= ~(1ULL << 24);
- wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+ msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
if (cpu_has_amd_erratum(c, amd_erratum_383))
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779eda..d8fba5c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
#include "cpu.h"
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
- u32 s = 1;
-
- while (s <= x)
- s <<= 1;
-
- return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
- u32 lo, hi;
-
- hi = base & ~0xFFF;
- lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
- lo &= ~0xFFF; /* Remove the ctrl value bits */
- lo |= key; /* Attribute we wish to set */
- wrmsr(reg+MSR_IDT_MCR0, lo, hi);
- mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophe already
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
- u32 mem = ramtop();
- u32 root = power2(mem);
- u32 base = root;
- u32 top = root;
- u32 floor = 0;
- int ct = 0;
-
- while (ct < nr) {
- u32 fspace = 0;
- u32 high;
- u32 low;
-
- /*
- * Find the largest block we will fill going upwards
- */
- high = power2(mem-top);
-
- /*
- * Find the largest block we will fill going downwards
- */
- low = base/2;
-
- /*
- * Don't fill below 1Mb going downwards as there
- * is an ISA hole in the way.
- */
- if (base <= 1024*1024)
- low = 0;
-
- /*
- * See how much space we could cover by filling below
- * the ISA hole
- */
-
- if (floor == 0)
- fspace = 512*1024;
- else if (floor == 512*1024)
- fspace = 128*1024;
-
- /* And forget ROM space */
-
- /*
- * Now install the largest coverage we get
- */
- if (fspace > high && fspace > low) {
- centaur_mcr_insert(ct, floor, fspace, key);
- floor += fspace;
- } else if (high > low) {
- centaur_mcr_insert(ct, top, high, key);
- top += high;
- } else if (low > 0) {
- base -= low;
- centaur_mcr_insert(ct, base, low, key);
- } else
- break;
- ct++;
- }
- /*
- * We loaded ct values. We now need to set the mask. The caller
- * must do this bit.
- */
- return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining and weak write ordered.
- *
- * To experiment with: Linux never uses stack operations for
- * mmio spaces so we could globally enable stack operation wc
- *
- * Load the registers with type 31 - full write combining, all
- * writes weakly ordered.
- */
- used = centaur_mcr_compute(6, 31);
-
- /*
- * Wipe unused MCRs
- */
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
- u32 lo, hi;
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining, weak store ordered.
- *
- * Load the registers with type 25
- * 8 - weak write ordering
- * 16 - weak read ordering
- * 1 - write combining
- */
- used = centaur_mcr_compute(6, 25);
-
- /*
- * Mark the registers we are using.
- */
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- for (i = 0; i < used; i++)
- lo |= 1<<(9+i);
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
- /*
- * Wipe unused MCRs
- */
-
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
- u32 lo, hi;
- u32 key;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- key = (lo>>17) & 7;
- lo |= key<<6; /* replace with unlock key */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
- u32 lo, hi;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
@@ -362,20 +132,6 @@
fcr_clr = DPDC;
printk(KERN_NOTICE "Disabling bugged TSC.\n");
clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
- centaur_create_optimal_mcr();
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- *
- * The C6 original lacks weak read order
- *
- * Note 0x120 is write only on Winchip 1
- */
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
break;
case 8:
switch (c->x86_mask) {
@@ -392,40 +148,12 @@
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
case 9:
name = "3";
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
default:
name = "??";
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5cd9bfa..897d620 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -31,11 +31,8 @@
/* Unmask CPUID levels if masked: */
if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
- if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
- misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
}
@@ -129,16 +126,10 @@
* Ingo Molnar reported a Pentium D (model 6) and a Xeon
* (model 2) with the same problem.
*/
- if (c->x86 == 15) {
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
- if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
- printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
- misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- }
- }
+ if (c->x86 == 15)
+ if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
+ pr_info("kmemcheck: Disabling fast string operations\n");
#endif
/*
@@ -195,10 +186,16 @@
}
}
+static int forcepae;
+static int __init forcepae_setup(char *__unused)
+{
+ forcepae = 1;
+ return 1;
+}
+__setup("forcepae", forcepae_setup);
+
static void intel_workarounds(struct cpuinfo_x86 *c)
{
- unsigned long lo, hi;
-
#ifdef CONFIG_X86_F00F_BUG
/*
* All current models of Pentium and Pentium with MMX technology CPUs
@@ -225,16 +222,26 @@
clear_cpu_cap(c, X86_FEATURE_SEP);
/*
+ * PAE CPUID issue: many Pentium M report no PAE but may have a
+ * functionally usable PAE implementation.
+ * Forcefully enable PAE if kernel parameter "forcepae" is present.
+ */
+ if (forcepae) {
+ printk(KERN_WARNING "PAE forced!\n");
+ set_cpu_cap(c, X86_FEATURE_PAE);
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
+ }
+
+ /*
* P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
- rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
- if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
- printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
- printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
- lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
- wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+ if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+ > 0) {
+ pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+ pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
}
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 9f7ca26..832d05a 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -26,6 +26,7 @@
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
+#include <asm/timer.h>
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -105,6 +106,11 @@
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+
+#ifdef CONFIG_X86_IO_APIC
+ no_timer_check = 1;
+#endif
+
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b886451..ae407f7 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -892,7 +892,6 @@
* hw_perf_group_sched_in() or x86_pmu_enable()
*
* step1: save events moving to new counters
- * step2: reprogram moved events into new counters
*/
for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
@@ -918,6 +917,9 @@
x86_pmu_stop(event, PERF_EF_UPDATE);
}
+ /*
+ * step2: reprogram moved events into new counters
+ */
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
@@ -1043,7 +1045,7 @@
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
- * at commit time (->commit_txn) as a whole
+ * at commit time (->commit_txn) as a whole.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto done_collect;
@@ -1058,6 +1060,10 @@
memcpy(cpuc->assign, assign, n*sizeof(int));
done_collect:
+ /*
+ * Commit the collect_events() state. See x86_pmu_del() and
+ * x86_pmu_*_txn().
+ */
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
@@ -1183,25 +1189,38 @@
* If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
+ *
+ * XXX assumes any ->del() called during a TXN will only be on
+ * an event added during that same TXN.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
return;
+ /*
+ * Not a TXN, therefore cleanup properly.
+ */
x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) {
- if (event == cpuc->event_list[i]) {
-
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(cpuc, event);
-
- while (++i < cpuc->n_events)
- cpuc->event_list[i-1] = cpuc->event_list[i];
-
- --cpuc->n_events;
+ if (event == cpuc->event_list[i])
break;
- }
}
+
+ if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
+ return;
+
+ /* If we have a newly added event; make sure to decrease n_added. */
+ if (i >= cpuc->n_events - cpuc->n_added)
+ --cpuc->n_added;
+
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(cpuc, event);
+
+ /* Delete the array entry. */
+ while (++i < cpuc->n_events)
+ cpuc->event_list[i-1] = cpuc->event_list[i];
+ --cpuc->n_events;
+
perf_event_update_userpage(event);
}
@@ -1521,6 +1540,8 @@
pr_cont("%s PMU driver.\n", x86_pmu.name);
+ x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@@ -1534,7 +1555,6 @@
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0, 0);
- x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (x86_pmu.event_attrs)
@@ -1594,7 +1614,8 @@
{
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/*
- * Truncate the collected events.
+ * Truncate collected array by the number of events added in this
+ * transaction. See x86_pmu_add() and x86_pmu_*_txn().
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
@@ -1605,6 +1626,8 @@
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
+ *
+ * Does not cancel the transaction on failure; expects the caller to do this.
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
@@ -1820,9 +1843,12 @@
if (ret)
return ret;
+ if (x86_pmu.attr_rdpmc_broken)
+ return -ENOTSUPP;
+
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
- smp_call_function(change_rdpmc, (void *)val, 1);
+ on_each_cpu(change_rdpmc, (void *)val, 1);
}
return count;
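
Note: the switch from smp_call_function() to on_each_cpu() matters
because smp_call_function() runs the callback only on the *other* online
CPUs, so the CPU that toggled the sysfs attribute kept a stale CR4.PCE
and userspace RDPMC behaved inconsistently there. on_each_cpu() includes
the calling CPU:

	/* runs change_rdpmc on every online CPU, including this one */
	on_each_cpu(change_rdpmc, (void *)val, 1);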
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index c1a8618..3b2f9bd 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -130,9 +130,11 @@
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
- int n_events;
- int n_added;
- int n_txn;
+	int			n_events; /* the # of events in the below arrays */
+	int			n_added;  /* the # of last events in the below arrays;
+					     they've never been enabled yet */
+	int			n_txn;    /* the # of last events in the below arrays;
+					     added in the current transaction */
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
@@ -409,6 +411,7 @@
/*
* sysfs attrs
*/
+ int attr_rdpmc_broken;
int attr_rdpmc;
struct attribute **format_attrs;
struct attribute **event_attrs;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 0fa4f24..aa333d9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1361,10 +1361,8 @@
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
- if (!status) {
- intel_pmu_enable_all(0);
- return handled;
- }
+ if (!status)
+ goto done;
loops = 0;
again:
@@ -2310,10 +2308,7 @@
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- /*
- * v2 and above have a perf capabilities MSR
- */
- if (version > 1) {
+ if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 29c2487..bd2253d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -66,6 +66,47 @@
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
+static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
+static void uncore_pmu_event_read(struct perf_event *event);
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+ struct intel_uncore_box *box;
+
+ box = *per_cpu_ptr(pmu->box, cpu);
+ if (box)
+ return box;
+
+ raw_spin_lock(&uncore_box_lock);
+ list_for_each_entry(box, &pmu->box_list, list) {
+ if (box->phys_id == topology_physical_package_id(cpu)) {
+ atomic_inc(&box->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = box;
+ break;
+ }
+ }
+ raw_spin_unlock(&uncore_box_lock);
+
+ return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+ /*
+	 * The perf core schedules events on a per-cpu basis; uncore events
+	 * are collected by one of the cpus inside a physical package.
+ */
+ return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+}
+
static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
u64 count;
@@ -501,8 +542,11 @@
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@@ -1178,10 +1222,15 @@
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
@@ -1631,6 +1680,349 @@
&snb_uncore_cbox,
NULL,
};
+
+enum {
+ SNB_PCI_UNCORE_IMC,
+};
+
+static struct uncore_event_desc snb_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
+ INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
+ INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+
+ { /* end: all zeroes */ },
+};
+
+#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
+#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48
+
+/* page size multiple covering all config regs */
+#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000
+
+#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
+#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
+#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+
+static struct attribute *snb_uncore_imc_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group snb_uncore_imc_format_group = {
+ .name = "format",
+ .attrs = snb_uncore_imc_formats_attr,
+};
+
+static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
+ resource_size_t addr;
+ u32 pci_dword;
+
+ pci_read_config_dword(pdev, where, &pci_dword);
+ addr = pci_dword;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ pci_read_config_dword(pdev, where + 4, &pci_dword);
+ addr |= ((resource_size_t)pci_dword << 32);
+#endif
+
+ addr &= ~(PAGE_SIZE - 1);
+
+ box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+ box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
+}
+
+static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
+}
+
+/*
+ * Custom event_init() function: we define our own fixed, free-running
+ * counters, so we do not want to conflict with the generic uncore
+ * logic. This also simplifies processing.
+ */
+static int snb_uncore_imc_event_init(struct perf_event *event)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
+ int idx, base;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ pmu = uncore_event_to_pmu(event);
+ /* no device found for this pmu */
+ if (pmu->func_id < 0)
+ return -ENOENT;
+
+ /* Sampling not supported yet */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ /*
+ * Place all uncore events for a particular physical package
+ * onto a single cpu
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* check only supported bits are set */
+ if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
+ return -EINVAL;
+
+ box = uncore_pmu_to_box(pmu, event->cpu);
+ if (!box || box->cpu < 0)
+ return -EINVAL;
+
+ event->cpu = box->cpu;
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+ event->hw.extra_reg.idx = EXTRA_REG_NONE;
+ event->hw.branch_reg.idx = EXTRA_REG_NONE;
+ /*
+ * check event is known (whitelist, determines counter)
+ */
+ switch (cfg) {
+ case SNB_UNCORE_PCI_IMC_DATA_READS:
+ base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
+ idx = UNCORE_PMC_IDX_FIXED;
+ break;
+ case SNB_UNCORE_PCI_IMC_DATA_WRITES:
+ base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
+ idx = UNCORE_PMC_IDX_FIXED + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* must be done before validate_group */
+ event->hw.event_base = base;
+ event->hw.config = cfg;
+ event->hw.idx = idx;
+
+ /* no group validation needed, we have free running counters */
+
+ return 0;
+}
+
+static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return 0;
+}
+
+static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ u64 count;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ event->hw.state = 0;
+ box->n_active++;
+
+ list_add_tail(&event->active_entry, &box->active_list);
+
+ count = snb_uncore_imc_read_counter(box, event);
+ local64_set(&event->hw.prev_count, count);
+
+ if (box->n_active == 1)
+ uncore_pmu_start_hrtimer(box);
+}
+
+static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ box->n_active--;
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ list_del(&event->active_entry);
+
+ if (box->n_active == 0)
+ uncore_pmu_cancel_hrtimer(box);
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of a event
+ * that we are disabling:
+ */
+ uncore_perf_event_update(box, event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!box)
+ return -ENODEV;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ snb_uncore_imc_event_start(event, 0);
+
+ box->n_events++;
+
+ return 0;
+}
+
+static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int i;
+
+ snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
+
+ for (i = 0; i < box->n_events; i++) {
+ if (event == box->event_list[i]) {
+ --box->n_events;
+ break;
+ }
+ }
+}
+
+static int snb_pci2phy_map_init(int devid)
+{
+ struct pci_dev *dev = NULL;
+ int bus;
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
+ if (!dev)
+ return -ENOTTY;
+
+ bus = dev->bus->number;
+
+ pcibus_to_physid[bus] = 0;
+
+ pci_dev_put(dev);
+
+ return 0;
+}
+
+static struct pmu snb_uncore_imc_pmu = {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = snb_uncore_imc_event_init,
+ .add = snb_uncore_imc_event_add,
+ .del = snb_uncore_imc_event_del,
+ .start = snb_uncore_imc_event_start,
+ .stop = snb_uncore_imc_event_stop,
+ .read = uncore_pmu_event_read,
+};
+
+static struct intel_uncore_ops snb_uncore_imc_ops = {
+ .init_box = snb_uncore_imc_init_box,
+ .enable_box = snb_uncore_imc_enable_box,
+ .disable_box = snb_uncore_imc_disable_box,
+ .disable_event = snb_uncore_imc_disable_event,
+ .enable_event = snb_uncore_imc_enable_event,
+ .hw_config = snb_uncore_imc_hw_config,
+ .read_counter = snb_uncore_imc_read_counter,
+};
+
+static struct intel_uncore_type snb_uncore_imc = {
+ .name = "imc",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .fixed_ctr_bits = 32,
+ .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
+ .event_descs = snb_uncore_imc_events,
+ .format_group = &snb_uncore_imc_format_group,
+ .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
+ .ops = &snb_uncore_imc_ops,
+ .pmu = &snb_uncore_imc_pmu,
+};
+
+static struct intel_uncore_type *snb_pci_uncores[] = {
+ [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
+ NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static struct pci_driver snb_uncore_pci_driver = {
+ .name = "snb_uncore",
+ .id_table = snb_uncore_pci_ids,
+};
+
+static struct pci_driver ivb_uncore_pci_driver = {
+ .name = "ivb_uncore",
+ .id_table = ivb_uncore_pci_ids,
+};
+
+static struct pci_driver hsw_uncore_pci_driver = {
+ .name = "hsw_uncore",
+ .id_table = hsw_uncore_pci_ids,
+};
+
/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
@@ -2781,6 +3173,7 @@
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
struct intel_uncore_box *box;
+ struct perf_event *event;
unsigned long flags;
int bit;
@@ -2793,19 +3186,27 @@
*/
local_irq_save(flags);
+ /*
+ * handle boxes with an active event list as opposed to active
+ * counters
+ */
+ list_for_each_entry(event, &box->active_list, active_entry) {
+ uncore_perf_event_update(box, event);
+ }
+
for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
uncore_perf_event_update(box, box->events[bit]);
local_irq_restore(flags);
- hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+ hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
return HRTIMER_RESTART;
}
static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
__hrtimer_start_range_ns(&box->hrtimer,
- ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+ ns_to_ktime(box->hrtimer_duration), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
@@ -2839,45 +3240,14 @@
box->cpu = -1;
box->phys_id = -1;
+ /* set default hrtimer timeout */
+ box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
+
+ INIT_LIST_HEAD(&box->active_list);
+
return box;
}
-static struct intel_uncore_box *
-uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
-{
- struct intel_uncore_box *box;
-
- box = *per_cpu_ptr(pmu->box, cpu);
- if (box)
- return box;
-
- raw_spin_lock(&uncore_box_lock);
- list_for_each_entry(box, &pmu->box_list, list) {
- if (box->phys_id == topology_physical_package_id(cpu)) {
- atomic_inc(&box->refcnt);
- *per_cpu_ptr(pmu->box, cpu) = box;
- break;
- }
- }
- raw_spin_unlock(&uncore_box_lock);
-
- return *per_cpu_ptr(pmu->box, cpu);
-}
-
-static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
- return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
-static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
- /*
- * perf core schedules event on the basis of cpu, uncore events are
- * collected by one of the cpus inside a physical package.
- */
- return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
-}
-
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
@@ -3271,16 +3641,21 @@
{
int ret;
- pmu->pmu = (struct pmu) {
- .attr_groups = pmu->type->attr_groups,
- .task_ctx_nr = perf_invalid_context,
- .event_init = uncore_pmu_event_init,
- .add = uncore_pmu_event_add,
- .del = uncore_pmu_event_del,
- .start = uncore_pmu_event_start,
- .stop = uncore_pmu_event_stop,
- .read = uncore_pmu_event_read,
- };
+ if (!pmu->type->pmu) {
+ pmu->pmu = (struct pmu) {
+ .attr_groups = pmu->type->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = uncore_pmu_event_init,
+ .add = uncore_pmu_event_add,
+ .del = uncore_pmu_event_del,
+ .start = uncore_pmu_event_start,
+ .stop = uncore_pmu_event_stop,
+ .read = uncore_pmu_event_read,
+ };
+ } else {
+ pmu->pmu = *pmu->type->pmu;
+ pmu->pmu.attr_groups = pmu->type->attr_groups;
+ }
if (pmu->type->num_boxes == 1) {
if (strlen(pmu->type->name) > 0)
@@ -3326,6 +3701,8 @@
if (!pmus)
return -ENOMEM;
+ type->pmus = pmus;
+
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
0, type->num_counters, 0, 0);
@@ -3361,7 +3738,6 @@
}
type->pmu_group = &uncore_pmu_attr_group;
- type->pmus = pmus;
return 0;
fail:
uncore_type_exit(type);
@@ -3493,6 +3869,28 @@
pci_uncores = ivt_pci_uncores;
uncore_pci_driver = &ivt_uncore_pci_driver;
break;
+ case 42: /* Sandy Bridge */
+ ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
+ if (ret)
+ return ret;
+ pci_uncores = snb_pci_uncores;
+ uncore_pci_driver = &snb_uncore_pci_driver;
+ break;
+ case 58: /* Ivy Bridge */
+ ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
+ if (ret)
+ return ret;
+ pci_uncores = snb_pci_uncores;
+ uncore_pci_driver = &ivb_uncore_pci_driver;
+ break;
+ case 60: /* Haswell */
+ case 69: /* Haswell Celeron */
+ ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
+ if (ret)
+ return ret;
+ pci_uncores = snb_pci_uncores;
+ uncore_pci_driver = &hsw_uncore_pci_driver;
+ break;
default:
return 0;
}
@@ -3764,7 +4162,7 @@
static int __init uncore_cpu_init(void)
{
- int ret, cpu, max_cores;
+ int ret, max_cores;
max_cores = boot_cpu_data.x86_max_cores;
switch (boot_cpu_data.x86_model) {
@@ -3808,29 +4206,6 @@
if (ret)
return ret;
- get_online_cpus();
-
- for_each_online_cpu(cpu) {
- int i, phys_id = topology_physical_package_id(cpu);
-
- for_each_cpu(i, &uncore_cpu_mask) {
- if (phys_id == topology_physical_package_id(i)) {
- phys_id = -1;
- break;
- }
- }
- if (phys_id < 0)
- continue;
-
- uncore_cpu_prepare(cpu, phys_id);
- uncore_event_init_cpu(cpu);
- }
- on_each_cpu(uncore_cpu_setup, NULL, 1);
-
- register_cpu_notifier(&uncore_cpu_nb);
-
- put_online_cpus();
-
return 0;
}
@@ -3859,6 +4234,41 @@
return 0;
}
+static void __init uncore_cpumask_init(void)
+{
+ int cpu;
+
+ /*
+ * only invoke once from the MSR or PCI init code
+ */
+ if (!cpumask_empty(&uncore_cpu_mask))
+ return;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ int i, phys_id = topology_physical_package_id(cpu);
+
+ for_each_cpu(i, &uncore_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i)) {
+ phys_id = -1;
+ break;
+ }
+ }
+ if (phys_id < 0)
+ continue;
+
+ uncore_cpu_prepare(cpu, phys_id);
+ uncore_event_init_cpu(cpu);
+ }
+ on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+ register_cpu_notifier(&uncore_cpu_nb);
+
+ put_online_cpus();
+}
+
static int __init intel_uncore_init(void)
{
int ret;
@@ -3877,6 +4287,7 @@
uncore_pci_exit();
goto fail;
}
+ uncore_cpumask_init();
uncore_pmus_register();
return 0;
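
The client IMC has no overflow interrupt, so the box hrtimer above periodically folds each free-running counter into the event count as a delta against a saved snapshot. A minimal sketch of that update step, as a hypothetical helper (the driver's real path is uncore_perf_event_update()), assuming a 32-bit free-running counter:

    /* Accumulate a free-running counter delta; racing updaters resolve via cmpxchg. */
    static void update_free_running(struct perf_event *event, u64 now)
    {
            u64 prev, mask = (1ULL << 32) - 1;      /* 32-bit counter width */

            do {
                    prev = local64_read(&event->hw.prev_count);
            } while (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev);

            /* Masking the delta handles wraparound between two samples. */
            local64_add((now - prev) & mask, &event->count);
    }
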
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index a80ab71..90236f0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -6,6 +6,7 @@
#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
+#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)
#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
@@ -440,6 +441,7 @@
struct intel_uncore_ops *ops;
struct uncore_event_desc *event_descs;
const struct attribute_group *attr_groups[4];
+ struct pmu *pmu; /* for custom pmu ops */
};
#define pmu_group attr_groups[0]
@@ -488,8 +490,11 @@
u64 tags[UNCORE_PMC_IDX_MAX];
struct pci_dev *pci_dev;
struct intel_uncore_pmu *pmu;
+ u64 hrtimer_duration; /* hrtimer timeout for this box */
struct hrtimer hrtimer;
struct list_head list;
+ struct list_head active_list;
+ void *io_addr;
struct intel_uncore_extra_reg shared_regs[0];
};
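
The new hrtimer_duration field lets a box shorten the default 60 s polling period; the 32-bit IMC counters can wrap in well under a minute at memory bandwidths of a few GB/s, so the SNB code presumably sets the 5 s interval in its init_box callback. A sketch under that assumption (not the actual driver body):

    static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
    {
            /* ... ioremap the IMC counter BAR into box->io_addr ... */

            /* 32-bit free-running counters wrap quickly; poll every 5 s. */
            box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
    }
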
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 3486e66..5d466b7 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1257,7 +1257,24 @@
pass++;
goto again;
}
-
+ /*
+ * Perf does test runs to see if a whole group can be assigned
+ * together successfully. There can be multiple rounds of this.
+ * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
+ * bits, such that the next round of group assignments will
+ * cause the above p4_should_swap_ts to pass instead of fail.
+ * This leads to counters exclusive to thread0 being used by
+ * thread1.
+ *
+ * Solve this with a cheap hack, reset the idx back to -1 to
+ * force a new lookup (p4_next_cntr) to get the right counter
+ * for the right thread.
+ *
+ * This probably doesn't comply with the general spirit of how
+ * perf wants to work, but P4 is special. :-(
+ */
+ if (p4_should_swap_ts(hwc->config, cpu))
+ hwc->idx = -1;
p4_pmu_swap_config_ts(hwc, cpu);
if (assign)
assign[i] = cntr_idx;
@@ -1322,6 +1339,7 @@
__init int p4_pmu_init(void)
{
unsigned int low, high;
+ int i, reg;
/* If we get stripped -- indexing fails */
BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
@@ -1340,5 +1358,19 @@
x86_pmu = p4_pmu;
+ /*
+ * Even though the counters are configured to interrupt a particular
+ * logical processor when an overflow happens, testing has shown that
+ * on kdump kernels (which use a single CPU), thread1's counter
+ * continues to run and will report an NMI on thread0. Due to the
+ * overflow bug, this leads to a stream of unknown NMIs.
+ *
+ * Solve this by zeroing out the registers to mimic a reset.
+ */
+ for (i = 0; i < x86_pmu.num_counters; i++) {
+ reg = x86_pmu_config_addr(i);
+ wrmsrl_safe(reg, 0ULL);
+ }
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index b1e2fe1..7c1a0c0 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -231,31 +231,49 @@
};
+static __init void p6_pmu_rdpmc_quirk(void)
+{
+ if (boot_cpu_data.x86_mask < 9) {
+ /*
+ * PPro erratum 26; fixed in stepping 9 and above.
+ */
+ pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
+ x86_pmu.attr_rdpmc_broken = 1;
+ x86_pmu.attr_rdpmc = 0;
+ }
+}
+
__init int p6_pmu_init(void)
{
+ x86_pmu = p6_pmu;
+
switch (boot_cpu_data.x86_model) {
- case 1:
- case 3: /* Pentium Pro */
- case 5:
- case 6: /* Pentium II */
- case 7:
- case 8:
- case 11: /* Pentium III */
- case 9:
- case 13:
- /* Pentium M */
+ case 1: /* Pentium Pro */
+ x86_add_quirk(p6_pmu_rdpmc_quirk);
break;
+
+ case 3: /* Pentium II - Klamath */
+ case 5: /* Pentium II - Deschutes */
+ case 6: /* Pentium II - Mendocino */
+ break;
+
+ case 7: /* Pentium III - Katmai */
+ case 8: /* Pentium III - Coppermine */
+ case 10: /* Pentium III Xeon */
+ case 11: /* Pentium III - Tualatin */
+ break;
+
+ case 9: /* Pentium M - Banias */
+ case 13: /* Pentium M - Dothan */
+ break;
+
default:
- pr_cont("unsupported p6 CPU model %d ",
- boot_cpu_data.x86_model);
+ pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
return -ENODEV;
}
- x86_pmu = p6_pmu;
-
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
-
return 0;
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a57902e..507de80 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -57,9 +57,7 @@
{
#ifdef CONFIG_X86_32
struct pt_regs fixed_regs;
-#endif
-#ifdef CONFIG_X86_32
if (!user_mode_vm(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index f2a1770..a21d49c 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -30,7 +30,7 @@
unsigned long dummy;
stack = &dummy;
- if (task && task != current)
+ if (task != current)
stack = (unsigned long *)task->thread.sp;
}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 81ba276..f36bd42 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -544,6 +544,10 @@
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
+
+ cmpl $2,(%esp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@
pop %edx
pop %ecx
pop %eax
- addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
+is_nmi:
+ addl $8,%esp /* drop vector number and error code */
iret
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e1aabdb..a468c0a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@
ENTRY(early_idt_handler)
cld
+ cmpl $2,(%rsp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,early_recursion_flag(%rip)
jz 1f
incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@
popq %rdx
popq %rcx
popq %rax
- addq $16,%rsp # drop vector number and error code
decl early_recursion_flag(%rip)
+is_nmi:
+ addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e8368c6..d5dd808 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@
void __kernel_fpu_end(void)
{
- if (use_eager_fpu())
- math_state_restore();
- else
+ if (use_eager_fpu()) {
+ /*
+ * For eager fpu, most of the time, tsk_used_math() is true.
+ * Restore the user math as we are done with the kernel usage.
+ * In a few instances during thread exit, signal handling, etc.,
+ * tsk_used_math() is false. Those few places will take proper
+ * actions, so we don't need to restore the math here.
+ */
+ if (likely(tsk_used_math(current)))
+ math_state_restore();
+ } else {
stts();
+ }
}
EXPORT_SYMBOL(__kernel_fpu_end);
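
The contract for users of the kernel FPU API is unchanged by this; a hedged sketch of a typical caller (the function name is illustrative):

    #include <asm/i387.h>   /* kernel_fpu_begin()/kernel_fpu_end() in this era */

    static void do_simd_copy(void *dst, const void *src, size_t len)
    {
            kernel_fpu_begin();
            /* ... SSE/AVX work on dst/src ... */
            kernel_fpu_end();       /* eager FPU: restores user math if tsk_used_math() */
    }
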
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 4eabc16..679cef0 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -279,5 +279,7 @@
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
+ (unsigned long)&_text - __START_KERNEL);
}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 18be1893..e69f988 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
+#include <linux/random.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -43,13 +44,52 @@
} while (0)
#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+ randomize_modules = 0;
+ return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+ if (randomize_modules) {
+ mutex_lock(&module_kaslr_mutex);
+ /*
+ * Calculate the module_load_offset the first time this
+ * code is called. Once calculated it stays the same until
+ * reboot.
+ */
+ if (module_load_offset == 0)
+ module_load_offset =
+ (get_random_int() % 1024 + 1) * PAGE_SIZE;
+ mutex_unlock(&module_kaslr_mutex);
+ }
+ return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+ return 0;
+}
+#endif
+
void *module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
- NUMA_NO_NODE, __builtin_return_address(0));
+ return __vmalloc_node_range(size, 1,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+ PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
#ifdef CONFIG_X86_32
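
get_random_int() % 1024 + 1 picks a page count in [1, 1024], so with 4 KiB pages every module in a boot session is shifted by the same 4 KiB..4 MiB offset, computed once and cached. Illustrative arithmetic only:

    unsigned long pages = get_random_int() % 1024 + 1;  /* 1..1024 pages */
    unsigned long off   = pages * PAGE_SIZE;            /* 4 KiB .. 4 MiB */
    /* module_alloc() then allocates from MODULES_VADDR + off upward */
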
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 6fcb49c..b4872b9 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -87,6 +87,7 @@
#define nmi_to_desc(type) (&nmi_desc[type])
static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
+
static int __init nmi_warning_debugfs(void)
{
debugfs_create_u64("nmi_longest_ns", 0644,
@@ -95,6 +96,20 @@
}
fs_initcall(nmi_warning_debugfs);
+static void nmi_max_handler(struct irq_work *w)
+{
+ struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
+ int remainder_ns, decimal_msecs;
+ u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+
+ remainder_ns = do_div(whole_msecs, (1000 * 1000));
+ decimal_msecs = remainder_ns / 1000;
+
+ printk_ratelimited(KERN_INFO
+ "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+ a->handler, whole_msecs, decimal_msecs);
+}
+
static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
struct nmi_desc *desc = nmi_to_desc(type);
@@ -110,26 +125,20 @@
* to handle those situations.
*/
list_for_each_entry_rcu(a, &desc->head, list) {
- u64 before, delta, whole_msecs;
- int remainder_ns, decimal_msecs, thishandled;
+ int thishandled;
+ u64 delta;
- before = sched_clock();
+ delta = sched_clock();
thishandled = a->handler(type, regs);
handled += thishandled;
- delta = sched_clock() - before;
+ delta = sched_clock() - delta;
trace_nmi_handler(a->handler, (int)delta, thishandled);
- if (delta < nmi_longest_ns)
+ if (delta < nmi_longest_ns || delta < a->max_duration)
continue;
- nmi_longest_ns = delta;
- whole_msecs = delta;
- remainder_ns = do_div(whole_msecs, (1000 * 1000));
- decimal_msecs = remainder_ns / 1000;
- printk_ratelimited(KERN_INFO
- "INFO: NMI handler (%ps) took too long to run: "
- "%lld.%03d msecs\n", a->handler, whole_msecs,
- decimal_msecs);
+ a->max_duration = delta;
+ irq_work_queue(&a->irq_work);
}
rcu_read_unlock();
@@ -146,6 +155,8 @@
if (!action->handler)
return -EINVAL;
+ init_irq_work(&action->irq_work, nmi_max_handler);
+
spin_lock_irqsave(&desc->lock, flags);
/*
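
The point of the rework is that printk() is unsafe from NMI context, so the slow-handler report is deferred: the NMI path only records max_duration and queues an irq_work, and the message prints later from a safe context. The general shape of that pattern, as a minimal sketch detached from struct nmiaction (races on the scratch variable are tolerated here, as in the patch):

    #include <linux/irq_work.h>

    static u64 worst_ns;
    static struct irq_work report_work;

    static void report_fn(struct irq_work *w)
    {
            pr_info("worst NMI handler duration: %llu ns\n", worst_ns);
    }

    void report_init(void)                  /* process context, once */
    {
            init_irq_work(&report_work, report_fn);
    }

    void note_duration(u64 delta)           /* NMI context: no printk here */
    {
            if (delta > worst_ns) {
                    worst_ns = delta;
                    irq_work_queue(&report_work);   /* NMI-safe */
            }
    }
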
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a..f7d0672 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@
flag |= __GFP_ZERO;
again:
page = NULL;
- if (!(flag & GFP_ATOMIC))
+ /* CMA can be used only in a context that permits sleeping */
+ if (flag & __GFP_WAIT)
page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ /* fallback */
if (!page)
page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
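
The old !(flag & GFP_ATOMIC) test asked the wrong question; whether CMA may be used depends on whether the caller can sleep, and that is exactly what the __GFP_WAIT bit encodes. For reference (flag compositions as defined in this era's include/linux/gfp.h):

    /*
     *   GFP_ATOMIC = __GFP_HIGH                        -> no __GFP_WAIT, may not sleep
     *   GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS  -> may sleep
     */
    if (flag & __GFP_WAIT)  /* caller may sleep, so CMA page migration is allowed */
            page = dma_alloc_from_contiguous(dev, count, get_order(size));
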
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3fb8d95..4505e2a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -298,10 +298,7 @@
*/
void arch_cpu_idle(void)
{
- if (cpuidle_idle_call())
- x86_idle();
- else
- local_irq_enable();
+ x86_idle();
}
/*
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 7c6acd4..ff898bb 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- node = val & 7;
+ node = pcibus_to_node(dev->bus) | (val & 7);
/*
* Some hardware may return an invalid node ID,
* so check it first:
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e6..fa511ac 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -926,11 +926,11 @@
#ifdef CONFIG_EFI
if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL32", 4)) {
- set_bit(EFI_BOOT, &x86_efi_facility);
+ set_bit(EFI_BOOT, &efi.flags);
} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL64", 4)) {
- set_bit(EFI_BOOT, &x86_efi_facility);
- set_bit(EFI_64BIT, &x86_efi_facility);
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
}
if (efi_enabled(EFI_BOOT))
@@ -1239,14 +1239,8 @@
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
- /* Once setup is done above, unmap the EFI memory map on
- * mismatched firmware/kernel archtectures since there is no
- * support for runtime services.
- */
- if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
- pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
- efi_unmap_memmap();
- }
+ if (efi_enabled(EFI_BOOT))
+ efi_apply_memmap_quirks();
#endif
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a32da80..60179ec 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -122,8 +122,9 @@
* Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
*/
cpuid = smp_processor_id();
- if (apic->wait_for_init_deassert && cpuid != 0)
- apic->wait_for_init_deassert(&init_deasserted);
+ if (apic->wait_for_init_deassert && cpuid)
+ while (!atomic_read(&init_deasserted))
+ cpu_relax();
/*
* (This works even if the APIC is not enabled.)
@@ -701,11 +702,15 @@
int id;
int boot_error;
+ preempt_disable();
+
/*
* Wake up AP by INIT, INIT, STARTUP sequence.
*/
- if (cpu)
- return wakeup_secondary_cpu_via_init(apicid, start_ip);
+ if (cpu) {
+ boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
+ goto out;
+ }
/*
* Wake up BSP by nmi.
@@ -725,6 +730,9 @@
boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
}
+out:
+ preempt_enable();
+
return boot_error;
}
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 24d3c91..6ec91c0 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -23,7 +23,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
-DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
+__visible DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index acb3b60..cfbe99f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -653,13 +653,10 @@
/* Calibrate TSC using MSR for Intel Atom SoCs */
local_irq_save(flags);
- i = try_msr_calibrate_tsc(&fast_calibrate);
+ fast_calibrate = try_msr_calibrate_tsc();
local_irq_restore(flags);
- if (i >= 0) {
- if (i == 0)
- pr_warn("Fast TSC calibration using MSR failed\n");
+ if (fast_calibrate)
return fast_calibrate;
- }
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 8b5434f..92ae6ac 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -53,7 +53,7 @@
/* TNG */
{ 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
/* VLV2 */
- { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
+ { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
/* ANN */
{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
};
@@ -77,21 +77,18 @@
/*
* Do MSR calibration only for known/supported CPUs.
- * Return values:
- * -1: CPU is unknown/unsupported for MSR based calibration
- * 0: CPU is known/supported, but calibration failed
- * 1: CPU is known/supported, and calibration succeeded
+ *
+ * Returns the calibration value or 0 if MSR calibration failed.
*/
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
+unsigned long try_msr_calibrate_tsc(void)
{
- int cpu_index;
u32 lo, hi, ratio, freq_id, freq;
+ unsigned long res;
+ int cpu_index;
cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
if (cpu_index < 0)
- return -1;
-
- *fast_calibrate = 0;
+ return 0;
if (freq_desc_tables[cpu_index].msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
@@ -103,7 +100,7 @@
pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
if (!ratio)
- return 0;
+ goto fail;
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
@@ -112,16 +109,19 @@
pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
freq_id, freq);
if (!freq)
- return 0;
+ goto fail;
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
- *fast_calibrate = freq * ratio;
- pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
+ res = freq * ratio;
+ pr_info("TSC runs at %lu KHz\n", res);
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
#endif
+ return res;
- return 1;
+fail:
+ pr_warn("Fast TSC calibration using MSR failed\n");
+ return 0;
}
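
The returned value is simply the bus clock times the maximum core ratio, in KHz. A worked example with illustrative numbers, assuming the table's KHz encoding (FREQ_133 being the 133 MHz FSB entry) and a platform ratio of 16:

    res = FREQ_133 * 16;    /* 133200 KHz * 16 = 2131200 KHz, i.e. a ~2.13 GHz TSC */
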
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e50425d..9b53135 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2672,6 +2672,7 @@
break;
}
+ drop_large_spte(vcpu, iterator.sptep);
if (!is_shadow_present_pte(*iterator.sptep)) {
u64 base_addr = iterator.addr;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8f..2de1bc0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm)) {
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ if (irqchip_in_kernel(svm->vcpu.kvm))
return r;
- }
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
+ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
if (irr == -1)
return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a06f101..3927528 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6688,7 +6688,7 @@
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
- !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+ !(vmcs12->guest_cr0 & X86_CR0_TS))
return 0;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39c28f09..2b857843 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6186,7 +6186,7 @@
frag->len -= len;
}
- if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+ if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
/* FIXME: return into emulator if single-stepping. */
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
index 3056702..ff4fa51 100644
--- a/arch/x86/lib/hash.c
+++ b/arch/x86/lib/hash.c
@@ -32,6 +32,7 @@
*/
#include <linux/hash.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
@@ -39,7 +40,11 @@
static inline u32 crc32_u32(u32 crc, u32 val)
{
+#ifdef CONFIG_AS_CRC32
asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
+#else
+ asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
+#endif
return crc;
}
@@ -49,19 +54,18 @@
u32 i, tmp = 0;
for (i = 0; i < len / 4; i++)
- seed = crc32_u32(*p32++, seed);
+ seed = crc32_u32(seed, *p32++);
- switch (3 - (len & 0x03)) {
- case 0:
+ switch (len & 3) {
+ case 3:
tmp |= *((const u8 *) p32 + 2) << 16;
/* fallthrough */
- case 1:
+ case 2:
tmp |= *((const u8 *) p32 + 1) << 8;
/* fallthrough */
- case 2:
+ case 1:
tmp |= *((const u8 *) p32);
- seed = crc32_u32(tmp, seed);
- default:
+ seed = crc32_u32(seed, tmp);
break;
}
@@ -74,12 +78,12 @@
u32 i;
for (i = 0; i < len; i++)
- seed = crc32_u32(*p32++, seed);
+ seed = crc32_u32(seed, *p32++);
return seed;
}
-void setup_arch_fast_hash(struct fast_hash_ops *ops)
+void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
{
if (cpu_has_xmm4_2) {
ops->hash = intel_crc4_2_hash;
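
Both fixes restore the crc32l operand convention, crc' = crc32(crc, data): the accumulator comes first and the data word second, and the trailing len & 3 bytes are packed little-endian before the final fold. The corrected tail handling, pulled out as a hypothetical standalone helper:

    /* Fold the last len & 3 bytes, low byte first, into the running crc. */
    static u32 crc_tail(u32 crc, const u8 *p, unsigned int tail)
    {
            u32 tmp = 0;

            switch (tail) {
            case 3:
                    tmp |= p[2] << 16;
                    /* fallthrough */
            case 2:
                    tmp |= p[1] << 8;
                    /* fallthrough */
            case 1:
                    tmp |= p[0];
                    crc = crc32_u32(crc, tmp);
                    break;
            }
            return crc;
    }
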
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index e78761d..a404b4b 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -4,7 +4,7 @@
#undef memcpy
#undef memset
-void *memcpy(void *to, const void *from, size_t n)
+__visible void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_X86_USE_3DNOW
return __memcpy3d(to, from, n);
@@ -14,13 +14,13 @@
}
EXPORT_SYMBOL(memcpy);
-void *memset(void *s, int c, size_t count)
+__visible void *memset(void *s, int c, size_t count)
{
return __memset(s, c, count);
}
EXPORT_SYMBOL(memset);
-void *memmove(void *dest, const void *src, size_t n)
+__visible void *memmove(void *dest, const void *src, size_t n)
{
int d0,d1,d2,d3,d4,d5;
char *ret = dest;
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 8f8eebd..db9db44 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -8,7 +8,7 @@
msrs = alloc_percpu(struct msr);
if (!msrs) {
- pr_warning("%s: error allocating msrs\n", __func__);
+ pr_warn("%s: error allocating msrs\n", __func__);
return NULL;
}
@@ -21,3 +21,90 @@
free_percpu(msrs);
}
EXPORT_SYMBOL(msrs_free);
+
+/**
+ * Read an MSR with error handling
+ *
+ * @msr: MSR to read
+ * @m: value to read into
+ *
+ * Reads the MSR into @m on success; on failure the output
+ * argument @m is left unchanged.
+ *
+ */
+int msr_read(u32 msr, struct msr *m)
+{
+ int err;
+ u64 val;
+
+ err = rdmsrl_safe(msr, &val);
+ if (!err)
+ m->q = val;
+
+ return err;
+}
+
+/**
+ * Write an MSR with error handling
+ *
+ * @msr: MSR to write
+ * @m: value to write
+ */
+int msr_write(u32 msr, struct msr *m)
+{
+ return wrmsrl_safe(msr, m->q);
+}
+
+static inline int __flip_bit(u32 msr, u8 bit, bool set)
+{
+ struct msr m, m1;
+ int err = -EINVAL;
+
+ if (bit > 63)
+ return err;
+
+ err = msr_read(msr, &m);
+ if (err)
+ return err;
+
+ m1 = m;
+ if (set)
+ m1.q |= BIT_64(bit);
+ else
+ m1.q &= ~BIT_64(bit);
+
+ if (m1.q == m.q)
+ return 0;
+
+ err = msr_write(msr, &m1);
+ if (err)
+ return err;
+
+ return 1;
+}
+
+/**
+ * Set @bit in an MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already set.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_set_bit(u32 msr, u8 bit)
+{
+ return __flip_bit(msr, bit, true);
+}
+
+/**
+ * Clear @bit in an MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already cleared.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_clear_bit(u32 msr, u8 bit)
+{
+ return __flip_bit(msr, bit, false);
+}
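
The tri-state return (< 0 error, 0 no-op, > 0 written) lets a caller tell "already configured" apart from "just configured". A hedged usage sketch; MSR_EXAMPLE and EXAMPLE_ENABLE_BIT are placeholders, not real definitions:

    int ret = msr_set_bit(MSR_EXAMPLE, EXAMPLE_ENABLE_BIT);
    if (ret < 0)
            pr_err("MSR write faulted\n");
    else if (ret == 0)
            pr_debug("bit was already set, nothing written\n");
    else
            pr_debug("bit set, MSR written\n");
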
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0002a3a..20621d7 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -30,6 +30,7 @@
unsigned long start_address;
unsigned long current_address;
const struct addr_marker *marker;
+ bool to_dmesg;
};
struct addr_marker {
@@ -88,10 +89,28 @@
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
+#define pt_dump_seq_printf(m, to_dmesg, fmt, args...) \
+({ \
+ if (to_dmesg) \
+ printk(KERN_INFO fmt, ##args); \
+ else \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_cont_printf(m, to_dmesg, fmt, args...) \
+({ \
+ if (to_dmesg) \
+ printk(KERN_CONT fmt, ##args); \
+ else \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
/*
* Print a readable form of a pgprot_t to the seq_file
*/
-static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
+static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
pgprotval_t pr = pgprot_val(prot);
static const char * const level_name[] =
@@ -99,47 +118,47 @@
if (!pgprot_val(prot)) {
/* Not present */
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
} else {
if (pr & _PAGE_USER)
- seq_printf(m, "USR ");
+ pt_dump_cont_printf(m, dmsg, "USR ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
if (pr & _PAGE_RW)
- seq_printf(m, "RW ");
+ pt_dump_cont_printf(m, dmsg, "RW ");
else
- seq_printf(m, "ro ");
+ pt_dump_cont_printf(m, dmsg, "ro ");
if (pr & _PAGE_PWT)
- seq_printf(m, "PWT ");
+ pt_dump_cont_printf(m, dmsg, "PWT ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
if (pr & _PAGE_PCD)
- seq_printf(m, "PCD ");
+ pt_dump_cont_printf(m, dmsg, "PCD ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
/* Bit 9 has a different meaning on level 3 vs 4 */
if (level <= 3) {
if (pr & _PAGE_PSE)
- seq_printf(m, "PSE ");
+ pt_dump_cont_printf(m, dmsg, "PSE ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
} else {
if (pr & _PAGE_PAT)
- seq_printf(m, "pat ");
+ pt_dump_cont_printf(m, dmsg, "pat ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
}
if (pr & _PAGE_GLOBAL)
- seq_printf(m, "GLB ");
+ pt_dump_cont_printf(m, dmsg, "GLB ");
else
- seq_printf(m, " ");
+ pt_dump_cont_printf(m, dmsg, " ");
if (pr & _PAGE_NX)
- seq_printf(m, "NX ");
+ pt_dump_cont_printf(m, dmsg, "NX ");
else
- seq_printf(m, "x ");
+ pt_dump_cont_printf(m, dmsg, "x ");
}
- seq_printf(m, "%s\n", level_name[level]);
+ pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
/*
@@ -178,7 +197,8 @@
st->current_prot = new_prot;
st->level = level;
st->marker = address_markers;
- seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
+ st->marker->name);
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
const char *unit = units;
@@ -188,17 +208,17 @@
/*
* Now print the actual finished series
*/
- seq_printf(m, "0x%0*lx-0x%0*lx ",
- width, st->start_address,
- width, st->current_address);
+ pt_dump_seq_printf(m, st->to_dmesg, "0x%0*lx-0x%0*lx ",
+ width, st->start_address,
+ width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
while (!(delta & 1023) && unit[1]) {
delta >>= 10;
unit++;
}
- seq_printf(m, "%9lu%c ", delta, *unit);
- printk_prot(m, st->current_prot, st->level);
+ pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ", delta, *unit);
+ printk_prot(m, st->current_prot, st->level, st->to_dmesg);
/*
* We print markers for special areas of address space,
@@ -207,7 +227,8 @@
*/
if (st->current_address >= st->marker[1].start_address) {
st->marker++;
- seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
+ st->marker->name);
}
st->start_address = st->current_address;
@@ -296,7 +317,7 @@
#define pgd_none(a) pud_none(__pud(pgd_val(a)))
#endif
-static void walk_pgd_level(struct seq_file *m)
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
pgd_t *start = (pgd_t *) &init_level4_pgt;
@@ -304,9 +325,12 @@
pgd_t *start = swapper_pg_dir;
#endif
int i;
- struct pg_state st;
+ struct pg_state st = {};
- memset(&st, 0, sizeof(st));
+ if (pgd) {
+ start = pgd;
+ st.to_dmesg = true;
+ }
for (i = 0; i < PTRS_PER_PGD; i++) {
st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
@@ -331,7 +355,7 @@
static int ptdump_show(struct seq_file *m, void *v)
{
- walk_pgd_level(m);
+ ptdump_walk_pgd_level(m, NULL);
return 0;
}
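
The new pgd argument doubles as an output selector: NULL keeps the old debugfs/seq_file behaviour, while a non-NULL pgd walks that table and routes everything to printk via the to_dmesg flag (this is what efi_dump_pagetable() relies on below). The two call styles:

    ptdump_walk_pgd_level(m, NULL);         /* debugfs: init_mm tables -> seq_file */
    ptdump_walk_pgd_level(NULL, my_pgd);    /* debugging: a given pgd -> dmesg */

(my_pgd stands in for whatever page table you want dumped.)
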
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6dea040..8e57229 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -584,8 +584,13 @@
if (error_code & PF_INSTR) {
unsigned int level;
+ pgd_t *pgd;
+ pte_t *pte;
- pte_t *pte = lookup_address(address, &level);
+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+ pgd += pgd_index(address);
+
+ pte = lookup_address_in_pgd(pgd, address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
@@ -1020,13 +1025,17 @@
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Making this an actual function
+ * guarantees there's a function trace entry.
*/
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
- unsigned long address;
struct mm_struct *mm;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1043,6 @@
tsk = current;
mm = tsk->mm;
- /* Get the faulting address: */
- address = read_cr2();
-
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
@@ -1248,32 +1254,50 @@
up_read(&mm->mmap_sem);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ unsigned long address = read_cr2(); /* Get the faulting address */
enum ctx_state prev_state;
+ /*
+ * We must have this function tagged with __kprobes, notrace and call
+ * read_cr2() before calling anything else, to avoid calling any kind
+ * of tracing machinery before we've observed the CR2 value.
+ *
+ * exception_{enter,exit}() contain all sorts of tracepoints.
+ */
+
prev_state = exception_enter();
- __do_page_fault(regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
if (user_mode(regs))
- trace_page_fault_user(read_cr2(), regs, error_code);
+ trace_page_fault_user(address, regs, error_code);
else
- trace_page_fault_kernel(read_cr2(), regs, error_code);
+ trace_page_fault_kernel(address, regs, error_code);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ /*
+ * The exception_enter and tracepoint processing could
+ * trigger another page fault (user-space callchain
+ * reading) and destroy the original cr2 value, so read
+ * the faulting address now.
+ */
+ unsigned long address = read_cr2();
enum ctx_state prev_state;
prev_state = exception_enter();
- trace_page_fault_entries(regs, error_code);
- __do_page_fault(regs, error_code);
+ trace_page_fault_entries(address, regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
+#endif /* CONFIG_TRACING */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index b3b19f4..ae242a7 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -126,8 +126,8 @@
* @vaddr: virtual start address
* @size: number of bytes to flush
*
- * clflush is an unordered instruction which needs fencing with mfence
- * to avoid ordering issues.
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues.
*/
void clflush_cache_range(void *vaddr, unsigned int size)
{
@@ -136,11 +136,11 @@
mb();
for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
- clflush(vaddr);
+ clflushopt(vaddr);
/*
* Flush any possible final partial cacheline:
*/
- clflush(vend);
+ clflushopt(vend);
mb();
}
@@ -323,8 +323,12 @@
return prot;
}
-static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
- unsigned int *level)
+/*
+ * Look up the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
{
pud_t *pud;
pmd_t *pmd;
@@ -365,7 +369,7 @@
*/
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
- return __lookup_address_in_pgd(pgd_offset_k(address), address, level);
+ return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
@@ -373,7 +377,7 @@
unsigned int *level)
{
if (cpa->pgd)
- return __lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+ return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
address, level);
return lookup_address(address, level);
@@ -692,6 +696,18 @@
return true;
}
+static bool try_to_free_pud_page(pud_t *pud)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ if (!pud_none(pud[i]))
+ return false;
+
+ free_page((unsigned long)pud);
+ return true;
+}
+
static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
pte_t *pte = pte_offset_kernel(pmd, start);
@@ -805,6 +821,16 @@
*/
}
+static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
+{
+ pgd_t *pgd_entry = root + pgd_index(addr);
+
+ unmap_pud_range(pgd_entry, addr, end);
+
+ if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
+ pgd_clear(pgd_entry);
+}
+
static int alloc_pte_page(pmd_t *pmd)
{
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -999,9 +1025,8 @@
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
- bool allocd_pgd = false;
- pgd_t *pgd_entry;
pud_t *pud = NULL; /* shut up gcc */
+ pgd_t *pgd_entry;
int ret;
pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1015,7 +1040,6 @@
return -1;
set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
- allocd_pgd = true;
}
pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
@@ -1023,19 +1047,11 @@
ret = populate_pud(cpa, addr, pgd_entry, pgprot);
if (ret < 0) {
- unmap_pud_range(pgd_entry, addr,
+ unmap_pgd_range(cpa->pgd, addr,
addr + (cpa->numpages << PAGE_SHIFT));
-
- if (allocd_pgd) {
- /*
- * If I allocated this PUD page, I can just as well
- * free it in this error path.
- */
- pgd_clear(pgd_entry);
- free_page((unsigned long)pud);
- }
return ret;
}
+
cpa->numpages = ret;
return 0;
}
@@ -1377,10 +1393,10 @@
cache = cache_attr(mask_set);
/*
- * On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it and in the
+ * On success we use CLFLUSH, when the CPU supports it to
+ * avoid the WBINVD. If the CPU does not support it and in the
* error case we fall back to cpa_flush_all (which uses
- * wbindv):
+ * WBINVD):
*/
if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
@@ -1861,6 +1877,12 @@
return retval;
}
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+ unsigned numpages)
+{
+ unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
+}
+
/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1..0149575 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
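
This one-register fix corrects a calling-convention bug: bpf_internal_load_pointer_neg_helper() takes (skb, offset, size), and the x86-64 SysV ABI passes the third integer argument in %rdx/%edx; %ecx would be argument four. The helper's C prototype with the register mapping in comments:

    void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,   /* %rdi */
                                               int k,                       /* %esi */
                                               unsigned int size);          /* %edx, not %ecx */
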
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index b7b0b35..d51045a 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
+obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1a201ac..3781dd3 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -52,6 +52,7 @@
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
+#include <asm/uv/uv.h>
#define EFI_DEBUG
@@ -67,9 +68,7 @@
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
-unsigned long x86_efi_facility;
-
-static __initdata efi_config_table_type_t arch_tables[] = {
+static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
{UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
#endif
@@ -78,16 +77,7 @@
u64 efi_setup; /* efi setup_data physical address */
-/*
- * Returns 1 if 'facility' is enabled, 0 otherwise.
- */
-int efi_enabled(int facility)
-{
- return test_bit(facility, &x86_efi_facility) != 0;
-}
-EXPORT_SYMBOL(efi_enabled);
-
-static bool __initdata disable_runtime = false;
+static bool disable_runtime __initdata = false;
static int __init setup_noefi(char *arg)
{
disable_runtime = true;
@@ -256,27 +246,12 @@
return status;
}
-static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
- efi_time_cap_t *tc)
-{
- unsigned long flags;
- efi_status_t status;
-
- spin_lock_irqsave(&rtc_lock, flags);
- efi_call_phys_prelog();
- status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
- virt_to_phys(tc));
- efi_call_phys_epilog();
- spin_unlock_irqrestore(&rtc_lock, flags);
- return status;
-}
-
int efi_set_rtc_mmss(const struct timespec *now)
{
unsigned long nowtime = now->tv_sec;
- efi_status_t status;
- efi_time_t eft;
- efi_time_cap_t cap;
+ efi_status_t status;
+ efi_time_t eft;
+ efi_time_cap_t cap;
struct rtc_time tm;
status = efi.get_time(&eft, &cap);
@@ -294,9 +269,8 @@
eft.second = tm.tm_sec;
eft.nanosecond = 0;
} else {
- printk(KERN_ERR
- "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
- __FUNCTION__, nowtime);
+ pr_err("%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
+ __func__, nowtime);
return -1;
}
@@ -412,8 +386,7 @@
p < memmap.map_end;
p += memmap.desc_size, i++) {
md = p;
- pr_info("mem%02u: type=%u, attr=0x%llx, "
- "range=[0x%016llx-0x%016llx) (%lluMB)\n",
+ pr_info("mem%02u: type=%u, attr=0x%llx, range=[0x%016llx-0x%016llx) (%lluMB)\n",
i, md->type, md->attribute, md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
@@ -445,9 +418,8 @@
memblock_is_region_reserved(start, size)) {
/* Could not reserve, skip it */
md->num_pages = 0;
- memblock_dbg("Could not reserve boot range "
- "[0x%010llx-0x%010llx]\n",
- start, start+size-1);
+ memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
+ start, start+size-1);
} else
memblock_reserve(start, size);
}
@@ -455,7 +427,7 @@
void __init efi_unmap_memmap(void)
{
- clear_bit(EFI_MEMMAP, &x86_efi_facility);
+ clear_bit(EFI_MEMMAP, &efi.flags);
if (memmap.map) {
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
memmap.map = NULL;
@@ -466,9 +438,6 @@
{
void *p;
- if (!efi_is_native())
- return;
-
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
unsigned long long start = md->phys_addr;
@@ -583,17 +552,66 @@
return -EINVAL;
}
if ((efi.systab->hdr.revision >> 16) == 0)
- pr_err("Warning: System table version "
- "%d.%02d, expected 1.00 or greater!\n",
+ pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ return 0;
+}
+
+static int __init efi_runtime_init32(void)
+{
+ efi_runtime_services_32_t *runtime;
+
+ runtime = early_ioremap((unsigned long)efi.systab->runtime,
+ sizeof(efi_runtime_services_32_t));
+ if (!runtime) {
+ pr_err("Could not map the runtime service table!\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * We will only need *early* access to the following two
+ * We will only need *early* access to the following
+ * EFI runtime service before set_virtual_address_map
+ */
+ efi_phys.set_virtual_address_map =
+ (efi_set_virtual_address_map_t *)
+ (unsigned long)runtime->set_virtual_address_map;
+ early_iounmap(runtime, sizeof(efi_runtime_services_32_t));
+
+ return 0;
+}
+
+static int __init efi_runtime_init64(void)
+{
+ efi_runtime_services_64_t *runtime;
+
+ runtime = early_ioremap((unsigned long)efi.systab->runtime,
+ sizeof(efi_runtime_services_64_t));
+ if (!runtime) {
+ pr_err("Could not map the runtime service table!\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * We will only need *early* access to the following
+ * EFI runtime service before set_virtual_address_map
+ * is invoked.
+ */
+ efi_phys.set_virtual_address_map =
+ (efi_set_virtual_address_map_t *)
+ (unsigned long)runtime->set_virtual_address_map;
+ early_iounmap(runtime, sizeof(efi_runtime_services_64_t));
+
return 0;
}
static int __init efi_runtime_init(void)
{
- efi_runtime_services_t *runtime;
+ int rv;
/*
* Check out the runtime services table. We need to map
@@ -601,27 +619,15 @@
* address of several of the EFI runtime functions, needed to
* set the firmware into virtual mode.
*/
- runtime = early_ioremap((unsigned long)efi.systab->runtime,
- sizeof(efi_runtime_services_t));
- if (!runtime) {
- pr_err("Could not map the runtime service table!\n");
- return -ENOMEM;
- }
- /*
- * We will only need *early* access to the following
- * two EFI runtime services before set_virtual_address_map
- * is invoked.
- */
- efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
- efi_phys.set_virtual_address_map =
- (efi_set_virtual_address_map_t *)
- runtime->set_virtual_address_map;
- /*
- * Make efi_get_time can be called before entering
- * virtual mode.
- */
- efi.get_time = phys_efi_get_time;
- early_iounmap(runtime, sizeof(efi_runtime_services_t));
+ if (efi_enabled(EFI_64BIT))
+ rv = efi_runtime_init64();
+ else
+ rv = efi_runtime_init32();
+
+ if (rv)
+ return rv;
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return 0;
}
@@ -640,6 +646,8 @@
if (add_efi_memmap)
do_add_efi_memmap();
+ set_bit(EFI_MEMMAP, &efi.flags);
+
return 0;
}
@@ -722,7 +730,7 @@
if (efi_systab_init(efi_phys.systab))
return;
- set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
efi.config_table = (unsigned long)efi.systab->tables;
efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
@@ -750,24 +758,21 @@
if (efi_config_init(arch_tables))
return;
- set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
-
/*
* Note: We currently don't support runtime services on an EFI
* that doesn't match the kernel 32/64-bit mode.
*/
- if (!efi_is_native())
+ if (!efi_runtime_supported())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
else {
if (disable_runtime || efi_runtime_init())
return;
- set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
}
if (efi_memmap_init())
return;
- set_bit(EFI_MEMMAP, &x86_efi_facility);
+ set_bit(EFI_MEMMAP, &efi.flags);
print_efi_memmap();
}
@@ -844,6 +849,22 @@
(unsigned long long)md->phys_addr);
}
+static void native_runtime_setup(void)
+{
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+ efi.set_wakeup_time = virt_efi_set_wakeup_time;
+ efi.get_variable = virt_efi_get_variable;
+ efi.get_next_variable = virt_efi_get_next_variable;
+ efi.set_variable = virt_efi_set_variable;
+ efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+ efi.reset_system = virt_efi_reset_system;
+ efi.query_variable_info = virt_efi_query_variable_info;
+ efi.update_capsule = virt_efi_update_capsule;
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
+}
+
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
@@ -891,8 +912,9 @@
}
}
-static int __init save_runtime_map(void)
+static void __init save_runtime_map(void)
{
+#ifdef CONFIG_KEXEC
efi_memory_desc_t *md;
void *tmp, *p, *q = NULL;
int count = 0;
@@ -914,41 +936,47 @@
}
efi_runtime_map_setup(q, count, memmap.desc_size);
+ return;
- return 0;
out:
kfree(q);
- return -ENOMEM;
+ pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+#endif
+}
+
+static void *realloc_pages(void *old_memmap, int old_shift)
+{
+ void *ret;
+
+ ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
+ if (!ret)
+ goto out;
+
+ /*
+ * A first-time allocation doesn't have anything to copy.
+ */
+ if (!old_memmap)
+ return ret;
+
+ memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
+
+out:
+ free_pages((unsigned long)old_memmap, old_shift);
+ return ret;
}
/*
- * Map efi regions which were passed via setup_data. The virt_addr is a fixed
- * addr which was used in first kernel of a kexec boot.
+ * Map the efi memory ranges of the runtime services and update new_mmap with
+ * virtual addresses.
*/
-static void __init efi_map_regions_fixed(void)
+static void * __init efi_map_regions(int *count, int *pg_shift)
{
- void *p;
+ void *p, *new_memmap = NULL;
+ unsigned long left = 0;
efi_memory_desc_t *md;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- efi_map_region_fixed(md); /* FIXME: add error handling */
- get_systab_virt_addr(md);
- }
-
-}
-
-/*
- * Map efi memory ranges for runtime serivce and update new_memmap with virtual
- * addresses.
- */
-static void * __init efi_map_regions(int *count)
-{
- efi_memory_desc_t *md;
- void *p, *tmp, *new_memmap = NULL;
-
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
if (md->type != EFI_BOOT_SERVICES_CODE &&
@@ -960,20 +988,80 @@
efi_map_region(md);
get_systab_virt_addr(md);
- tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
- GFP_KERNEL);
- if (!tmp)
- goto out;
- new_memmap = tmp;
+ if (left < memmap.desc_size) {
+ new_memmap = realloc_pages(new_memmap, *pg_shift);
+ if (!new_memmap)
+ return NULL;
+
+ left += PAGE_SIZE << *pg_shift;
+ (*pg_shift)++;
+ }
+
memcpy(new_memmap + (*count * memmap.desc_size), md,
memmap.desc_size);
+
+ left -= memmap.desc_size;
(*count)++;
}
return new_memmap;
-out:
- kfree(new_memmap);
- return NULL;
+}
+
+static void __init kexec_enter_virtual_mode(void)
+{
+#ifdef CONFIG_KEXEC
+ efi_memory_desc_t *md;
+ void *p;
+
+ efi.systab = NULL;
+
+ /*
+ * We don't do virtual mode, since we don't do runtime services, on
+ * non-native EFI
+ */
+ if (!efi_is_native()) {
+ efi_unmap_memmap();
+ return;
+ }
+
+ /*
+ * Map efi regions which were passed via setup_data. The virt_addr is a
+ * fixed addr which was used in the first kernel of a kexec boot.
+ */
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ efi_map_region_fixed(md); /* FIXME: add error handling */
+ get_systab_virt_addr(md);
+ }
+
+ save_runtime_map();
+
+ BUG_ON(!efi.systab);
+
+ efi_sync_low_kernel_mappings();
+
+ /*
+ * Now that EFI is in virtual mode, update the function
+ * pointers in the runtime service table to the new virtual addresses.
+ *
+ * Call EFI services through wrapper functions.
+ */
+ efi.runtime_version = efi_systab.hdr.revision;
+
+ native_runtime_setup();
+
+ efi.set_virtual_address_map = NULL;
+
+ if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
+ runtime_code_page_mkexec();
+
+ /* clean DUMMY object */
+ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ 0, NULL);
+#endif
}
/*
@@ -995,57 +1083,53 @@
*
* Specially for kexec boot, efi runtime maps in previous kernel should
* be passed in via setup_data. In that case runtime ranges will be mapped
- * to the same virtual addresses as the first kernel.
+ * to the same virtual addresses as the first kernel, see
+ * kexec_enter_virtual_mode().
*/
-void __init efi_enter_virtual_mode(void)
+static void __init __efi_enter_virtual_mode(void)
{
- efi_status_t status;
+ int count = 0, pg_shift = 0;
void *new_memmap = NULL;
- int err, count = 0;
+ efi_status_t status;
efi.systab = NULL;
- /*
- * We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI
- */
- if (!efi_is_native()) {
- efi_unmap_memmap();
+ efi_merge_regions();
+ new_memmap = efi_map_regions(&count, &pg_shift);
+ if (!new_memmap) {
+ pr_err("Error reallocating memory, EFI runtime non-functional!\n");
return;
}
- if (efi_setup) {
- efi_map_regions_fixed();
- } else {
- efi_merge_regions();
- new_memmap = efi_map_regions(&count);
- if (!new_memmap) {
- pr_err("Error reallocating memory, EFI runtime non-functional!\n");
- return;
- }
- }
-
- err = save_runtime_map();
- if (err)
- pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+ save_runtime_map();
BUG_ON(!efi.systab);
- efi_setup_page_tables();
+ if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift))
+ return;
+
efi_sync_low_kernel_mappings();
+ efi_dump_pagetable();
- if (!efi_setup) {
+ if (efi_is_native()) {
status = phys_efi_set_virtual_address_map(
- memmap.desc_size * count,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)__pa(new_memmap));
+ memmap.desc_size * count,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+ } else {
+ status = efi_thunk_set_virtual_address_map(
+ efi_phys.set_virtual_address_map,
+ memmap.desc_size * count,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+ }
- if (status != EFI_SUCCESS) {
- pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
- status);
- panic("EFI call to SetVirtualAddressMap() failed!");
- }
+ if (status != EFI_SUCCESS) {
+ pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
+ status);
+ panic("EFI call to SetVirtualAddressMap() failed!");
}
/*
@@ -1055,23 +1139,43 @@
* Call EFI services through wrapper functions.
*/
efi.runtime_version = efi_systab.hdr.revision;
- efi.get_time = virt_efi_get_time;
- efi.set_time = virt_efi_set_time;
- efi.get_wakeup_time = virt_efi_get_wakeup_time;
- efi.set_wakeup_time = virt_efi_set_wakeup_time;
- efi.get_variable = virt_efi_get_variable;
- efi.get_next_variable = virt_efi_get_next_variable;
- efi.set_variable = virt_efi_set_variable;
- efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
- efi.reset_system = virt_efi_reset_system;
+
+ if (efi_is_native())
+ native_runtime_setup();
+ else
+ efi_thunk_runtime_setup();
+
efi.set_virtual_address_map = NULL;
- efi.query_variable_info = virt_efi_query_variable_info;
- efi.update_capsule = virt_efi_update_capsule;
- efi.query_capsule_caps = virt_efi_query_capsule_caps;
efi_runtime_mkexec();
- kfree(new_memmap);
+ /*
+ * We mapped the descriptor array into the EFI pagetable above but we're
+ * not unmapping it here. Here's why:
+ *
+ * We're copying select PGDs from the kernel page table to the EFI page
+ * table and when we do so and make changes to those PGDs like unmapping
+ * stuff from them, those changes appear in the kernel page table and we
+ * go boom.
+ *
+ * From setup_real_mode():
+ *
+ * ...
+ * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+ *
+ * In this particular case, our allocation is in PGD 0 of the EFI page
+ * table, but we've copied that PGD from PGD[272] of the kernel page table:
+ *
+ * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
+ *
+ * where the direct memory mapping in kernel space is.
+ *
+ * new_memmap's VA comes from that direct mapping, so clearing it here
+ * would clear it from the kernel page table too. That is why the call
+ * below stays commented out:
+ *
+ * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+ */
+ free_pages((unsigned long)new_memmap, pg_shift);
/* clean DUMMY object */
efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
@@ -1081,6 +1185,14 @@
0, NULL);
}
+void __init efi_enter_virtual_mode(void)
+{
+ if (efi_setup)
+ kexec_enter_virtual_mode();
+ else
+ __efi_enter_virtual_mode();
+}
+
/*
* Convenience functions to obtain memory types and attributes
*/
@@ -1118,9 +1230,8 @@
}
/*
- * Some firmware has serious problems when using more than 50% of the EFI
- * variable store, i.e. it triggers bugs that can brick machines. Ensure that
- * we never use more than this safe limit.
+ * Some firmware implementations refuse to boot if there's insufficient space
+ * in the variable store. Ensure that we never use more than a safe limit.
*
* Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
* store.
@@ -1139,10 +1250,9 @@
return status;
/*
- * Some firmware implementations refuse to boot if there's insufficient
- * space in the variable store. We account for that by refusing the
- * write if permitting it would reduce the available space to under
- * 5KB. This figure was provided by Samsung, so should be safe.
+ * We account for that by refusing the write if permitting it would
+ * reduce the available space to under 5KB. This figure was provided by
+ * Samsung, so should be safe.
*/
if ((remaining_size - size < EFI_MIN_RESERVE) &&
!efi_no_storage_paranoia) {
@@ -1205,8 +1315,27 @@
str++;
if (!strncmp(str, "old_map", 7))
- set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+ set_bit(EFI_OLD_MEMMAP, &efi.flags);
return 0;
}
early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+ /*
+ * Now that EFI setup is done, unmap the EFI memory map on mismatched
+ * firmware/kernel architectures, since there is no support for runtime
+ * services in that case.
+ */
+ if (!efi_runtime_supported()) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_unmap_memmap();
+ }
+
+ /*
+ * UV doesn't support the new EFI pagetable mapping yet.
+ */
+ if (is_uv_system())
+ set_bit(EFI_OLD_MEMMAP, &efi.flags);
+}
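
The page-table sharing described in the __efi_enter_virtual_mode() comment
above is the crux of why efi_cleanup_page_tables() stays commented out. A
minimal illustration (not kernel code; the helper name and simplified flow
are assumptions):

	/*
	 * The EFI page table is built by copying PGD *entries* out of the
	 * kernel page table, so both roots share the PUD/PMD/PTE pages
	 * hanging below those entries.
	 */
	void share_pgd_entry(pgd_t *efi_pgd, pgd_t *kernel_pgd, unsigned long va)
	{
		/* Both roots now reference the same lower-level tables... */
		efi_pgd[pgd_index(va)] = kernel_pgd[pgd_index(va)];

		/*
		 * ...so clearing a PTE reached via efi_pgd also clears it
		 * for walks starting at kernel_pgd. Unmapping new_memmap
		 * through the EFI root would punch a hole in the kernel
		 * direct mapping.
		 */
	}
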
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 0b74cdf..9ee3491 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -40,7 +40,12 @@
static unsigned long efi_rt_eflags;
void efi_sync_low_kernel_mappings(void) {}
-void efi_setup_page_tables(void) {}
+void __init efi_dump_pagetable(void) {}
+int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+ return 0;
+}
+void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) {}
void __init efi_map_region(efi_memory_desc_t *md)
{
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 0c2a234..290d397 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -39,6 +39,7 @@
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
+#include <asm/time.h>
static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;
@@ -58,7 +59,8 @@
u64 prev_cr3;
pgd_t *efi_pgt;
bool use_pgd;
-};
+ u64 phys_stack;
+} __packed;
static void __init early_code_mapping_set_exec(int executable)
{
@@ -137,12 +139,64 @@
sizeof(pgd_t) * num_pgds);
}
-void efi_setup_page_tables(void)
+int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+ unsigned long text;
+ struct page *page;
+ unsigned npages;
+ pgd_t *pgd;
- if (!efi_enabled(EFI_OLD_MEMMAP))
- efi_scratch.use_pgd = true;
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return 0;
+
+ efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+ pgd = __va(efi_scratch.efi_pgt);
+
+ /*
+ * It can happen that the physical address of new_memmap lands in memory
+ * which is not mapped in the EFI page table. Therefore we need to go
+ * and ident-map those pages containing the map before calling
+ * phys_efi_set_virtual_address_map().
+ */
+ if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+ pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+ return 1;
+ }
+
+ efi_scratch.use_pgd = true;
+
+ /*
+ * When making calls to the firmware everything needs to be 1:1
+ * mapped and addressable with 32-bit pointers. Map the kernel
+ * text and allocate a new stack because we can't rely on the
+ * stack pointer being < 4GB.
+ */
+ if (!IS_ENABLED(CONFIG_EFI_MIXED))
+ return 0;
+
+ page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!page)
+ panic("Unable to allocate EFI runtime stack < 4GB\n");
+
+ efi_scratch.phys_stack = virt_to_phys(page_address(page));
+ efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
+
+ npages = (_end - _text) >> PAGE_SHIFT;
+ text = __pa(_text);
+
+ if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+ pr_err("Failed to map kernel text 1:1\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+ kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
@@ -173,6 +227,16 @@
*/
__map_region(md, md->phys_addr);
+ /*
+ * Enforce the 1:1 mapping as the default virtual address when
+ * booting in EFI mixed mode, because even though we may be
+ * running a 64-bit kernel, the firmware may only be 32-bit.
+ */
+ if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
+ md->virt_addr = md->phys_addr;
+ return;
+ }
+
efi_va -= size;
/* Is PA 2M-aligned? */
@@ -242,3 +306,299 @@
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
}
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+ ptdump_walk_pgd_level(NULL, pgd);
+#endif
+}
+
+#ifdef CONFIG_EFI_MIXED
+extern efi_status_t efi64_thunk(u32, ...);
+
+#define runtime_service32(func) \
+({ \
+ u32 table = (u32)(unsigned long)efi.systab; \
+ u32 *rt, *___f; \
+ \
+ rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
+ ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
+ *___f; \
+})
+
+/*
+ * Switch to the EFI page tables early so that we can access the 1:1
+ * runtime services mappings which are not mapped in any other page
+ * tables. This function must be called before runtime_service32().
+ *
+ * Also, disable interrupts because the IDT points to 64-bit handlers,
+ * which aren't going to function correctly when we switch to 32-bit.
+ */
+#define efi_thunk(f, ...) \
+({ \
+ efi_status_t __s; \
+ unsigned long flags; \
+ u32 func; \
+ \
+ efi_sync_low_kernel_mappings(); \
+ local_irq_save(flags); \
+ \
+ efi_scratch.prev_cr3 = read_cr3(); \
+ write_cr3((unsigned long)efi_scratch.efi_pgt); \
+ __flush_tlb_all(); \
+ \
+ func = runtime_service32(f); \
+ __s = efi64_thunk(func, __VA_ARGS__); \
+ \
+ write_cr3(efi_scratch.prev_cr3); \
+ __flush_tlb_all(); \
+ local_irq_restore(flags); \
+ \
+ __s; \
+})
+
+efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map)
+{
+ efi_status_t status;
+ unsigned long flags;
+ u32 func;
+
+ efi_sync_low_kernel_mappings();
+ local_irq_save(flags);
+
+ efi_scratch.prev_cr3 = read_cr3();
+ write_cr3((unsigned long)efi_scratch.efi_pgt);
+ __flush_tlb_all();
+
+ func = (u32)(unsigned long)phys_set_virtual_address_map;
+ status = efi64_thunk(func, memory_map_size, descriptor_size,
+ descriptor_version, virtual_map);
+
+ write_cr3(efi_scratch.prev_cr3);
+ __flush_tlb_all();
+ local_irq_restore(flags);
+
+ return status;
+}
+
+static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+ efi_status_t status;
+ u32 phys_tm, phys_tc;
+
+ spin_lock(&rtc_lock);
+
+ phys_tm = virt_to_phys(tm);
+ phys_tc = virt_to_phys(tc);
+
+ status = efi_thunk(get_time, phys_tm, phys_tc);
+
+ spin_unlock(&rtc_lock);
+
+ return status;
+}
+
+static efi_status_t efi_thunk_set_time(efi_time_t *tm)
+{
+ efi_status_t status;
+ u32 phys_tm;
+
+ spin_lock(&rtc_lock);
+
+ phys_tm = virt_to_phys(tm);
+
+ status = efi_thunk(set_time, phys_tm);
+
+ spin_unlock(&rtc_lock);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
+ efi_time_t *tm)
+{
+ efi_status_t status;
+ u32 phys_enabled, phys_pending, phys_tm;
+
+ spin_lock(&rtc_lock);
+
+ phys_enabled = virt_to_phys(enabled);
+ phys_pending = virt_to_phys(pending);
+ phys_tm = virt_to_phys(tm);
+
+ status = efi_thunk(get_wakeup_time, phys_enabled,
+ phys_pending, phys_tm);
+
+ spin_unlock(&rtc_lock);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+ efi_status_t status;
+ u32 phys_tm;
+
+ spin_lock(&rtc_lock);
+
+ phys_tm = virt_to_phys(tm);
+
+ status = efi_thunk(set_wakeup_time, enabled, phys_tm);
+
+ spin_unlock(&rtc_lock);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size, void *data)
+{
+ efi_status_t status;
+ u32 phys_name, phys_vendor, phys_attr;
+ u32 phys_data_size, phys_data;
+
+ phys_data_size = virt_to_phys(data_size);
+ phys_vendor = virt_to_phys(vendor);
+ phys_name = virt_to_phys(name);
+ phys_attr = virt_to_phys(attr);
+ phys_data = virt_to_phys(data);
+
+ status = efi_thunk(get_variable, phys_name, phys_vendor,
+ phys_attr, phys_data_size, phys_data);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+{
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+
+ phys_name = virt_to_phys(name);
+ phys_vendor = virt_to_phys(vendor);
+ phys_data = virt_to_phys(data);
+
+ /* If data_size is > sizeof(u32) we've got problems */
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ efi_status_t status;
+ u32 phys_name_size, phys_name, phys_vendor;
+
+ phys_name_size = virt_to_phys(name_size);
+ phys_vendor = virt_to_phys(vendor);
+ phys_name = virt_to_phys(name);
+
+ status = efi_thunk(get_next_variable, phys_name_size,
+ phys_name, phys_vendor);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_high_mono_count(u32 *count)
+{
+ efi_status_t status;
+ u32 phys_count;
+
+ phys_count = virt_to_phys(count);
+ status = efi_thunk(get_next_high_mono_count, phys_count);
+
+ return status;
+}
+
+static void
+efi_thunk_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
+{
+ u32 phys_data;
+
+ phys_data = virt_to_phys(data);
+
+ efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+}
+
+static efi_status_t
+efi_thunk_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count, unsigned long sg_list)
+{
+ /*
+ * To properly support this function we would need to repackage
+ * 'capsules' because the firmware doesn't understand 64-bit
+ * pointers.
+ */
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ phys_storage = virt_to_phys(storage_space);
+ phys_remaining = virt_to_phys(remaining_space);
+ phys_max = virt_to_phys(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count, u64 *max_size,
+ int *reset_type)
+{
+ /*
+ * To properly support this function we would need to repackage
+ * 'capsules' because the firmware doesn't understand 64-bit
+ * pointers.
+ */
+ return EFI_UNSUPPORTED;
+}
+
+void efi_thunk_runtime_setup(void)
+{
+ efi.get_time = efi_thunk_get_time;
+ efi.set_time = efi_thunk_set_time;
+ efi.get_wakeup_time = efi_thunk_get_wakeup_time;
+ efi.set_wakeup_time = efi_thunk_set_wakeup_time;
+ efi.get_variable = efi_thunk_get_variable;
+ efi.get_next_variable = efi_thunk_get_next_variable;
+ efi.set_variable = efi_thunk_set_variable;
+ efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
+ efi.reset_system = efi_thunk_reset_system;
+ efi.query_variable_info = efi_thunk_query_variable_info;
+ efi.update_capsule = efi_thunk_update_capsule;
+ efi.query_capsule_caps = efi_thunk_query_capsule_caps;
+}
+#endif /* CONFIG_EFI_MIXED */
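
One property of the efi_thunk_* wrappers above is worth spelling out: every
pointer argument is converted with virt_to_phys() and truncated to a u32,
which is only sound while the buffers are identity-mapped and physically
below 4GB. A sketch of that invariant (the helper is invented for
illustration):

	static bool arg_reachable_by_32bit_firmware(void *p)
	{
		u64 pa = virt_to_phys(p);

		/* Must fit in 32 bits so the 32-bit firmware can dereference it. */
		return pa == (u32)pa;
	}
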
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 88073b1..e0984ef 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -7,6 +7,10 @@
*/
#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+#include <asm/page_types.h>
#define SAVE_XMM \
mov %rsp, %rax; \
@@ -164,7 +168,169 @@
ret
ENDPROC(efi_call6)
+#ifdef CONFIG_EFI_MIXED
+
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+ movl %ds, %eax
+ push %rax
+ movl %es, %eax
+ push %rax
+ movl %ss, %eax
+ push %rax
+
+ subq $32, %rsp
+ movl %esi, 0x0(%rsp)
+ movl %edx, 0x4(%rsp)
+ movl %ecx, 0x8(%rsp)
+ movq %r8, %rsi
+ movl %esi, 0xc(%rsp)
+ movq %r9, %rsi
+ movl %esi, 0x10(%rsp)
+
+ sgdt save_gdt(%rip)
+
+ leaq 1f(%rip), %rbx
+ movq %rbx, func_rt_ptr(%rip)
+
+ /* Switch to gdt with 32-bit segments */
+ movl 64(%rsp), %eax
+ lgdt (%rax)
+
+ leaq efi_enter32(%rip), %rax
+ pushq $__KERNEL_CS
+ pushq %rax
+ lretq
+
+1: addq $32, %rsp
+
+ lgdt save_gdt(%rip)
+
+ pop %rbx
+ movl %ebx, %ss
+ pop %rbx
+ movl %ebx, %es
+ pop %rbx
+ movl %ebx, %ds
+
+ /*
+ * Convert 32-bit status code into 64-bit.
+ */
+ test %rax, %rax
+ jz 1f
+ movl %eax, %ecx
+ andl $0x0fffffff, %ecx
+ andl $0xf0000000, %eax
+ shl $32, %rax
+ or %rcx, %rax
+1:
+ ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+ movq func_rt_ptr(%rip), %rax
+ push %rax
+ mov %rdi, %rax
+ ret
+ENDPROC(efi_exit32)
+
+ .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+ movl $__KERNEL_DS, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+
+ /* Reload pgtables */
+ movl %cr3, %eax
+ movl %eax, %cr3
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Disable long mode via EFER */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btrl $_EFER_LME, %eax
+ wrmsr
+
+ call *%edi
+
+ /* We must preserve return value */
+ movl %eax, %edi
+
+ /*
+ * Some firmware will return with interrupts enabled. Be sure to
+ * disable them before we switch GDTs.
+ */
+ cli
+
+ movl 68(%esp), %eax
+ movl %eax, 2(%eax)
+ lgdtl (%eax)
+
+ movl %cr4, %eax
+ btsl $(X86_CR4_PAE_BIT), %eax
+ movl %eax, %cr4
+
+ movl %cr3, %eax
+ movl %eax, %cr3
+
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ xorl %eax, %eax
+ lldt %ax
+
+ movl 72(%esp), %eax
+ pushl $__KERNEL_CS
+ pushl %eax
+
+ /* Enable paging */
+ movl %cr0, %eax
+ btsl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+ lret
+ENDPROC(efi_enter32)
+
+ .data
+ .balign 8
+ .global efi32_boot_gdt
+efi32_boot_gdt: .word 0
+ .quad 0
+
+save_gdt: .word 0
+ .quad 0
+func_rt_ptr: .quad 0
+
+ .global efi_gdt64
+efi_gdt64:
+ .word efi_gdt64_end - efi_gdt64
+ .long 0 /* Filled out by user */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+efi_gdt64_end:
+#endif /* CONFIG_EFI_MIXED */
+
.data
ENTRY(efi_scratch)
.fill 3,8,0
.byte 0
+ .quad 0
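
The status-widening block in __efi64_thunk above is dense; the same
arithmetic rendered in C may help. A sketch only, with an invented name:

	/*
	 * EFI error codes set the most significant bit of the native word,
	 * so a 32-bit status such as 0x80000003 must widen to
	 * 0x8000000000000003: move the top nibble up by 32 bits and keep
	 * the low 28 code bits.
	 */
	static u64 efi32_status_to_u64(u32 status)
	{
		if (!status)		/* EFI_SUCCESS stays 0 */
			return 0;

		return ((u64)(status & 0xf0000000) << 32) | (status & 0x0fffffff);
	}
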
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
new file mode 100644
index 0000000..8806fa7
--- /dev/null
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+ .text
+ .code64
+ENTRY(efi64_thunk)
+ push %rbp
+ push %rbx
+
+ /*
+ * Switch to 1:1 mapped 32-bit stack pointer.
+ */
+ movq %rsp, efi_saved_sp(%rip)
+ movq efi_scratch+25(%rip), %rsp
+
+ /*
+ * Calculate the physical address of the kernel text.
+ */
+ movq $__START_KERNEL_map, %rax
+ subq phys_base(%rip), %rax
+
+ /*
+ * Push some physical addresses onto the stack. This is easier
+ * to do now in a code64 section while the assembler can address
+ * 64-bit values. Note that all the addresses on the stack are
+ * 32-bit.
+ */
+ subq $16, %rsp
+ leaq efi_exit32(%rip), %rbx
+ subq %rax, %rbx
+ movl %ebx, 8(%rsp)
+ leaq efi_gdt64(%rip), %rbx
+ subq %rax, %rbx
+ movl %ebx, 2(%ebx)
+ movl %ebx, 4(%rsp)
+ leaq efi_gdt32(%rip), %rbx
+ subq %rax, %rbx
+ movl %ebx, 2(%ebx)
+ movl %ebx, (%rsp)
+
+ leaq __efi64_thunk(%rip), %rbx
+ subq %rax, %rbx
+ call *%rbx
+
+ movq efi_saved_sp(%rip), %rsp
+ pop %rbx
+ pop %rbp
+ retq
+ENDPROC(efi64_thunk)
+
+ .data
+efi_gdt32:
+ .word efi_gdt32_end - efi_gdt32
+ .long 0 /* Filled out above */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00cf9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+efi_gdt32_end:
+
+efi_saved_sp: .quad 0
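
The physical-address arithmetic at the top of efi64_thunk() relies on the
kernel text being mapped at __START_KERNEL_map with the physical load
offset recorded in phys_base, so a text symbol's physical address falls out
of one subtraction. The same computation in C, purely illustrative:

	/* pa(sym) = va(sym) - (__START_KERNEL_map - phys_base) */
	static unsigned long text_symbol_to_phys(unsigned long vaddr)
	{
		return vaddr - (__START_KERNEL_map - phys_base);
	}
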
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
index 39febb2..9471b94 100644
--- a/arch/x86/platform/ts5500/ts5500.c
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -88,7 +88,7 @@
static const struct {
const char * const string;
const ssize_t offset;
-} ts5500_signatures[] __initdata = {
+} ts5500_signatures[] __initconst = {
{ "TS-5x00 AMD Elan", 0xb14 },
};
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c..cc04e67 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
#define smp_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb() wmb()
-#else /* CONFIG_X86_OOSTORE */
#define smp_wmb() barrier()
-#endif /* CONFIG_X86_OOSTORE */
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index fd14be1..9206ac7 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -2,6 +2,8 @@
# Building vDSO images for x86.
#
+KBUILD_CFLAGS += $(DISABLE_LTO)
+
VDSO64-$(CONFIG_X86_64) := y
VDSOX32-$(CONFIG_X86_X32_ABI) := y
VDSO32-$(CONFIG_X86_32) := y
@@ -35,7 +37,8 @@
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-Wl,--no-undefined \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+ -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+ $(DISABLE_LTO)
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -127,7 +130,7 @@
vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
# This makes sure the $(obj) subdirectory exists even though vdso32/
# is not a kbuild sub-make subdirectory.
@@ -181,7 +184,8 @@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+ $(LTO_CFLAGS)
GCOV_PROFILE := n
#
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e..2423ef0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index ba56e11..c87ae7c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,6 +20,7 @@
select HAVE_FUNCTION_TRACER
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PERF_EVENTS
+ select COMMON_CLK
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
@@ -80,7 +81,6 @@
config XTENSA_VARIANT_FSF
bool "fsf - default (not generic) configuration"
select MMU
- select HAVE_XTENSA_GPIO32
config XTENSA_VARIANT_DC232B
bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
@@ -135,7 +135,6 @@
config SMP
bool "Enable Symmetric multi-processing support"
depends on HAVE_SMP
- select USE_GENERIC_SMP_HELPERS
select GENERIC_SMP_IDLE_THREAD
help
Enable SMP software; allows more than one CPU/core
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index 46b4f5e..e7370b1 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -35,6 +35,13 @@
interrupt-controller;
};
+ clocks {
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+ };
+
serial0: serial@fd050020 {
device_type = "serial";
compatible = "ns16550a";
@@ -42,9 +49,7 @@
reg = <0xfd050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
- /* Filled in by platform_setup from FPGA register
- * clock-frequency = <100000000>;
- */
+ clocks = <&osc>;
};
enet0: ethoc@fd030000 {
@@ -52,5 +57,6 @@
reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
};
};
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 0a337e4..c3d20ba 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -9,6 +9,7 @@
generic-y += exec.h
generic-y += fcntl.h
generic-y += hardirq.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += kdebug.h
@@ -17,7 +18,9 @@
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += percpu.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -27,5 +30,3 @@
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 2a042d4..7494420 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -25,7 +25,7 @@
#ifdef CONFIG_MMU
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 8c194f6..677bfcf 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -23,25 +23,37 @@
static inline void spill_registers(void)
{
-
+#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
- "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
- "mov a12, a0\n\t"
- "rsr a13, sar\n\t"
- "xsr a14, ps\n\t"
- "movi a0, _spill_registers\n\t"
- "rsync\n\t"
- "callx0 a0\n\t"
- "mov a0, a12\n\t"
- "wsr a13, sar\n\t"
- "wsr a14, ps\n\t"
- : :
-#if defined(CONFIG_FRAME_POINTER)
- : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
-#else
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+ " call12 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " _entry a1, 48\n"
+ " addi a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
#endif
- "memory");
+ " _entry a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a4, a4\n"
+#endif
+ " retw\n"
+ "2:\n"
+ : : : "a12", "a13", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
}
#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 5791b45..f74ddfb 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -25,7 +25,7 @@
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#define XCHAL_KIO_SIZE 0x10000000
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 51940fe..b939552 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -734,7 +734,12 @@
#define __NR_accept4 333
__SYSCALL(333, sys_accept4, 4)
-#define __NR_syscall_count 334
+#define __NR_sched_setattr 334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr 335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_syscall_count 336
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 21dbe6b..ef7f499 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1081,34 +1081,202 @@
rsr a0, sar
s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_SAR
+
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+
s32i a4, a2, PT_AREG4
- s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
-
- /* The spill routine might clobber a7, a11, and a15. */
-
s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
s32i a15, a2, PT_AREG15
- call0 _spill_registers # destroys a3, a4, and SAR
+ /*
+ * Rotate ws so that the current windowbase is at bit 0.
+ * Assume ws = xxxwww1yy (www1 current window frame).
+ * Rotate ws right so that a4 = yyxxxwww1.
+ */
+
+ rsr a0, windowbase
+ rsr a3, windowstart # a3 = xxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
+ srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
+
+ /* We are done if there are no more than the current register frame. */
+
+ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
+ movi a0, (1 << (WSBITS-1))
+ _beqz a3, .Lnospill # only one active frame? jump
+
+ /* We want 1 at the top, so that we return to the current windowbase */
+
+ or a3, a3, a0 # 1yyxxxwww
+
+ /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+ wsr a3, windowstart # save shifted windowstart
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
+
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
+ movi a3, WSBITS
+ sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
+ ssr a0 # save in SAR for later.
+
+ rsr a3, windowbase
+ add a3, a3, a0
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, windowstart
+ srl a3, a3 # shift windowstart
+
+ /* WB is now just one frame below the oldest frame in the register
+ window. WS is shifted so the oldest frame is in bit 0, thus, WB
+ and WS differ by one 4-register frame. */
+
+ /* Save frames. Depending on what call was used (call4, call8, call12),
+ * we have to save 4, 8, or 12 registers.
+ */
+
+.Lloop: _bbsi.l a3, 1, .Lc4
+ _bbci.l a3, 2, .Lc12
+
+.Lc8: s32e a4, a13, -16
+ l32e a4, a5, -12
+ s32e a8, a4, -32
+ s32e a5, a13, -12
+ s32e a6, a13, -8
+ s32e a7, a13, -4
+ s32e a9, a4, -28
+ s32e a10, a4, -24
+ s32e a11, a4, -20
+ srli a11, a3, 2 # shift windowbase by 2
+ rotw 2
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc4: s32e a4, a9, -16
+ s32e a5, a9, -12
+ s32e a6, a9, -8
+ s32e a7, a9, -4
+
+ srli a7, a3, 1
+ rotw 1
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero!
+
+ /* 12-register frame (call12) */
+
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
+
+ s32e a9, a8, -44
+ s32e a10, a8, -40
+ s32e a11, a8, -36
+ s32e a12, a8, -32
+ s32e a13, a8, -28
+ s32e a14, a8, -24
+ s32e a15, a8, -20
+ srli a15, a3, 3
+
+ /* The stack pointer for a4..a7 is out of reach, so we rotate the
+ * window, grab the stackpointer, and rotate back.
+ * Alternatively, we could also use the following approach, but that
+ * makes the fixup routine much more complicated:
+ * rotw 1
+ * s32e a0, a13, -16
+ * ...
+ * rotw 2
+ */
+
+ rotw 1
+ mov a4, a13
+ rotw -1
+
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
+
+ rotw 3
+
+ _beqi a3, 1, .Lexit
+ j .Lloop
+
+.Lexit:
+
+ /* Done. Do the final rotation and set WS */
+
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
/* Advance PC, restore registers and SAR, and return from exception. */
- l32i a3, a2, PT_AREG5
- l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_SAR
l32i a0, a2, PT_AREG0
wsr a3, sar
l32i a3, a2, PT_AREG3
/* Restore clobbered registers. */
+ l32i a4, a2, PT_AREG4
l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
l32i a15, a2, PT_AREG15
movi a2, 0
rfe
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
+ * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+ */
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, windowstart
+ wsr a1, windowbase
+ rsync
+
+ movi a0, 0
+
+ rsr a3, excsave1
+ l32i a1, a3, EXC_TABLE_KSTK
+
+ movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a4, ps
+ rsync
+
+ movi a6, SIGSEGV
+ movi a4, do_exit
+ callx4 a4
+
+ /* shouldn't return, so panic */
+
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0 # should not return
+1: j 1b
+
+
ENDPROC(fast_syscall_spill_registers)
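
The windowstart rotation at the top of the spill sequence above (ws =
xxxwww1yy rotated right so the current frame's start bit lands at bit 0)
can be modelled in C. A sketch mirroring the slli/or/srl instructions; the
helper name is invented:

	static unsigned rotate_ws_right(unsigned ws, unsigned windowbase)
	{
		/* Duplicate the WSBITS-wide field so the rotation wraps. */
		unsigned wide = ws | (ws << WSBITS);

		return (wide >> windowbase) & ((1u << WSBITS) - 1);
	}
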
/* Fixup handler.
@@ -1117,6 +1285,13 @@
* We basically restore WINDOWBASE and WINDOWSTART to the condition when
* we entered the spill routine and jump to the user exception handler.
*
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
* a0: value of depc, original value in depc
* a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable, original value in excsave1
@@ -1131,10 +1306,15 @@
/* We need to make sure the current registers (a0-a3) are preserved.
* To do this, we simply set the bit for the current window frame
* in WS, so that the exception handlers save them to the task stack.
+ *
+ * Note: we use a3 to set the windowbase, so we take a special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
*/
xsr a3, excsave1 # get spill-mask
slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
slli a2, a3, 32-WSBITS
src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
@@ -1220,209 +1400,6 @@
ENDPROC(fast_syscall_spill_registers_fixup_return)
-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- * - must be called with call0.
- * - uses a3, a4 and SAR.
- * - the last 'valid' register of each frame are clobbered.
- * - the caller must have registered a fixup handler
- * (or be inside a critical section)
- * - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
-
- /*
- * Rotate ws so that the current windowbase is at bit 0.
- * Assume ws = xxxwww1yy (www1 current window frame).
- * Rotate ws right so that a4 = yyxxxwww1.
- */
-
- rsr a4, windowbase
- rsr a3, windowstart # a3 = xxxwww1yy
- ssr a4 # holds WB
- slli a4, a3, WSBITS
- or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
- srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
-
- /* We are done if there are no more than the current register frame. */
-
- extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
- movi a4, (1 << (WSBITS-1))
- _beqz a3, .Lnospill # only one active frame? jump
-
- /* We want 1 at the top, so that we return to the current windowbase */
-
- or a3, a3, a4 # 1yyxxxwww
-
- /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
-
- wsr a3, windowstart # save shifted windowstart
- neg a4, a3
- and a3, a4, a3 # first bit set from right: 000010000
-
- ffs_ws a4, a3 # a4: shifts to skip empty frames
- movi a3, WSBITS
- sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
- ssr a4 # save in SAR for later.
-
- rsr a3, windowbase
- add a3, a3, a4
- wsr a3, windowbase
- rsync
-
- rsr a3, windowstart
- srl a3, a3 # shift windowstart
-
- /* WB is now just one frame below the oldest frame in the register
- window. WS is shifted so the oldest frame is in bit 0, thus, WB
- and WS differ by one 4-register frame. */
-
- /* Save frames. Depending what call was used (call4, call8, call12),
- * we have to save 4,8. or 12 registers.
- */
-
- _bbsi.l a3, 1, .Lc4
- _bbsi.l a3, 2, .Lc8
-
- /* Special case: we have a call12-frame starting at a4. */
-
- _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
-
- s32e a4, a1, -16 # a1 is valid with an empty spill area
- l32e a4, a5, -12
- s32e a8, a4, -48
- mov a8, a4
- l32e a4, a1, -16
- j .Lc12c
-
-.Lnospill:
- ret
-
-.Lloop: _bbsi.l a3, 1, .Lc4
- _bbci.l a3, 2, .Lc12
-
-.Lc8: s32e a4, a13, -16
- l32e a4, a5, -12
- s32e a8, a4, -32
- s32e a5, a13, -12
- s32e a6, a13, -8
- s32e a7, a13, -4
- s32e a9, a4, -28
- s32e a10, a4, -24
- s32e a11, a4, -20
-
- srli a11, a3, 2 # shift windowbase by 2
- rotw 2
- _bnei a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
- rotw 1
- rsr a3, windowbase
- ssl a3
- movi a3, 1
- sll a3, a3
- wsr a3, windowstart
- ret
-
-.Lc4: s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
-
- srli a7, a3, 1
- rotw 1
- _bnei a3, 1, .Lloop
- j .Lexit
-
-.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero!
-
- /* 12-register frame (call12) */
-
- l32e a2, a5, -12
- s32e a8, a2, -48
- mov a8, a2
-
-.Lc12c: s32e a9, a8, -44
- s32e a10, a8, -40
- s32e a11, a8, -36
- s32e a12, a8, -32
- s32e a13, a8, -28
- s32e a14, a8, -24
- s32e a15, a8, -20
- srli a15, a3, 3
-
- /* The stack pointer for a4..a7 is out of reach, so we rotate the
- * window, grab the stackpointer, and rotate back.
- * Alternatively, we could also use the following approach, but that
- * makes the fixup routine much more complicated:
- * rotw 1
- * s32e a0, a13, -16
- * ...
- * rotw 2
- */
-
- rotw 1
- mov a5, a13
- rotw -1
-
- s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
-
- rotw 3
-
- _beqi a3, 1, .Lexit
- j .Lloop
-
-.Linvalid_mask:
-
- /* We get here because of an unrecoverable error in the window
- * registers. If we are in user space, we kill the application,
- * however, this condition is unrecoverable in kernel space.
- */
-
- rsr a0, ps
- _bbci.l a0, PS_UM_BIT, 1f
-
- /* User space: Setup a dummy frame and kill application.
- * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
- */
-
- movi a0, 1
- movi a1, 0
-
- wsr a0, windowstart
- wsr a1, windowbase
- rsync
-
- movi a0, 0
-
- rsr a3, excsave1
- l32i a1, a3, EXC_TABLE_KSTK
-
- movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
- wsr a4, ps
- rsync
-
- movi a6, SIGSEGV
- movi a4, do_exit
- callx4 a4
-
-1: /* Kernel space: PANIC! */
-
- wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0 # should not return
-1: j 1b
-
-ENDPROC(_spill_registers)
-
#ifdef CONFIG_MMU
/*
* We should never get here. Bail out!
@@ -1794,6 +1771,43 @@
ENDPROC(system_call)
+/*
+ * Spill live registers on the kernel stack macro.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
/*
* Task switch.
@@ -1806,21 +1820,20 @@
entry a1, 16
- mov a12, a2 # preserve 'prev' (a2)
- mov a13, a3 # and 'next' (a3)
+ mov a10, a2 # preserve 'prev' (a2)
+ mov a11, a3 # and 'next' (a3)
l32i a4, a2, TASK_THREAD_INFO
l32i a5, a3, TASK_THREAD_INFO
- save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- s32i a0, a12, THREAD_RA # save return address
- s32i a1, a12, THREAD_SP # save stack pointer
+ s32i a0, a10, THREAD_RA # save return address
+ s32i a1, a10, THREAD_SP # save stack pointer
/* Disable ints while we manipulate the stack pointer. */
- movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
- xsr a14, ps
+ rsil a14, LOCKLEVEL
rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
@@ -1835,7 +1848,7 @@
/* Flush register file. */
- call0 _spill_registers # destroys a3, a4, and SAR
+ spill_registers_kernel
/* Set kernel stack (and leave critical section)
* Note: It's safe to set it here. The stack will not be overwritten
@@ -1851,13 +1864,13 @@
/* restore context of the task 'next' */
- l32i a0, a13, THREAD_RA # restore return address
- l32i a1, a13, THREAD_SP # restore stack pointer
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
- load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
wsr a14, ps
- mov a2, a12 # return 'prev'
+ mov a2, a10 # return 'prev'
rsync
retw
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 7d12af1..84fe931 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
+#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -276,6 +277,7 @@
static int __init xtensa_device_probe(void)
{
+ of_clk_init(NULL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 08b769d..2a1823d 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -30,6 +30,7 @@
#include <asm/platform.h>
unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
static cycle_t ccount_read(struct clocksource *cs)
{
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index cb8fd44..f9e1ec3 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -235,7 +235,7 @@
/* Check for overflow/underflow exception, jump if overflow. */
- _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
/*
* Restart window underflow exception.
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 74a60c7..80b33ed 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -122,9 +122,7 @@
EXPORT_SYMBOL(insl);
extern long common_exception_return;
-extern long _spill_registers;
EXPORT_SYMBOL(common_exception_return);
-EXPORT_SYMBOL(_spill_registers);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 479d753..aff108d 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -90,7 +90,7 @@
/*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
@@ -142,9 +142,14 @@
/* Add all remaining memory pieces into the bootmem map */
- for (i=0; i<sysmem.nr_banks; i++)
- free_bootmem(sysmem.bank[i].start,
- sysmem.bank[i].end - sysmem.bank[i].start);
+ for (i = 0; i < sysmem.nr_banks; i++) {
+ if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+ unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+ sysmem.bank[i].end);
+ free_bootmem(sysmem.bank[i].start,
+ end - sysmem.bank[i].start);
+ }
+ }
}
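
The clamp above is easiest to see with concrete numbers; the values in this
fragment are invented for illustration:

	/*
	 * With PAGE_SHIFT = 12 and max_low_pfn = 0x20000 (512 MiB of low
	 * memory), a bank spanning 0x1ff00000..0x30000000 is registered
	 * only up to 0x20000000; a bank starting at or above the limit is
	 * skipped entirely.
	 */
	unsigned long limit = max_low_pfn << PAGE_SHIFT;	/* 0x20000000 */

	if (bank_start >> PAGE_SHIFT < max_low_pfn)
		free_bootmem(bank_start, min(limit, bank_end) - bank_start);
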
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 36ec171..861203e 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -39,7 +39,7 @@
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 8002278..57fd08b 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -135,11 +135,11 @@
static int __init machine_setup(void)
{
- struct device_node *serial;
+ struct device_node *clock;
struct device_node *eth = NULL;
- for_each_compatible_node(serial, NULL, "ns16550a")
- update_clock_frequency(serial);
+ for_each_node_by_name(clock, "main-oscillator")
+ update_clock_frequency(clock);
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
@@ -290,6 +290,7 @@
* knows whether they set it correctly on the DIP switches.
*/
pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+ ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
return 0;
}
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
index bf40201..244cdea 100644
--- a/arch/xtensa/variants/fsf/include/variant/tie.h
+++ b/arch/xtensa/variants/fsf/include/variant/tie.h
@@ -18,13 +18,6 @@
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
-/* Basic parameters of each coprocessor: */
-#define XCHAL_CP7_NAME "XTIOP"
-#define XCHAL_CP7_IDENT XTIOP
-#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
-#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
-#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
-
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
@@ -42,6 +35,8 @@
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_SIZE 0
+#define XCHAL_CP7_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
diff --git a/block/blk-core.c b/block/blk-core.c
index 853f927..bfe16d5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@
if (!uninit_q)
return NULL;
- uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
- if (!uninit_q->flush_rq)
- goto out_cleanup_queue;
-
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- goto out_free_flush_rq;
- return q;
+ blk_cleanup_queue(uninit_q);
-out_free_flush_rq:
- kfree(uninit_q->flush_rq);
-out_cleanup_queue:
- blk_cleanup_queue(uninit_q);
- return NULL;
+ return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -717,9 +708,13 @@
if (!q)
return NULL;
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- return NULL;
+ goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
+
+fail:
+ kfree(q->flush_rq);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613b..dbf4502 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@
* be reused after the dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, at_head, true);
+ blk_mq_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b69..43e6b47 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,17 +137,20 @@
rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
- blk_mq_run_request(rq, true, false);
+ blk_mq_insert_request(rq, false, true, false);
}
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->mq_flush_work, mq_flush_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_work);
return false;
} else {
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
return true;
}
}
@@ -193,7 +196,7 @@
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq);
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@
q->flush_rq->rq_disk = first_rq->rq_disk;
q->flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(q->flush_rq);
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -411,7 +414,7 @@
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_run_request(rq, false, true);
+ blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146bef..136ef86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@
{
BUG_ON(!notifier->notify);
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd1..883f720 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@
set_bit(ctx->index_hw, hctx->ctx_map);
}
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
- bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- gfp_t gfp, bool reserved)
-{
- return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -289,38 +283,10 @@
__blk_mq_free_request(hctx, ctx, rq);
}
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
-
- /* don't actually finish bio if it's part of flush sequence */
- if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
- bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
-{
- struct bio *bio = rq->bio;
- unsigned int bytes = 0;
-
- trace_block_rq_complete(rq->q, rq);
-
- while (bio) {
- struct bio *next = bio->bi_next;
-
- bio->bi_next = NULL;
- bytes += bio->bi_iter.bi_size;
- blk_mq_bio_endio(rq, bio, error);
- bio = next;
- }
-
- blk_account_io_completion(rq, bytes);
+ if (blk_update_request(rq, error, nr_bytes))
+ return true;
blk_account_io_done(rq);
@@ -328,8 +294,9 @@
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
+ return false;
}
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
static void __blk_mq_complete_request_remote(void *data)
{
@@ -730,60 +697,27 @@
blk_mq_add_timer(rq);
}
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool at_head, bool run_queue)
-{
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
-
- ctx = rq->mq_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
- blk_insert_flush(rq);
- } else {
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq->mq_ctx = ctx;
- }
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, at_head);
- spin_unlock(&ctx->lock);
-
- blk_mq_put_ctx(current_ctx);
- }
-
- if (run_queue)
- __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
- ctx = rq->mq_ctx;
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- rq->mq_ctx = ctx;
- }
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- /* ctx->cpu might be offline */
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, false);
- spin_unlock(&ctx->lock);
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+ blk_insert_flush(rq);
+ } else {
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq, at_head);
+ spin_unlock(&ctx->lock);
+ }
blk_mq_put_ctx(current_ctx);
@@ -926,6 +860,8 @@
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ if (is_sync)
+ rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
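
With blk_mq_end_io() generalized into blk_mq_end_io_partial() above, a full
completion becomes a thin inline wrapper that must consume the whole
request. Roughly what the header-side wrapper looks like (a sketch; the
actual definition lives in include/linux/blk-mq.h):

	static inline void blk_mq_end_io(struct request *rq, int error)
	{
		bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));

		/* A full completion may not leave bytes pending. */
		BUG_ON(!done);
	}
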
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035c..72beba1 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@
};
void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e7515aa..6f190bc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -243,6 +243,8 @@
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#else
+#define acpi_ac_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 018a428..797a693 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -841,6 +841,8 @@
acpi_battery_update(battery);
return 0;
}
+#else
+#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 10e4964..afec452 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -260,14 +260,6 @@
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 15R SE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -322,56 +314,6 @@
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ProBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 15",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 17",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 8780w",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
- },
- },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 11c11f6..714e957 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -80,6 +80,8 @@
#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#else
+#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index e9b3081..5bfd769 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -713,13 +713,11 @@
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct acpi_device *tmp;
-
struct dock_station *dock_station = dev->platform_data;
+ struct acpi_device *adev = NULL;
- if (!acpi_bus_get_device(dock_station->handle, &tmp))
- return snprintf(buf, PAGE_SIZE, "1\n");
- return snprintf(buf, PAGE_SIZE, "0\n");
+ acpi_bus_get_device(dock_station->handle, &adev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41a..d7d32c2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+ * when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -116,6 +118,7 @@
static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
Transaction Management
@@ -440,6 +443,29 @@
EXPORT_SYMBOL(ec_get_handle);
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (status || !value)
+ break;
+ }
+
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@
mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
+
mutex_unlock(&ec->mutex);
}
@@ -821,6 +851,13 @@
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+ /* Clear stale _Q events if hardware might require that */
+ if (EC_FLAGS_CLEAR_ON_RESUME) {
+ mutex_lock(&ec->mutex);
+ acpi_ec_clear(ec);
+ mutex_unlock(&ec->mutex);
+ }
return ret;
}
@@ -922,6 +959,30 @@
return 0;
}
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. If too many events accumulate, these ECs stop reporting GPEs
+ * until they are manually polled (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
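How the new table entry takes effect is left implicit here; below is a minimal sketch, assuming the standard DMI helper (the init hook name is hypothetical, dmi_check_system() is the real entry point):

#include <linux/dmi.h>

static int __init example_consult_ec_quirks(void)
{
        /*
         * dmi_check_system() walks the table and invokes the .callback
         * of every entry whose .matches strings all hit, so on matching
         * Samsung machines ec_clear_on_resume() sets the flag once at
         * boot and acpi_ec_clear() then runs on both boot and resume.
         */
        dmi_check_system(ec_dmi_table);
        return 0;
}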
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 1fb6290..09e423f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -55,6 +55,9 @@
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+#else
+#define acpi_fan_suspend NULL
+#define acpi_fan_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 52d45ea..361b40c 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -430,6 +430,7 @@
pin_name(pin));
}
+ kfree(entry);
return 0;
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 28baa05..84243c3 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -56,6 +56,12 @@
int target_state; /* target T-state */
};
+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)
@@ -1060,16 +1066,24 @@
return 0;
}
+static long acpi_processor_throttling_fn(void *data)
+{
+ struct acpi_processor_throttling_arg *arg = data;
+ struct acpi_processor *pr = arg->pr;
+
+ return pr->throttling.acpi_processor_set_throttling(pr,
+ arg->target_state, arg->force);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
+ struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
- cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1080,14 +1094,6 @@
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1096,17 +1102,15 @@
return -ENODEV;
}
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpumask_and(online_throttling_cpus, cpu_online_mask,
- p_throttling->shared_cpu_map);
+
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1118,21 +1122,18 @@
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the pr->id CPU. Exit */
- ret = -ENODEV;
- goto exit;
- }
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state, force);
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask,
+ p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1153,13 +1154,12 @@
"on CPU %d\n", i));
continue;
}
- t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
- continue;
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state, force);
+
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg);
}
}
/*
@@ -1168,17 +1168,12 @@
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
-exit:
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
+
return ret;
}
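For reference, a minimal sketch of the work_on_cpu() pattern the removed FIXMEs asked for (the helper names below are hypothetical): the callback is queued on the target CPU's workqueue and the caller blocks until it completes, so no task ever has to rewrite its own affinity mask the way set_cpus_allowed_ptr() did.

#include <linux/workqueue.h>

static long example_on_cpu(void *data)
{
        /* executes on the CPU handed to work_on_cpu() */
        return 0;
}

static long example_caller(int cpu)
{
        /* synchronous: returns example_on_cpu()'s return value */
        return work_on_cpu(cpu, example_on_cpu, NULL);
}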
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc..0bdacc5 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
+ if (!memory24->address_length)
+ return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
+ if (!memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
+ if (!fixed_memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
@@ -144,12 +150,16 @@
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
+ if (!io->address_length)
+ return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
+ if (!fixed_io->address_length)
+ return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index d465ae6..dbd4849 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -450,7 +450,7 @@
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
@@ -668,6 +668,8 @@
acpi_sbs_callback(sbs);
return 0;
}
+#else
+#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b718806..c40fb2e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -71,6 +71,17 @@
return 0;
}
+static bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
+ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
+ || (acpi_gbl_FADT.sleep_control.address
+ && acpi_gbl_FADT.sleep_status.address));
+}
+
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
@@ -604,15 +615,9 @@
{
int i;
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
+ if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- }
- }
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -740,11 +745,7 @@
static void acpi_sleep_hibernate_setup(void)
{
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_FAILURE(status))
+ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
return;
hibernation_set_ops(old_suspend_ordering ?
@@ -793,8 +794,6 @@
int __init acpi_sleep_init(void)
{
- acpi_status status;
- u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
@@ -806,8 +805,7 @@
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
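A hedged reading of the consolidation above: on hardware-reduced ACPI platforms (acpi_gbl_reduced_hardware set) the legacy PM1 blocks do not exist, so acpi_sleep_state_supported() additionally requires that the FADT publish both the sleep control and sleep status register addresses; full-hardware platforms only need valid _Sx data from acpi_get_sleep_type_data(). A sketch of a call site, under those assumptions:

        /* hedged usage sketch: probe S3 before wiring up suspend ops */
        if (acpi_sleep_state_supported(ACPI_STATE_S3))
                pr_info("ACPI: S3 (suspend-to-RAM) is usable\n");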
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8349a55..08626c8 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -102,6 +102,8 @@
#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
+#else
+#define acpi_thermal_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index b727d10..b6ba88e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -81,11 +81,12 @@
module_param(allow_duplicates, bool, 0644);
/*
- * For Windows 8 systems: if set ture and the GPU driver has
- * registered a backlight interface, skip registering ACPI video's.
+ * For Windows 8 systems: used to decide whether the video module
+ * should skip registering a backlight interface of its own.
*/
-static bool use_native_backlight = false;
-module_param(use_native_backlight, bool, 0644);
+static int use_native_backlight_param = -1;
+module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
+static bool use_native_backlight_dmi = false;
static int register_count;
static struct mutex video_list_lock;
@@ -231,9 +232,17 @@
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_use_native_backlight(void)
+{
+ if (use_native_backlight_param != -1)
+ return use_native_backlight_param;
+ else
+ return use_native_backlight_dmi;
+}
+
static bool acpi_video_verify_backlight_support(void)
{
- if (acpi_osi_is_win8() && use_native_backlight &&
+ if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
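A hedged usage note, not from the patch text: module_param_named() exposes the knob as use_native_backlight under the video module, so it can presumably be forced from the kernel command line either way, overriding the DMI default:

        video.use_native_backlight=1    (always prefer the native GPU backlight)
        video.use_native_backlight=0    (always register ACPI video's backlight)

Leaving it at the default of -1 defers to the DMI table entries added below.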
@@ -398,6 +407,12 @@
return 0;
}
+static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = true;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -442,6 +457,120 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T430s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X1 Carbon",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire 5733Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-431",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4340s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 8780w",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
{}
};
@@ -685,6 +814,7 @@
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
+ u32 value;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -715,7 +845,12 @@
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
- br->levels[count] = (u32) o->integer.value;
+ value = (u32) o->integer.value;
+ /* Skip duplicate entries */
+ if (count > 2 && br->levels[count - 1] == value)
+ continue;
+
+ br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a697b77..19080c8 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,22 +168,6 @@
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "HP EliteBook Revolve 810",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
- },
- },
- {
- .callback = video_detect_force_vendor,
- .ident = "Lenovo Yoga 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
- },
- },
{ },
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4e73772..868429a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -247,6 +247,7 @@
config SATA_MV
tristate "Marvell SATA support"
+ select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc2756f..c81d809 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -61,6 +61,7 @@
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@@ -121,6 +122,13 @@
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_noncq] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
@@ -452,6 +460,12 @@
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ */
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -1170,8 +1184,10 @@
nvec = rc;
rc = pci_enable_msi_block(pdev, nvec);
- if (rc)
+ if (rc < 0)
goto intx;
+ else if (rc > 0)
+ goto single_msi;
return nvec;
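A hedged sketch of the pci_enable_msi_block() return contract this fix depends on, as that interface behaved at the time: 0 means all requested vectors were granted, a negative errno is a hard failure, and a positive value reports how many vectors the device could actually support.

static int example_init_msi(struct pci_dev *pdev, int nvec)
{
        int rc = pci_enable_msi_block(pdev, nvec);

        if (rc == 0)
                return nvec;    /* full multi-MSI mode */
        if (rc > 0)             /* only 'rc' vectors possible */
                rc = pci_enable_msi_block(pdev, 1);  /* 0: single MSI */
        return rc;              /* < 0 means fall back to legacy INTx */
}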
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1..8cb2522 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 20fd337..7ccc084 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 26386f0..b0b18ec 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -119,7 +119,9 @@
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -212,7 +214,9 @@
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- clk_prepare_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 52b8181..05c8a44 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4104,7 +4104,6 @@
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
@@ -4132,13 +4131,18 @@
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
if (rc != -EPROBE_DEFER)
- dev_warn(&pdev->dev, "error getting phy %d",
- rc);
+ dev_warn(&pdev->dev, "error getting phy %d", rc);
+
+ /* Clean up only the ports initialized so far */
+ hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
+ /* All the ports have been initialized */
+ hpriv->n_ports = n_ports;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4176,7 +4180,7 @@
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
- for (port = 0; port < n_ports; port++) {
+ for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index d67fc35..b7695e8 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a97ddf..c30df50e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1580,6 +1580,7 @@
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451..422b7d8 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -887,7 +887,7 @@
struct bvec_iter iter;
bio_for_each_segment(bv, bio, iter) {
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 5160269..d777bb7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4498,7 +4498,7 @@
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
- cpu_to_node(smp_processor_id()), smp_processor_id());
+ cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6..54174cb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
/* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS 8
+#define MTIP_MAX_UNALIGNED_SLOTS 2
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b365e0d..34898d5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2109,7 +2109,6 @@
rbd_assert(img_request->obj_request_count > 0);
rbd_assert(which != BAD_WHICH);
rbd_assert(which < img_request->obj_request_count);
- rbd_assert(which >= img_request->next_completion);
spin_lock_irq(&img_request->completion_lock);
if (which != img_request->next_completion)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d..51c557c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
+ if (!meta)
+ return -ENOMEM;
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd313f7..c1af80b 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -242,7 +242,7 @@
irq = irq_of_parse_and_map(np, 0);
if (!irq)
- return;
+ goto out_free_characteristics;
clk = at91_clk_register_master(pmc, irq, name, num_parents,
parent_names, layout,
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5..05e04ce 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@
static int __init nomadik_src_clk_init_debugfs(void)
{
+ /* Vital for multiplatform */
+ if (!src_base)
+ return -ENODEV;
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5517944..c42e608 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2226,24 +2226,25 @@
*/
int __clk_get(struct clk *clk)
{
- if (clk && !try_module_get(clk->owner))
- return 0;
+ if (clk) {
+ if (!try_module_get(clk->owner))
+ return 0;
- kref_get(&clk->ref);
+ kref_get(&clk->ref);
+ }
return 1;
}
void __clk_put(struct clk *clk)
{
- if (WARN_ON_ONCE(IS_ERR(clk)))
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();
- if (clk)
- module_put(clk->owner);
+ module_put(clk->owner);
}
/*** clk rate change notifiers ***/
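A hedged note on why both helpers must now tolerate NULL: on some platforms a NULL struct clk pointer is a perfectly valid "dummy clock" cookie, so a driver doing the following must neither trip the WARN nor dereference the pointer:

        struct clk *clk = clk_get(dev, "optional");     /* may be NULL */

        if (!IS_ERR(clk))
                clk_put(clk);   /* lands in __clk_put(); clk == NULL is fine */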
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 17a5983..86f1e36 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@
init.name = name;
init.ops = &clk_psc_ops;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d..bef198a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
};
-static void __init a370_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
- a370_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -168,9 +161,15 @@
{ }
};
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, a370_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+ mvebu_coreclk_setup(np, &a370_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, a370_gating_desc);
}
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
- a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
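A hedged design note the hunk leaves implicit: folding mvebu_clk_gating_setup() into the core-clock CLK_OF_DECLARE() initializer guarantees the core clocks exist before the gates parented to them, instead of depending on the order in which the two DT nodes' init callbacks happen to fire. The same consolidation is applied to Armada XP, Dove and Kirkwood below.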
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c44..b309431 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static void __init axp_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
- axp_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -202,9 +195,14 @@
{ }
};
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, axp_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+ mvebu_coreclk_setup(np, &axp_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, axp_gating_desc);
}
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
- axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e..b8c2424 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
};
-static void __init dove_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -186,9 +180,14 @@
{ }
};
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, dove_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+ mvebu_coreclk_setup(np, &dove_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, dove_gating_desc);
}
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
- dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55..ddb666a 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
- kirkwood_coreclk_init);
-
static const struct coreclk_soc_desc mv88f6180_coreclks = {
.get_tclk_freq = kirkwood_get_tclk_freq,
.get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
- mv88f6180_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -239,9 +225,21 @@
{ }
};
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+ if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+ mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+ else
+ mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
}
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
- kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+ kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+ kirkwood_clk_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index a59ec21..99c27b1 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -26,6 +26,8 @@
void __iomem *reg;
};
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
#define CPG_SDCKCR 0x00000074
#define CPG_PLL0CR 0x000000d8
#define CPG_FRQCRC 0x000000e0
@@ -45,6 +47,7 @@
struct cpg_z_clk {
struct clk_hw hw;
void __iomem *reg;
+ void __iomem *kick_reg;
};
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@
{
struct cpg_z_clk *zclk = to_z_clk(hw);
unsigned int mult;
- u32 val;
+ u32 val, kick;
+ unsigned int i;
mult = div_u64((u64)rate * 32, parent_rate);
mult = clamp(mult, 1U, 32U);
+ if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
val = clk_readl(zclk->reg);
val &= ~CPG_FRQCRC_ZFC_MASK;
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
clk_writel(val, zclk->reg);
- return 0;
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = clk_readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ clk_writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might depend on the external xtal rate, the pll1
+ * rate or even the other emulation clock rates, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
}
static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@
init.num_parents = 1;
zclk->reg = cpg->reg + CPG_FRQCRC;
+ zclk->kick_reg = cpg->reg + CPG_FRQCRB;
zclk->hw.init = &init;
clk = clk_register(NULL, &zclk->hw);
@@ -186,7 +218,7 @@
const char *name)
{
const struct clk_div_table *table = NULL;
- const char *parent_name = "main";
+ const char *parent_name;
unsigned int shift;
unsigned int mult = 1;
unsigned int div = 1;
@@ -201,23 +233,31 @@
* the multiplier value.
*/
u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ parent_name = "main";
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
} else if (!strcmp(name, "pll1")) {
+ parent_name = "main";
mult = config->pll1_mult / 2;
} else if (!strcmp(name, "pll3")) {
+ parent_name = "main";
mult = config->pll3_mult;
} else if (!strcmp(name, "lb")) {
+ parent_name = "pll1_div2";
div = cpg_mode & BIT(18) ? 36 : 24;
} else if (!strcmp(name, "qspi")) {
+ parent_name = "pll1_div2";
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
- ? 16 : 20;
+ ? 8 : 10;
} else if (!strcmp(name, "sdh")) {
+ parent_name = "pll1_div2";
table = cpg_sdh_div_table;
shift = 8;
} else if (!strcmp(name, "sd0")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 4;
} else if (!strcmp(name, "sd1")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 0;
} else if (!strcmp(name, "z")) {
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f..290f9c1 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@
return 0;
if (divider_ux1 > get_max_div(divider))
- return -EINVAL;
+ return get_max_div(divider);
return divider_ux1;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index cf0c323..c39613c 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -180,9 +180,13 @@
tegra_clk_sbc6_8,
tegra_clk_sclk,
tegra_clk_sdmmc1,
+ tegra_clk_sdmmc1_8,
tegra_clk_sdmmc2,
+ tegra_clk_sdmmc2_8,
tegra_clk_sdmmc3,
+ tegra_clk_sdmmc3_8,
tegra_clk_sdmmc4,
+ tegra_clk_sdmmc4_8,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 5c35885..1fa5c3f 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -371,9 +371,7 @@
static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
};
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+ MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+ MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+ MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+ MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
- UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+ UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 05dce4a..feb3201 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -120,7 +120,7 @@
ARRAY_SIZE(cclk_lp_parents),
CLK_SET_RATE_PARENT,
clk_base + CCLKLP_BURST_POLICY,
- 0, 4, 8, 9, NULL);
+ TEGRA_DIVIDER_2, 4, 8, 9, NULL);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 90d9d25..80431f0 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -682,12 +682,12 @@
[tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
[tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@
[tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
[tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
[tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
[tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index aff86b5..166e02f 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -516,11 +516,11 @@
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- {12000000, 216000000, 432, 12, 1, 8},
- {13000000, 216000000, 432, 13, 1, 8},
- {16800000, 216000000, 360, 14, 1, 8},
- {19200000, 216000000, 360, 16, 1, 8},
- {26000000, 216000000, 432, 26, 1, 8},
+ {12000000, 408000000, 408, 12, 0, 8},
+ {13000000, 408000000, 408, 13, 0, 8},
+ {16800000, 408000000, 340, 14, 0, 8},
+ {19200000, 408000000, 340, 16, 0, 8},
+ {26000000, 408000000, 408, 26, 0, 8},
{0, 0, 0, 0, 0, 0},
};
@@ -570,6 +570,15 @@
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
+static struct div_nmp plld_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 11,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
{12000000, 216000000, 864, 12, 4, 12},
{13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
- .div_nmp = &pllp_nmp,
+ .div_nmp = &plld_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
- { 12000000, 148500000, 99, 1, 8},
- { 12000000, 594000000, 99, 1, 1},
- { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
- { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
- { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
- { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+ { 12000000, 594000000, 99, 1, 2},
+ { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+ { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -753,21 +761,19 @@
[tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
- [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
[tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
- [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
[tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
[tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
- [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
[tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
[tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@
[tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
[tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
[tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
@@ -1286,9 +1292,9 @@
clk_register_clkdev(clk, "pll_d2", NULL);
clks[TEGRA124_CLK_PLL_D2] = clk;
- /* PLLD2_OUT0 ?? */
+ /* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
- CLK_SET_RATE_PARENT, 1, 2);
+ CLK_SET_RATE_PARENT, 1, 1);
clk_register_clkdev(clk, "pll_d2_out0", NULL);
clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index dbace15..dace2b1 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -574,6 +574,8 @@
[tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
[tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 02821b0..a918bc4 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@
static u64 pit_read_sched_clock(void)
{
- return __raw_readl(clksrc_base + PITCVAL);
+ return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
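A hedged rationale for the one-character fix above: the PIT current-value register is a down-counter, while sched_clock() must be monotonically non-decreasing, so the bitwise complement turns the descending raw value into an ascending one (a 32-bit counter falling 0xffffffff -> 0xfffffffe reads back as 0 -> 1).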
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 08ca8c9..199b52b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1109,12 +1109,27 @@
goto err_set_policy_cpu;
}
+ /* related cpus should at least include policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be online ones. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
+
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
@@ -1162,20 +1177,6 @@
}
}
- /* related cpus should atleast have policy->cpus */
- cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
- /*
- * affected cpus must always be the one, which are online. We aren't
- * managing offline cpus here.
- */
- cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
- if (!frozen) {
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
- }
-
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1206,6 +1207,7 @@
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
+ up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1323,8 +1325,7 @@
up_read(&policy->rwsem);
if (cpu != policy->cpu) {
- if (!frozen)
- sysfs_remove_link(&dev->kobj, "cpufreq");
+ sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
@@ -1547,23 +1548,16 @@
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
- if (cpufreq_disabled() || !cpufreq_driver)
- return -ENOENT;
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(cpu);
+ up_read(&policy->rwsem);
- BUG_ON(!policy);
-
- if (!down_read_trylock(&cpufreq_rwsem))
- return 0;
-
- down_read(&policy->rwsem);
-
- ret_freq = __cpufreq_get(cpu);
-
- up_read(&policy->rwsem);
- up_read(&cpufreq_rwsem);
+ cpufreq_cpu_put(policy);
+ }
return ret_freq;
}
@@ -2149,7 +2143,7 @@
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
if (!policy->cur) {
pr_debug("Driver did not initialize current freq");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c788abf..2cd36b9 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,12 +34,15 @@
#define SAMPLE_COUNT 3
-#define BYT_RATIOS 0x66a
-#define BYT_VIDS 0x66b
+#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
+#define BYT_TURBO_RATIOS 0x66c
-#define FRAC_BITS 8
+
+#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -357,7 +360,7 @@
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return value & 0xFF;
+ return (value >> 8) & 0xFF;
}
static int byt_get_max_pstate(void)
@@ -367,6 +370,13 @@
return (value >> 16) & 0xFF;
}
+static int byt_get_turbo_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_TURBO_RATIOS, value);
+ return value & 0x3F;
+}
+
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -469,7 +479,7 @@
.funcs = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
- .get_turbo = byt_get_max_pstate,
+ .get_turbo = byt_get_turbo_pstate,
.set = byt_set_pstate,
.get_vid = byt_get_vid,
},
@@ -547,18 +557,20 @@
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
- u64 core_pct;
- u64 c0_pct;
+ int32_t core_pct;
+ int32_t c0_pct;
- core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+ core_pct = div_fp(int_tofp((sample->aperf)),
+ int_tofp((sample->mperf)));
+ core_pct = mul_fp(core_pct, int_tofp(100));
+ FP_ROUNDUP(core_pct);
- c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+ c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
+
sample->freq = fp_toint(
- mul_fp(int_tofp(cpu->pstate.max_pstate),
- int_tofp(core_pct * 1000)));
+ mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
- sample->core_pct_busy = mul_fp(int_tofp(core_pct),
- div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
+ sample->core_pct_busy = mul_fp(core_pct, c0_pct);
}
static inline void intel_pstate_sample(struct cpudata *cpu)
@@ -570,6 +582,10 @@
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = native_read_tsc();
+ aperf = aperf >> FRAC_BITS;
+ mperf = mperf >> FRAC_BITS;
+ tsc = tsc >> FRAC_BITS;
+
cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;
@@ -601,7 +617,8 @@
core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ return FP_ROUNDUP(core_busy);
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
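A worked example of the Q-format arithmetic introduced above, assuming only the definitions in this file (FRAC_BITS = 6, so 1.0 is represented as 64): with aperf = 150 and mperf = 100,

        core_pct = div_fp(int_tofp(150), int_tofp(100))
                 = ((150 << 6) << 6) / (100 << 6) = 96   /* 1.5 in Q6 */
        core_pct = mul_fp(96, int_tofp(100))
                 = (96 * 6400) >> 6 = 9600               /* 150.0 in Q6 */

FP_ROUNDUP() then adds one whole unit (1 << 6), biasing the busy estimate upward so later truncating divisions do not under-request a P-state.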
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e10b646..6684e03 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1076,7 +1076,7 @@
{
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
- int rc;
+ int rc, cpu;
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
@@ -1140,7 +1140,9 @@
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
- per_cpu(powernow_data, pol->cpu) = data;
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
return 0;
@@ -1155,6 +1157,7 @@
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
if (!data)
return -EINVAL;
@@ -1165,7 +1168,8 @@
kfree(data->powernow_table);
kfree(data);
- per_cpu(powernow_data, pol->cpu) = NULL;
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = NULL;
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 78fd174..f48607c 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -14,6 +14,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/runlatch.h>
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -30,12 +31,14 @@
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
+ ppc64_runlatch_off();
while (!need_resched()) {
HMT_low();
HMT_very_low();
}
HMT_medium();
+ ppc64_runlatch_on();
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb();
return index;
@@ -45,7 +48,9 @@
struct cpuidle_driver *drv,
int index)
{
+ ppc64_runlatch_off();
power7_idle();
+ ppc64_runlatch_on();
return index;
}
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 7ab564a..6f7b019 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -17,6 +17,7 @@
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -29,6 +30,7 @@
static inline void idle_loop_prolog(unsigned long *in_purr)
{
+ ppc64_runlatch_off();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
@@ -45,6 +47,10 @@
wait_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
get_lppaca()->idle = 0;
+
+ if (irqs_disabled())
+ local_irq_enable();
+ ppc64_runlatch_on();
}
static int snooze_loop(struct cpuidle_device *dev,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e79183..19041ce 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -449,6 +449,7 @@
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181..4e3549a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -116,7 +118,6 @@
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -354,13 +355,49 @@
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+ msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -379,9 +416,7 @@
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -526,8 +561,11 @@
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
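The new ioat_stop() fixes the teardown ordering: clear the RUN bit first so that neither the interrupt handler nor the cleanup tasklet can schedule new work, then flush each asynchronous context in turn. The same ordering applies to any irq/timer/tasklet trio; a generic sketch (my_chan is hypothetical, not the ioat structure):

	struct my_chan {
		unsigned long state;		/* bit 0 == RUNNING */
		int irq;
		struct timer_list timer;
		struct tasklet_struct cleanup;
	};

	static void my_chan_stop(struct my_chan *c)
	{
		clear_bit(0, &c->state);	/* 1: no new work gets scheduled */
		synchronize_irq(c->irq);	/* 2: drain in-flight hardirqs */
		del_timer_sync(&c->timer);	/* 3: drain in-flight timers */
		tasklet_kill(&c->cleanup);	/* 4: drain in-flight tasklets */
		/* nothing can re-arm now; final cleanup is safe */
	}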
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877..e982f00 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe..8d10580 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -553,10 +556,10 @@
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -566,7 +569,6 @@
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e9..b9b38a1 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 00a2de9..bf18c78 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1641,6 +1641,7 @@
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1668,6 +1669,7 @@
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
@@ -1690,7 +1692,7 @@
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
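The bug fixed here is a use-after-free: by the time the callback runs, the descriptor may already have been recycled, so d40d->txd.flags must be sampled while the descriptor is still valid. The general pattern is to snapshot everything needed from an object before dropping the lock that keeps it alive; a sketch with hypothetical names:

	spin_lock_irqsave(&chan->lock, flags);
	active   = !!(desc->txd.flags & DMA_PREP_INTERRUPT);
	callback = desc->txd.callback;
	param    = desc->txd.callback_param;
	/* desc may be freed or reused once the lock drops */
	spin_unlock_irqrestore(&chan->lock, flags);

	if (active && callback)
		callback(param);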
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index d63f479..57e96a3 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -943,33 +943,35 @@
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
+ pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
+ if (!pvt->pci_dev_16_1_fsb_addr_map)
+ pvt->pci_dev_16_1_fsb_addr_map =
+ pci_dev_get(pdev);
break;
case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
+ if (!pvt->pci_dev_16_2_fsb_err_regs)
+ pvt->pci_dev_16_2_fsb_err_regs =
+ pci_dev_get(pdev);
break;
}
}
+ if (!pvt->pci_dev_16_1_fsb_addr_map ||
+ !pvt->pci_dev_16_2_fsb_err_regs) {
+ /* At least one device was not found */
+ i7300_printk(KERN_ERR,
+ "'system address,Process Bus' device not found:"
+ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+ goto error;
+ }
+
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
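The rewritten loop also repairs the reference counting: pci_get_device() puts the reference on the device passed in as its third argument and returns the next match with a fresh reference, so any pdev kept beyond the loop needs its own pci_dev_get(). The idiom, with a hypothetical is_wanted() predicate:

	struct pci_dev *pdev = NULL, *kept = NULL;

	while ((pdev = pci_get_device(vendor, device, pdev))) {
		if (!kept && is_wanted(pdev))
			kept = pci_dev_get(pdev);  /* extra ref outlives loop */
	}
	/* on loop exit pci_get_device() has already put the last pdev */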
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 87533ca..d871275 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1334,14 +1334,19 @@
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+ }
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
+ !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
+ }
if (!pdev) {
if (*prev) {
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c20602f..98a14f6 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -222,27 +222,19 @@
struct snd_soc_dapm_context *dapm = arizona->dapm;
int ret;
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_force_enable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
}
}
@@ -304,16 +296,12 @@
ARIZONA_MICD_ENA, 0,
&change);
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa40..2c6d5e1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@
dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
+static void fw_device_workfn(struct work_struct *work)
+{
+ struct fw_device *device = container_of(to_delayed_work(work),
+ struct fw_device, work);
+ device->workfn(work);
+}
+
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -1252,7 +1259,8 @@
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
- INIT_DELAYED_WORK(&device->work, fw_device_init);
+ device->workfn = fw_device_init;
+ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
@@ -1268,7 +1276,7 @@
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
@@ -1283,7 +1291,7 @@
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
@@ -1308,7 +1316,7 @@
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
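PREPARE_DELAYED_WORK() is being retired because rewriting work->func on a live item races with the workqueue core. The replacement keeps one fixed handler on the work item and dispatches through a driver-private function pointer that is retargeted by plain assignment. A sketch of the pattern (my_dev is hypothetical):

	struct my_dev {
		work_func_t workfn;		/* next stage to run */
		struct delayed_work work;	/* handler fixed below */
	};

	static void my_dev_workfn(struct work_struct *work)
	{
		struct my_dev *d = container_of(to_delayed_work(work),
						struct my_dev, work);
		d->workfn(work);		/* dispatch current stage */
	}

	/* setup:    d->workfn = first_stage;
	 *           INIT_DELAYED_WORK(&d->work, my_dev_workfn);
	 * retarget: d->workfn = next_stage;  (no PREPARE_DELAYED_WORK) */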
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 6b89598..4af0a7b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -929,8 +929,6 @@
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fwnet_transmit_packet_failed(ptask);
-
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@
errors_skipped = 0;
last_rcode = rcode;
- } else
+ } else {
errors_skipped++;
+ }
+ fwnet_transmit_packet_failed(ptask);
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6f74d8d..8db6632 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,7 +290,6 @@
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -303,10 +302,7 @@
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
- QUIRK_PHY_LCTRL_TIMEOUT},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -2299,9 +2294,6 @@
* TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
* cannot actually use the phy at that time. These need tens of
milliseconds pause between LPS write and first phy access too.
- *
- * But do not wait for 50msec on Agere/LSI cards. Their phy
- * arbitration state machine may time out during such a long wait.
*/
reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+ for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
-
- for (lps = 0, i = 0; !lps && i < 150; i++) {
- msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029d..7aef911 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@
*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
@@ -864,7 +865,7 @@
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
@@ -918,7 +919,7 @@
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -972,6 +973,13 @@
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 1b5e8e4..7160c43 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -584,7 +584,7 @@
.remove = dcdbas_remove,
};
-static const struct platform_device_info dcdbas_dev_info __initdata = {
+static const struct platform_device_info dcdbas_dev_info __initconst = {
.name = DRIVER_NAME,
.id = -1,
.dma_mask = DMA_BIT_MASK(32),
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
index b6bffbf..ff50aee 100644
--- a/drivers/firmware/efi/efi-stub-helper.c
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -16,18 +16,6 @@
u64 size;
};
-
-
-
-static void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
-
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- efi_call_phys2(out->output_string, out, str);
-}
-
static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
{
char *s8;
@@ -65,20 +53,23 @@
* allocation which may be in a new descriptor region.
*/
*map_size += sizeof(*m);
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, *map_size, (void **)&m);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ *map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
- map_size, m, &key, desc_size, &desc_version);
+ *desc_size = 0;
+ key = 0;
+ status = efi_call_early(get_memory_map, map_size, m,
+ &key, desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
goto again;
}
if (status != EFI_SUCCESS)
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
+
if (key_ptr && status == EFI_SUCCESS)
*key_ptr = key;
if (desc_ver && status == EFI_SUCCESS)
@@ -158,7 +149,7 @@
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
@@ -170,8 +161,7 @@
*addr = max_addr;
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
-
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -231,7 +221,7 @@
if ((start + size) > end)
continue;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &start);
if (status == EFI_SUCCESS) {
@@ -243,7 +233,7 @@
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -257,7 +247,7 @@
return;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+ efi_call_early(free_pages, addr, nr_pages);
}
@@ -276,9 +266,7 @@
{
struct file_info *files;
unsigned long file_addr;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
u64 file_size_total;
- efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_status_t status;
int nr_files;
@@ -319,10 +307,8 @@
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA,
- nr_files * sizeof(*files),
- (void **)&files);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
goto fail;
@@ -331,13 +317,8 @@
str = cmd_line;
for (i = 0; i < nr_files; i++) {
struct file_info *file;
- efi_file_handle_t *h;
- efi_file_info_t *info;
efi_char16_t filename_16[256];
- unsigned long info_sz;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
efi_char16_t *p;
- u64 file_sz;
str = strstr(str, option_string);
if (!str)
@@ -368,71 +349,18 @@
/* Only open the volume once. */
if (!i) {
- efi_boot_services_t *boottime;
-
- boottime = sys_table_arg->boottime;
-
- status = efi_call_phys3(boottime->handle_protocol,
- image->device_handle, &fs_proto,
- (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ status = efi_open_volume(sys_table_arg, image,
+ (void **)&fh);
+ if (status != EFI_SUCCESS)
goto free_files;
- }
-
- status = efi_call_phys2(io->open_volume, io, &fh);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open volume\n");
- goto free_files;
- }
}
- status = efi_call_phys5(fh->open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ status = efi_file_size(sys_table_arg, fh, filename_16,
+ (void **)&file->handle, &file->size);
+ if (status != EFI_SUCCESS)
goto close_handles;
- }
- file->handle = h;
-
- info_sz = 0;
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
- goto close_handles;
- }
-
-grow:
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, info_sz,
- (void **)&info);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
- goto close_handles;
- }
-
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool,
- info);
- goto grow;
- }
-
- file_sz = info->file_size;
- efi_call_phys1(sys_table_arg->boottime->free_pool, info);
-
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to get file info\n");
- goto close_handles;
- }
-
- file->size = file_sz;
- file_size_total += file_sz;
+ file_size_total += file->size;
}
if (file_size_total) {
@@ -468,10 +396,10 @@
chunksize = EFI_READ_CHUNK_SIZE;
else
chunksize = size;
- status = efi_call_phys3(fh->read,
- files[j].handle,
- &chunksize,
- (void *)addr);
+
+ status = efi_file_read(fh, files[j].handle,
+ &chunksize,
+ (void *)addr);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to read file\n");
goto free_file_total;
@@ -480,12 +408,12 @@
size -= chunksize;
}
- efi_call_phys1(fh->close, files[j].handle);
+ efi_file_close(fh, files[j].handle);
}
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -497,9 +425,9 @@
close_handles:
for (k = j; k < i; k++)
- efi_call_phys1(fh->close, files[k].handle);
+ efi_file_close(fh, files[k].handle);
free_files:
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -545,7 +473,7 @@
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &efi_addr);
new_addr = efi_addr;
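The memory-map helper follows the standard UEFI two-call protocol: allocating the buffer can itself add descriptors, so on EFI_BUFFER_TOO_SMALL the buffer is freed, grown, and the call retried. Condensed from the converted hunk above, for reference:

	again:
		*map_size += sizeof(*m);	/* headroom for our own allocation */
		status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
					*map_size, (void **)&m);
		if (status != EFI_SUCCESS)
			goto fail;

		status = efi_call_early(get_memory_map, map_size, m,
					&key, desc_size, &desc_version);
		if (status == EFI_BUFFER_TOO_SMALL) {
			efi_call_early(free_pool, m);	/* grew meanwhile, retry */
			goto again;
		}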
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4753bac..af20f17 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -233,7 +233,7 @@
{SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
- {NULL_GUID, NULL, 0},
+ {NULL_GUID, NULL, NULL},
};
static __init int match_config_table(efi_guid_t *guid,
@@ -313,5 +313,8 @@
}
pr_cont("\n");
early_iounmap(config_tables, efi.systab->nr_tables * sz);
+
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
return 0;
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 3dc2482..50ea412 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -227,7 +227,7 @@
memcpy(&entry->var, new_var, count);
err = efivar_entry_set(entry, new_var->Attributes,
- new_var->DataSize, new_var->Data, false);
+ new_var->DataSize, new_var->Data, NULL);
if (err) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
return -EIO;
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index ee5b479..9bb2cbd 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -27,7 +27,7 @@
/* The "file=" is like the generic "gateware=" used elsewhere */
static char *fwe_file[FMC_MAX_CARDS];
static int fwe_file_n;
-module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444);
+module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
int write)
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36..32982da 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@
{
struct armada_private *priv = dev->dev_private;
- /*
- * Yes, we really must jump through these hoops just to store a
- * _pointer_ to something into the kfifo. This is utterly insane
- * and idiotic, because it kfifo requires the _data_ pointed to by
- * the pointer const, not the pointer itself. Not only that, but
- * you have to pass a pointer _to_ the pointer you want stored.
- */
- const struct drm_framebuffer *silly_api_alert = fb;
- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
schedule_work(&priv->fb_unref_work);
}
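Since the kfifo type-safety rework, kfifo_put() takes the element by value instead of by address, so a fifo of pointers can be fed the pointer directly. A sketch (fifo name and size are arbitrary here):

	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);

	INIT_KFIFO(fb_unref);
	WARN_ON(!kfifo_put(&fb_unref, fb));	/* by value; returns 0 when full */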
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12..5f8b0c2 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index bb8f580..534cb89 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -32,6 +32,12 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
static void
drm_clflush_page(struct page *page)
{
@@ -44,7 +50,7 @@
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
- clflush(page_virtual + i);
+ clflushopt(page_virtual + i);
kunmap_atomic(page_virtual);
}
@@ -133,7 +139,7 @@
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
- clflush(end - 1);
+ clflushopt(end - 1);
mb();
return;
}
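clflushopt is only ordered by mfence (or by an older clflush to the same cache line), so the faster instruction is safe here precisely because both call sites keep the loop fenced. The resulting contract, as the second hunk shows:

	mb();					/* prior stores before flushes */
	for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
		clflushopt(addr);
	clflushopt(end - 1);			/* cover a partial final line */
	mb();					/* flushes done before return */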
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 5736aaa..f7af69b 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -468,8 +468,8 @@
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
- drm_put_dev(dev);
list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
}
}
DRM_INFO("Module unloaded\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 215131a..c204b4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -172,20 +172,24 @@
ret = exynos_drm_subdrv_open(dev, file);
if (ret)
- goto out;
+ goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
ret = PTR_ERR(anon_filp);
- goto out;
+ goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
-out:
+
+err_subdrv_close:
+ exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
return ret;
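The exynos fix replaces a single 'out' label with properly ordered unwind labels, the usual kernel idiom: each failure jumps to the label that releases everything acquired so far, in reverse order. A sketch with hypothetical acquire/release helpers:

	static int my_open(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_release_a;

		ret = acquire_c();
		if (ret)
			goto err_release_b;

		return 0;

	err_release_b:
		release_b();
	err_release_a:
		release_a();
		return ret;
	}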
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02..ec7bb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -403,7 +403,7 @@
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pch;
+ struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- while (pch) {
- struct pci_dev *curr = pch;
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
- } else {
- goto check_next;
- }
- pci_dev_put(pch);
+ } else
+ continue;
+
break;
}
-check_next:
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
- pci_dev_put(curr);
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found?\n");
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36..d278be1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -842,7 +842,7 @@
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
- false);
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84..28d24ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+ * One more attempt but this time requesting region from
+ * base + 1, as we have seen that this resolves the region
+ * conflict with the PCI Bus.
+ * This is a BIOS w/a: Some BIOS wrap stolen in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
@@ -201,6 +214,13 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
if (dev_priv->gtt.stolen_size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9fec711..d554169 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -618,33 +618,25 @@
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
+ int reg;
- if (INTEL_INFO(dev)->gen < 7) {
- status = pipe == PIPE_A ?
- DE_PIPEA_VBLANK :
- DE_PIPEB_VBLANK;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ status = GEN8_PIPE_VBLANK;
+ reg = GEN8_DE_PIPE_ISR(pipe);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ status = DE_PIPE_VBLANK_IVB(pipe);
+ reg = DEISR;
} else {
- switch (pipe) {
- default:
- case PIPE_A:
- status = DE_PIPEA_VBLANK_IVB;
- break;
- case PIPE_B:
- status = DE_PIPEB_VBLANK_IVB;
- break;
- case PIPE_C:
- status = DE_PIPEC_VBLANK_IVB;
- break;
- }
+ status = DE_PIPE_VBLANK(pipe);
+ reg = DEISR;
}
- return __raw_i915_read32(dev_priv, DEISR) & status;
+ return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -702,7 +694,28 @@
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_DDI(dev)) {
+ /*
+ * On HSW HDMI outputs there seems to be a 2 line
+ * difference, whereas eDP has the normal 1 line
+ * difference that earlier platforms have. External
+ * DP is unknown. For now just check for the 2 line
+ * difference case on all output types on HSW+.
+ *
+ * This might misinterpret the scanline counter being
+ * one line too far along on eDP, but that's less
+ * dangerous than the alternative since that would lead
+ * the vblank timestamp code astray when it sees a
+ * scanline count before vblank_start during a vblank
+ * interrupt.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && (position == vbl_start - 2 ||
+ position == vbl_start - 1)) ||
+ (!in_vbl && (position == vbl_end - 2 ||
+ position == vbl_end - 1)))
+ position = (position + 2) % vtotal;
+ } else if (HAS_PCH_SPLIT(dev)) {
/*
* The scanline counter increments at the leading edge
* of hsync, ie. it completely misses the active portion
@@ -2769,10 +2782,9 @@
return;
if (HAS_PCH_IBX(dev)) {
- mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
- SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
} else {
- mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
@@ -2832,20 +2844,19 @@
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB);
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
- extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
dev_priv->irq_mask = ~display_mask;
@@ -2961,9 +2972,9 @@
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e0..234ac5f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1244,6 +1244,7 @@
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c16728..9b8a7c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1092,12 +1092,12 @@
struct drm_device *dev = dev_priv->dev;
bool cur_state;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 57552eb..2688f6d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1249,17 +1249,24 @@
DRM_DEBUG_KMS("Turn eDP power off\n");
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
+
+ /* We got a reference when we enabled the VDD. */
+ intel_runtime_pm_put(dev_priv);
}
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1639,7 +1646,7 @@
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
- IS_BROADWELL(dev) ? 0 : link_entry_time |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@@ -1784,6 +1791,7 @@
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d..ee3181e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -845,7 +845,7 @@
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (IS_G4X(dev))
+ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -899,8 +899,8 @@
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
- && HAS_PCH_SPLIT(dev)) {
+ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de35..079ea38 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -698,7 +698,7 @@
freq /= 0xff;
ctl = freq << 17;
- if (IS_GEN2(dev) && panel->backlight.combination_mode)
+ if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81..e1fc35a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3493,6 +3493,8 @@
u32 pcbr;
int pctx_size = 24*1024;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- valleyview_setup_pctx(dev);
-
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -4395,6 +4395,8 @@
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 4ef83df..83face3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -106,6 +106,29 @@
return 0;
}
+/*
+ * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
+ * requirements on the fourth parameter, so a private implementation
+ * instead of using acpi_check_dsm().
+ */
+static int nouveau_check_optimus_dsm(acpi_handle handle)
+{
+ int result;
+
+ /*
+ * Function 0 returns a Buffer containing available functions.
+ * The args parameter is ignored for function 0, so just put 0 in it
+ */
+ if (nouveau_optimus_dsm(handle, 0, 0, &result))
+ return 0;
+
+ /*
+ * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
+ * If the n-th bit is enabled, function n is supported
+ */
+ return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
+}
+
static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
int ret = 0;
@@ -207,8 +230,7 @@
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
- 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (nouveau_check_optimus_dsm(dhandle))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
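Per the ACPI spec, _DSM function 0 returns a capability mask: bit 0 set means the mask is valid, bit n set means function n is implemented, which is what the private check above decodes. A sketch of that decode:

	static bool dsm_supports(u32 caps, unsigned int func)
	{
		/* bit 0 clear: no functions supported (ACPI 4.0, 9.14.1) */
		return (caps & 1) && (caps & (1u << func));
	}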
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 89c484d..4ee702a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -866,13 +866,16 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- return -EINVAL;
+ pm_runtime_forbid(dev);
+ return -EBUSY;
}
nv_debug_level(SILENT);
@@ -923,12 +926,15 @@
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0)
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 0d19f4f..daa4dd3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1774,6 +1774,20 @@
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE41(rdev)) {
+ /* Don't share PLLs on DCE4.1 chips */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ }
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
@@ -1801,7 +1815,7 @@
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ } else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2cec2ab..607dc14 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1314,7 +1314,7 @@
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6419ca..bbb1784 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3046,7 +3046,7 @@
}
/**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
*
* @rdev: radeon_device pointer
* @max_rb_num: max RBs (render backends) for the asic
@@ -4134,8 +4134,11 @@
{
if (enable)
WREG32(CP_MEC_CNTL, 0);
- else
+ else {
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
udelay(50);
}
@@ -7902,7 +7905,8 @@
/* init golden registers */
cik_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cik_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1..94626ea 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
}
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
@@ -291,6 +293,11 @@
u32 me_cntl, reg_offset;
int i;
+ if (enable == false) {
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ }
+
for (i = 0; i < 2; i++) {
if (i == 0)
reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@
if (!rdev->sdma_fw)
return -EINVAL;
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
/* halt the MEs */
cik_sdma_enable(rdev, false);
@@ -492,9 +495,6 @@
*/
void cik_sdma_fini(struct radeon_device *rdev)
{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
/* halt the MEs */
cik_sdma_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d3..94e8587 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- AUDIO_ENABLED);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ enable ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio. it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5623e75..27b0ff1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5299,7 +5299,8 @@
/* init golden registers */
evergreen_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = evergreen_startup(rdev);
@@ -5475,9 +5476,9 @@
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5ce..05b0c95 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -427,15 +440,6 @@
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8c..3a03ba3 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@
#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ea932ac..bf6300c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2105,7 +2105,8 @@
/* init golden registers */
ni_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ef024ce..3cc78bb 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3942,8 +3942,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7c63ef8..0b658b3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab..802b192 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb7..98d6053 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cdbc417..647ef40 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2968,7 +2968,8 @@
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = r600_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b8..bffac10 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 3016fc1..85a2bb2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -651,11 +655,6 @@
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 024db37..e887d02 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2747,6 +2747,12 @@
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f..fa9a9c0 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u\n", output.version);
+ printk("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbb..044bc98 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1521,13 +1521,16 @@
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 84a1bbb7..f633c27 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -403,11 +403,15 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (radeon_runtime_pm == 0)
- return -EINVAL;
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
- if (radeon_runtime_pm == -1 && !radeon_is_px())
- return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -456,12 +460,15 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_crtc *crtc;
- if (radeon_runtime_pm == 0)
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we PX enabled? */
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
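Returning -EBUSY from a runtime_suspend callback only fails that one attempt; the PM core keeps retrying on the autosuspend timer. When suspend can never succeed (runtime PM disabled, or no PX/Optimus mux), the driver now also calls pm_runtime_forbid() so the core stops asking. A sketch with a hypothetical capability check:

	static int my_runtime_suspend(struct device *dev)
	{
		if (!my_hw_can_power_off(dev)) {
			pm_runtime_forbid(dev);	/* no further attempts */
			return -EBUSY;		/* fail this one */
		}
		/* normal suspend path */
		return 0;
	}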
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 114d167..66ed3ea 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -130,7 +137,8 @@
"Error during ACPI methods call\n");
}
- if (radeon_runtime_pm != 0) {
+ if ((radeon_runtime_pm == 1) ||
+ ((radeon_runtime_pm == -1) && radeon_is_px())) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -537,6 +545,10 @@
radeon_vm_init(rdev, &fpriv->vm);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r)
+ return r;
+
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
@@ -544,6 +556,8 @@
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
+
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 77f5b0c..040a2a1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -714,6 +714,9 @@
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+ /* Change the size here instead of the init above so only lpfn is affected */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@
while (size) {
loff_t p = *pos / PAGE_SIZE;
unsigned off = *pos & ~PAGE_MASK;
- ssize_t cur_size = min(size, PAGE_SIZE - off);
+ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
struct page *page;
void *ptr;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6781fee..3e6804b 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -171,6 +171,8 @@
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
release_firmware(rdev->uvd_fw);
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369..130d5cc 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde76..72d3616 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3595073..3462b64 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138..237dd29 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6c772e5..fef3107 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@
/* init golden registers */
rv770_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = rv770_startup(rdev);
@@ -1955,9 +1956,9 @@
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8357832..9a124d0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6618,7 +6618,8 @@
/* init golden registers */
si_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = si_startup(rdev);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 88a5290..c715947 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,7 +104,7 @@
static void tegra_drm_lastclose(struct drm_device *drm)
{
-#ifdef CONFIG_TEGRA_DRM_FBDEV
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 338f7f6..0266fb4 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -89,6 +90,9 @@
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (rgb->enabled)
+ return 0;
+
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -122,6 +126,8 @@
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ rgb->enabled = true;
+
return 0;
}
@@ -130,6 +136,9 @@
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (!rgb->enabled)
+ return 0;
+
value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
@@ -144,6 +153,8 @@
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ rgb->enabled = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a066513..214b799 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
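
The ttm_bo.c hunk turns invalidate_caches into an optional driver hook. A standalone sketch of the guard, with hypothetical names, showing why the NULL test must precede the call:

    #include <stdio.h>

    struct driver_ops {
            int (*invalidate_caches)(void);   /* optional; may be NULL */
    };

    static int flush_caches(const struct driver_ops *ops)
    {
            if (ops->invalidate_caches)       /* guard the optional hook */
                    return ops->invalidate_caches();
            return 0;                         /* no hook: nothing to flush */
    }

    int main(void)
    {
            struct driver_ops none = { 0 };
            printf("%d\n", flush_caches(&none));  /* prints 0 instead of crashing */
            return 0;
    }
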
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c..0ce48e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@
vma->vm_private_data = bo;
/*
- * PFNMAP is faster than MIXEDMAP due to reduced page
- * administration. So use MIXEDMAP only if private VMA, where
- * we need to support COW.
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
*/
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
@@ -359,7 +361,7 @@
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8d67b94..0394811 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@
if (obj->vmapping)
udl_gem_vunmap(obj);
- if (gem_obj->import_attach)
+ if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
+ put_device(gem_obj->dev->dev);
+ }
if (obj->pages)
udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@
int ret;
/* need to attach */
+ get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
return ERR_CAST(attach);
+ }
get_dma_buf(dma_buf);
@@ -282,6 +287,6 @@
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
-
+ put_device(dev->dev);
return ERR_PTR(ret);
}
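
The udl_gem.c change pins the device for the lifetime of an imported dma-buf: get_device() before dma_buf_attach(), with matching put_device() calls on both failure paths and in the GEM free path. A kernel-style fragment of the invariant (names from the hunks above; surrounding context elided):

    get_device(dev->dev);
    attach = dma_buf_attach(dma_buf, dev->dev);
    if (IS_ERR(attach)) {
            put_device(dev->dev);     /* early failure: release here */
            return ERR_CAST(attach);
    }
    /*
     * Later failures reach a label that also calls put_device(); on
     * success the reference is held until the object is freed, where
     * drm_prime_gem_destroy() is followed by put_device().
     */

Every exit taken after the acquire releases exactly once; that is the whole fix.
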
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index bb594c1..f58dc7d 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -261,12 +261,7 @@
/* Planar video formats. */
SVGA3D_YV12 = 121,
- /* Shader constant formats. */
- SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
- SVGA3D_SURFACE_SHADERCONST_INT = 123,
- SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
-
- SVGA3D_FORMAT_MAX = 125,
+ SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 9e4be17..0783155 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,7 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20121114"
+#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d4a5a19..04a64b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -188,18 +188,20 @@
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable "
+ "takedown.\n");
+ } else {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
if (bo) {
int ret;
@@ -562,11 +564,12 @@
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
+ } else {
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2aa4bc6..9757b57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -427,8 +427,7 @@
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- (user) ? ttm_bo_type_device :
- ttm_bo_type_kernel, placement,
+ ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 82468d9..e7af580 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@
if (unlikely(ret != 0))
goto out_unlock;
+ /*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+ if (dev_priv->has_mob && req->shareable) {
+ uint32_t backup_handle;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 1146e3b..112f27e 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -538,7 +538,7 @@
g->base = job->gather_addr_phys[i];
- for (j = 0; j < job->num_gathers; j++)
+ for (j = i + 1; j < job->num_gathers; j++)
if (job->gathers[j].bo == g->bo)
job->gathers[j].handled = true;
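
The host1x fix starts the duplicate scan at i + 1, so only gathers that have not yet been processed are flagged as handled. A small runnable sketch of the corrected loop shape, using hypothetical data:

    #include <stdbool.h>
    #include <stdio.h>

    struct gather { int bo; bool handled; };

    int main(void)
    {
            struct gather g[] = { {1, false}, {2, false}, {1, false}, {2, false} };
            int num = sizeof(g) / sizeof(g[0]);

            for (int i = 0; i < num; i++) {
                    if (g[i].handled)
                            continue;
                    printf("patching gather %d (bo %d)\n", i, g[i].bo);
                    for (int j = i + 1; j < num; j++)  /* was: j = 0 */
                            if (g[j].bo == g[i].bo)
                                    g[j].handled = true;
            }
            return 0;
    }

Starting the inner loop past i confines the dedup to the unprocessed tail instead of revisiting entries the outer loop has already dealt with.
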
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index befe0e3..24883b4 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,6 +43,7 @@
#define G25_REV_MIN 0x22
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
+#define G27_2_REV_MIN 0x39
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
@@ -130,6 +131,7 @@
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
{G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
{G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+ {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
};
/* Recalculates X axis value accordingly to currently selected range */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 1235405..2f19b15 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -42,6 +42,7 @@
#define DUALSHOCK4_CONTROLLER_BT BIT(6)
#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
#define MAX_LEDS 4
@@ -499,6 +500,7 @@
__u8 right;
#endif
+ __u8 worker_initialized;
__u8 led_state[MAX_LEDS];
__u8 led_count;
};
@@ -993,22 +995,11 @@
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
}
-static void sony_destroy_ff(struct hid_device *hdev)
-{
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- cancel_work_sync(&sc->state_worker);
-}
-
#else
static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
-
-static void sony_destroy_ff(struct hid_device *hdev)
-{
-}
#endif
static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
ret = sixaxis_set_operational_usb(hdev);
+
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, sixaxis_state_worker);
}
else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@
if (ret < 0)
goto err_stop;
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, dualshock4_state_worker);
} else {
ret = 0;
@@ -1101,9 +1095,11 @@
goto err_stop;
}
- ret = sony_init_ff(hdev);
- if (ret < 0)
- goto err_stop;
+ if (sc->quirks & SONY_FF_SUPPORT) {
+ ret = sony_init_ff(hdev);
+ if (ret < 0)
+ goto err_stop;
+ }
return 0;
err_stop:
@@ -1120,7 +1116,8 @@
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(hdev);
- sony_destroy_ff(hdev);
+ if (sc->worker_initialized)
+ cancel_work_sync(&sc->state_worker);
hid_hw_stop(hdev);
}
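
sony_probe() only runs INIT_WORK() on the USB Sixaxis and DualShock 4 paths, and cancel_work_sync() on a never-initialized work_struct is invalid, hence the worker_initialized flag consulted in remove. A minimal sketch of the guard, with hypothetical names:

    #include <linux/workqueue.h>

    struct my_sc {
            bool worker_initialized;          /* set only after INIT_WORK() */
            struct work_struct state_worker;
    };

    static void my_remove(struct my_sc *sc)
    {
            if (sc->worker_initialized)
                    cancel_work_sync(&sc->state_worker);
    }
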
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cb0137b..ab24ce2 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -320,13 +320,13 @@
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
+ device_destroy(hidraw_class,
+ MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
}
if (!hidraw->open) {
if (!hidraw->exist) {
- device_destroy(hidraw_class,
- MKDEV(hidraw_major, hidraw->minor));
hidraw_table[hidraw->minor] = NULL;
kfree(hidraw);
} else {
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index a762635..029b65e 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -243,7 +243,7 @@
data->temp_min[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_max[index]))
+ data->temp_min[index]))
count = -EIO;
mutex_unlock(&data->update_lock);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f5ed031..de17c55 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -387,7 +387,7 @@
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
- depends on (CPM1 || CPM2) && OF_I2C
+ depends on CPM1 || CPM2
help
This supports the use of the I2C interface on Freescale
processors with CPM1 or CPM2.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index be7f0a2..f3b89a4 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -39,7 +39,9 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 41c64a4..ac2d69e 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -70,7 +70,7 @@
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index f8f2bf8..c197360 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,7 +19,6 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
-#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index d53d91a..a8e174a 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -167,11 +167,10 @@
.wai = ST_GYRO_2_WAI_EXP,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
- [1] = L3GD20H_GYRO_DEV_NAME,
- [2] = LSM330D_GYRO_DEV_NAME,
- [3] = LSM330DLC_GYRO_DEV_NAME,
- [4] = L3G4IS_GYRO_DEV_NAME,
- [5] = LSM330_GYRO_DEV_NAME,
+ [1] = LSM330D_GYRO_DEV_NAME,
+ [2] = LSM330DLC_GYRO_DEV_NAME,
+ [3] = L3G4IS_GYRO_DEV_NAME,
+ [4] = LSM330_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 16b8b8d..23c12f3 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -55,7 +55,6 @@
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 94763e2..b4ad3be 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,7 +54,6 @@
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index f17b4e6..47a6dba 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -103,13 +103,13 @@
/**
* cm32181_read_als_it() - Get sensor integration time (ms)
* @cm32181: pointer of struct cm32181
- * @val: pointer of int to load the als_it value.
+ * @val2: pointer of int to load the als_it value.
*
* Report the current integration time in milliseconds.
*
- * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
-static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
{
u16 als_it;
int i;
@@ -119,8 +119,8 @@
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
if (als_it == als_it_bits[i]) {
- *val = als_it_value[i];
- return IIO_VAL_INT;
+ *val2 = als_it_value[i];
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -221,7 +221,7 @@
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_read_als_it(cm32181, val);
+ ret = cm32181_read_als_it(cm32181, val2);
return ret;
}
@@ -240,7 +240,7 @@
cm32181->calibscale = val;
return val;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_write_als_it(cm32181, val);
+ ret = cm32181_write_als_it(cm32181, val2);
return ret;
}
@@ -264,7 +264,7 @@
n = ARRAY_SIZE(als_it_value);
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "%d ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
return len + sprintf(buf + len, "\n");
}
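
Both light-sensor patches above move integration times to the IIO fixed-point convention: with IIO_VAL_INT_PLUS_MICRO, read_raw() fills *val with the integer part and *val2 with microseconds, and the core renders val + val2/1e6. A runnable illustration of the encoding, using a made-up 800000 us integration time:

    #include <stdio.h>

    int main(void)
    {
            int val = 0, val2 = 800000;       /* as a read_raw() would fill them */

            /* the IIO core formats IIO_VAL_INT_PLUS_MICRO like this */
            printf("%d.%06d\n", val, val2);   /* -> 0.800000 */
            return 0;
    }

This is also why the *_INT_TIME_AVAIL strings change from raw microsecond lists to fractional seconds such as "0.08 0.16 0.32 0.64".
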
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 0a142af..a45e074 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -50,10 +50,10 @@
#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
/* CS_CONF3 channel integration time */
-#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
-#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
-#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
-#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
+#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */
/* PS_CONF1 command code */
#define CM36651_PS_ENABLE 0x00
@@ -64,10 +64,10 @@
#define CM36651_PS_PERS4 0x0C
/* PS_CONF1 command code: integration time */
-#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
-#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
-#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
-#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
+#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
+#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
+#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
+#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */
/* PS_CONF1 command code: duty ratio */
#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
@@ -93,8 +93,8 @@
#define CM36651_CLOSE_PROXIMITY 0x32
#define CM36651_FAR_PROXIMITY 0x33
-#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
-#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
+#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
+#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"
enum cm36651_operation_mode {
CM36651_LIGHT_EN,
@@ -356,30 +356,30 @@
}
static int cm36651_read_int_time(struct cm36651_data *cm36651,
- struct iio_chan_spec const *chan, int *val)
+ struct iio_chan_spec const *chan, int *val2)
{
switch (chan->type) {
case IIO_LIGHT:
if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
- *val = 80000;
+ *val2 = 80000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
- *val = 160000;
+ *val2 = 160000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
- *val = 320000;
+ *val2 = 320000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
- *val = 640000;
+ *val2 = 640000;
else
return -EINVAL;
break;
case IIO_PROXIMITY:
if (cm36651->ps_int_time == CM36651_PS_IT1)
- *val = 320;
+ *val2 = 320;
else if (cm36651->ps_int_time == CM36651_PS_IT2)
- *val = 420;
+ *val2 = 420;
else if (cm36651->ps_int_time == CM36651_PS_IT3)
- *val = 520;
+ *val2 = 520;
else if (cm36651->ps_int_time == CM36651_PS_IT4)
- *val = 640;
+ *val2 = 640;
else
return -EINVAL;
break;
@@ -387,7 +387,7 @@
return -EINVAL;
}
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -459,7 +459,8 @@
ret = cm36651_read_channel(cm36651, chan, val);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm36651_read_int_time(cm36651, chan, val);
+ *val = 0;
+ ret = cm36651_read_int_time(cm36651, chan, val2);
break;
default:
ret = -EINVAL;
@@ -479,7 +480,7 @@
int ret = -EINVAL;
if (mask == IIO_CHAN_INFO_INT_TIME) {
- ret = cm36651_write_int_time(cm36651, chan, val);
+ ret = cm36651_write_int_time(cm36651, chan, val2);
if (ret < 0)
dev_err(&client->dev, "Integration time write failed\n");
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e81c554..f9c12e9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -53,8 +53,8 @@
#include "user.h"
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "April 4, 2008"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index aa03e73..bf90057 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d18d08a..8ee228e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -492,12 +492,11 @@
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
- init_waitqueue_head(&isert_conn->conn_wait);
- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->conn_wait);
+ init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
- mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -688,11 +687,11 @@
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
@@ -712,7 +711,7 @@
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
- wake_up(&isert_conn->conn_wait);
+ complete(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@@ -888,16 +887,17 @@
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
*/
- mutex_lock(&isert_conn->conn_comp_mutex);
- if (coalesce &&
+ mutex_lock(&isert_conn->conn_mutex);
+ if (coalesce && isert_conn->state == ISER_CONN_UP &&
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ tx_desc->llnode_active = true;
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
return;
}
isert_conn->conn_comp_batch = 0;
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1464,7 +1464,7 @@
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1476,7 +1476,7 @@
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1486,7 +1486,7 @@
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
@@ -1549,6 +1549,7 @@
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
cmd->write_data_done = wr->cur_rdma_length;
+ wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1589,7 +1590,7 @@
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_free_conn()
+ * from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1613,6 +1614,7 @@
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
@@ -1691,31 +1693,76 @@
}
static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+ struct llist_node *llnode;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ llnode = llist_del_all(&isert_conn->conn_comp_llist);
+ isert_conn->conn_comp_batch = 0;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
+
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
- if (tx_desc) {
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+ tx_desc->comp_llnode_batch = NULL;
+
+ if (!isert_cmd)
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ else
+ isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
+
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ if (isert_conn->post_recv_buf_count)
+ return;
+
+ isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
}
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- pr_debug("Calling wake_up from isert_cq_comp_err\n");
+ while (atomic_read(&isert_conn->post_send_buf_count))
+ msleep(3000);
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state != ISER_CONN_DOWN)
- isert_conn->state = ISER_CONN_TERMINATING;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
- wake_up(&isert_conn->conn_wait_comp_err);
- }
+ complete(&isert_conn->conn_wait_comp_err);
}
static void
@@ -1740,8 +1787,14 @@
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_comp_err(tx_desc, isert_conn);
+
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ if (tx_desc->llnode_active)
+ continue;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
+ isert_cq_tx_comp_err(tx_desc, isert_conn);
+ }
}
}
@@ -1784,7 +1837,7 @@
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
- isert_cq_comp_err(NULL, isert_conn);
+ isert_cq_rx_comp_err(isert_conn);
}
}
@@ -2202,6 +2255,7 @@
if (!fr_desc->valid) {
memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
wr = &inv_wr;
@@ -2212,6 +2266,7 @@
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
+ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start =
fr_desc->data_frpl->page_list[0] + page_off;
@@ -2377,12 +2432,12 @@
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr, true);
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
isert_cmd);
@@ -2410,12 +2465,12 @@
return rc;
}
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
@@ -2702,22 +2757,11 @@
kfree(isert_np);
}
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
- int ret;
-
- mutex_lock(&isert_conn->conn_mutex);
- ret = (isert_conn->state == state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_free_conn: Starting \n");
+ pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2727,38 +2771,29 @@
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
- if (isert_conn->state == ISER_CONN_UP) {
- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
- isert_conn->state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- wait_event(isert_conn->conn_wait_comp_err,
- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
- isert_put_conn(isert_conn);
- return;
- }
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
return;
}
- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
- isert_conn->state);
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+ wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
isert_put_conn(isert_conn);
}
@@ -2771,6 +2806,7 @@
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
+ .iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 708a069..f6ae7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@
struct isert_cmd *isert_cmd;
struct llist_node *comp_llnode_batch;
struct llist_node comp_llnode;
+ bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -116,8 +118,8 @@
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
- wait_queue_head_t conn_wait;
- wait_queue_head_t conn_wait_comp_err;
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
struct kref conn_kref;
struct list_head conn_fr_pool;
int conn_fr_pool_size;
@@ -126,7 +128,6 @@
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
- struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
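
The isert rework replaces wait queues polled through a state check (the removed isert_check_state()) with completions, which record the done event themselves and need no extra locking on the wait side. A minimal sketch of the pairing, with hypothetical names:

    #include <linux/completion.h>

    struct my_conn {
            struct completion conn_wait;
    };

    static void my_conn_setup(struct my_conn *c)
    {
            init_completion(&c->conn_wait);
    }

    /* signaller, e.g. the disconnect work */
    static void my_conn_finished(struct my_conn *c)
    {
            complete(&c->conn_wait);
    }

    /* waiter, e.g. the wait_conn callback */
    static void my_conn_wait(struct my_conn *c)
    {
            wait_for_completion(&c->conn_wait);
    }

Because a completion remembers a complete() that fires before the wait begins, the mutex-protected state sampling the old code needed is no longer required.
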
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a06e125..ce953d8 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -954,11 +954,13 @@
return -EFAULT;
error = input_ff_upload(dev, &effect, file);
+ if (error)
+ return error;
if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
return -EFAULT;
- return error;
+ return 0;
}
/* Multi-number variable-length handlers */
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index bb3b57b..5ef7fcf 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -76,8 +76,18 @@
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ int val;
- return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+ mutex_lock(&kpad->gpio_lock);
+
+ if (kpad->dir[bank] & bit)
+ val = kpad->dat_out[bank];
+ else
+ val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 7a04f54..ef2e281 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,7 +37,6 @@
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
- struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
int ret;
if (!haptics->arizona->dapm) {
@@ -67,13 +66,10 @@
return;
}
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -81,21 +77,14 @@
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
-
- mutex_unlock(dapm_mutex);
-
} else {
/* This disable sequence will be a noop if already enabled */
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -103,12 +92,9 @@
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
- mutex_unlock(dapm_mutex);
-
ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
ARIZONA_HAP_CTRL_MASK,
@@ -155,16 +141,11 @@
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
- struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
cancel_work_sync(&haptics->work);
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (haptics->arizona->dapm)
snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
-
- mutex_unlock(dapm_mutex);
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 1f695f2..184c8f2 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -27,29 +27,32 @@
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
- int key_stat;
+ int ret;
- key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
- if (key_stat < 0) {
+ ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+ if (ret < 0) {
dev_err(onkey->da9052->dev,
- "Failed to read onkey event %d\n", key_stat);
+ "Failed to read onkey event err=%d\n", ret);
} else {
/*
* Since interrupt for deassertion of ONKEY pin is not
* generated, onkey event state determines the onkey
* button state.
*/
- key_stat &= DA9052_EVENTB_ENONKEY;
- input_report_key(onkey->input, KEY_POWER, key_stat);
- input_sync(onkey->input);
- }
+ bool pressed = !(ret & DA9052_STATUSA_NONKEY);
- /*
- * Interrupt is generated only when the ONKEY pin is asserted.
- * Hence the deassertion of the pin is simulated through work queue.
- */
- if (key_stat)
- schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+ input_report_key(onkey->input, KEY_POWER, pressed);
+ input_sync(onkey->input);
+
+ /*
+ * Interrupt is generated only when the ONKEY pin
+ * is asserted. Hence the deassertion of the pin
+ * is simulated through work queue.
+ */
+ if (pressed)
+ schedule_delayed_work(&onkey->work,
+ msecs_to_jiffies(50));
+ }
}
static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 87095e2..8af34ff 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -409,7 +409,6 @@
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9..d8d49d1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
+
+static const int *quirk_min_max;
+
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1485,10 +1496,54 @@
{ }
};
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+ {
+ /* Lenovo ThinkPad Helix */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ .driver_data = (int []){1024, 5052, 2258, 4832},
+ },
+ {
+ /* Lenovo ThinkPad X240 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
+ const struct dmi_system_id *min_max_dmi;
+
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c3..b604564 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@
struct device dev;
struct cdev cdev;
bool exist;
- bool is_mixdev;
struct list_head mixdev_node;
bool opened_by_mixdev;
@@ -77,6 +76,9 @@
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
+
+ int (*open_device)(struct mousedev *mousedev);
+ void (*close_device)(struct mousedev *mousedev);
};
enum mousedev_emul {
@@ -116,9 +118,6 @@
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
@@ -428,9 +427,7 @@
if (retval)
return retval;
- if (mousedev->is_mixdev)
- mixdev_open_devices();
- else if (!mousedev->exist)
+ if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@
{
mutex_lock(&mousedev->mutex);
- if (mousedev->is_mixdev)
- mixdev_close_devices();
- else if (mousedev->exist && !--mousedev->open)
+ if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ int error;
- if (mousedev_mix->open++)
- return;
+ error = mutex_lock_interruptible(&mixdev->mutex);
+ if (error)
+ return error;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (!mousedev->opened_by_mixdev) {
- if (mousedev_open_device(mousedev))
- continue;
+ if (!mixdev->open++) {
+ struct mousedev *mousedev;
- mousedev->opened_by_mixdev = true;
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (!mousedev->opened_by_mixdev) {
+ if (mousedev_open_device(mousedev))
+ continue;
+
+ mousedev->opened_by_mixdev = true;
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
+ return 0;
}
/*
@@ -481,19 +484,22 @@
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ mutex_lock(&mixdev->mutex);
- if (--mousedev_mix->open)
- return;
+ if (!--mixdev->open) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (mousedev->opened_by_mixdev) {
- mousedev->opened_by_mixdev = false;
- mousedev_close_device(mousedev);
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (mousedev->opened_by_mixdev) {
+ mousedev->opened_by_mixdev = false;
+ mousedev_close_device(mousedev);
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
}
@@ -522,7 +528,7 @@
mousedev_detach_client(mousedev, client);
kfree(client);
- mousedev_close_device(mousedev);
+ mousedev->close_device(mousedev);
return 0;
}
@@ -550,7 +556,7 @@
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
- error = mousedev_open_device(mousedev);
+ error = mousedev->open_device(mousedev);
if (error)
goto err_free_client;
@@ -861,16 +867,21 @@
if (mixdev) {
dev_set_name(&mousedev->dev, "mice");
+
+ mousedev->open_device = mixdev_open_devices;
+ mousedev->close_device = mixdev_close_devices;
} else {
int dev_no = minor;
/* Normalize device number if it falls into legacy range */
if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
dev_no -= MOUSEDEV_MINOR_BASE;
dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+ mousedev->open_device = mousedev_open_device;
+ mousedev->close_device = mousedev_close_device;
}
mousedev->exist = true;
- mousedev->is_mixdev = mixdev;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
- if (!mousedev->is_mixdev)
+ if (mousedev != mousedev_mix)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}
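
The mousedev refactor drops the is_mixdev flag tested at every call site in favour of per-device open_device/close_device function pointers chosen once at creation. A runnable sketch of that design choice, with hypothetical names:

    #include <stdio.h>

    struct dev {
            int  (*open_device)(struct dev *d);
            void (*close_device)(struct dev *d);
    };

    static int  plain_open(struct dev *d)  { puts("open one device");  return 0; }
    static void plain_close(struct dev *d) { puts("close one device"); }
    static int  mix_open(struct dev *d)    { puts("open all devices"); return 0; }
    static void mix_close(struct dev *d)   { puts("close all devices"); }

    int main(void)
    {
            struct dev mouse = { plain_open, plain_close };  /* "mouse0" */
            struct dev mice  = { mix_open,   mix_close  };   /* "mice"   */

            mouse.open_device(&mouse);   /* call sites no longer branch */
            mice.open_device(&mice);
            mouse.close_device(&mouse);
            mice.close_device(&mice);
            return 0;
    }
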
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8911850..1d9ab39 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
/* Stage-1 PTE */
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
@@ -191,6 +190,9 @@
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
@@ -393,7 +395,7 @@
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- struct mutex lock;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -632,6 +634,28 @@
return IRQ_HANDLED;
}
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+ size_t size)
+{
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb();
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
+}
+
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
u32 reg;
@@ -650,11 +674,16 @@
if (smmu->version == 1)
reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
- /* Use the weakest memory type, so it is overridden by the pte */
- if (stage1)
- reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
- else
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ } else {
reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+ }
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
if (smmu->version > 1) {
@@ -715,6 +744,8 @@
}
/* TTBR0 */
+ arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+ PTRS_PER_PGD * sizeof(pgd_t));
reg = __pa(root_cfg->pgd);
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -901,7 +932,7 @@
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- mutex_init(&smmu_domain->lock);
+ spin_lock_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1128,6 +1159,7 @@
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *device_smmu = dev->archdata.iommu;
struct arm_smmu_master *master;
+ unsigned long flags;
if (!device_smmu) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1138,7 +1170,7 @@
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, flags);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1185,7 @@
dev_name(device_smmu->dev));
goto err_unlock;
}
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1195,7 @@
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
@@ -1177,23 +1209,6 @@
arm_smmu_domain_remove_master(smmu_domain, master);
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of recursion
- * here as the SMMU table walker will not be wired through another
- * SMMU.
- */
- if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
-}
-
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
unsigned long end)
{
@@ -1210,12 +1225,11 @@
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
- pgtable_t table = alloc_page(PGALLOC_GFP);
+ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
if (!table)
return -ENOMEM;
- arm_smmu_flush_pgtable(smmu, page_address(table),
- ARM_SMMU_PTE_HWTABLE_SIZE);
+ arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
if (!pgtable_page_ctor(table)) {
__free_page(table);
return -ENOMEM;
@@ -1317,9 +1331,15 @@
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_none(*pud)) {
- pmd = pmd_alloc_one(NULL, addr);
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+ pud_populate(NULL, pud, pmd);
+ arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+
+ pmd += pmd_index(addr);
} else
#endif
pmd = pmd_offset(pud, addr);
@@ -1328,8 +1348,6 @@
next = pmd_addr_end(addr, end);
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
flags, stage);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1346,9 +1364,15 @@
#ifndef __PAGETABLE_PUD_FOLDED
if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
+ pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+ pgd_populate(NULL, pgd, pud);
+ arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+
+ pud += pud_index(addr);
} else
#endif
pud = pud_offset(pgd, addr);
@@ -1357,8 +1381,6 @@
next = pud_addr_end(addr, end);
ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
flags, stage);
- pgd_populate(NULL, pud, pgd);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
phys += next - addr;
} while (pud++, addr = next, addr < end);
@@ -1375,6 +1397,7 @@
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu;
+ unsigned long irqflags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
@@ -1397,7 +1420,7 @@
if (paddr & ~output_mask)
return -ERANGE;
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, irqflags);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1413,11 +1436,7 @@
} while (pgd++, iova != end);
out_unlock:
- mutex_unlock(&smmu_domain->lock);
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- dsb();
+ spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
return ret;
}
@@ -1987,8 +2006,10 @@
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype))
bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
return 0;
}
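
Much of the arm-smmu diff follows from one constraint: map/unmap can be reached from atomic context, so the per-domain mutex becomes a spinlock and every allocation made while holding it switches to GFP_ATOMIC, with the page-table flushing moved under the lock as well. A kernel-style sketch of the resulting shape, names hypothetical and error/free paths elided:

    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    struct my_domain {
            spinlock_t lock;                      /* was: struct mutex */
    };

    static int my_map(struct my_domain *dom)
    {
            unsigned long flags;
            unsigned long table;

            spin_lock_irqsave(&dom->lock, flags);
            table = get_zeroed_page(GFP_ATOMIC);  /* must not sleep here */
            /* ... install entries, flush them for the hardware walker ... */
            spin_unlock_irqrestore(&dom->lock, flags);

            return table ? 0 : -ENOMEM;
    }
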
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index d97fbe4..80fffba 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -354,8 +354,8 @@
return -ENOMEM; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data)
{
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 92c41ab..2cb474a 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -515,7 +515,7 @@
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 8e94d7a..c16c186 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -201,7 +201,7 @@
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d400..8e41be6 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -111,7 +111,8 @@
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *d = irq_get_handler_data(irq);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
+
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
gc->mask_cache;
@@ -123,6 +124,19 @@
}
}
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of the IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmasking.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_ack(d);
+ ct->chip.irq_unmask(d);
+ return 0;
+}
+
static int __init orion_bridge_irq_init(struct device_node *np,
struct device_node *parent)
{
@@ -143,7 +157,7 @@
}
ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->name);
return ret;
@@ -176,12 +190,14 @@
gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
- /* mask all interrupts */
+ /* mask and clear all interrupts */
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
irq_set_handler_data(irq, domain);
irq_set_chained_handler(irq, orion_bridge_irq_handler);
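
The new orion_bridge_irq_startup acks before unmasking because IRQ_CAUSE latches
events even while the line is masked. A minimal model of that hazard, with the
cause/mask registers faked as plain variables (hypothetical, for illustration):

#include <stdio.h>

static unsigned cause, mask;            /* stand-ins for IRQ_CAUSE / IRQ_MASK */

static void device_event(unsigned bit) { cause |= bit; } /* latches even when masked */
static void ack(unsigned bit)          { cause &= ~bit; }
static void unmask(unsigned bit)       { mask |= bit; }
static int  pending(void)              { return cause & mask; }

int main(void)
{
	device_event(0x1);       /* event arrives while the irq is still masked */

	/* Without the startup ack: unmasking alone delivers the stale event. */
	unmask(0x1);
	printf("no ack:      pending=%#x (spurious)\n", pending());

	/* With the startup semantics above: ack first, then unmask. */
	cause = mask = 0;
	device_event(0x1);
	ack(0x1);                /* clear the stale latched cause */
	unmask(0x1);
	printf("ack+unmask:  pending=%#x\n", pending());
	return 0;
}
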
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f046865..9816c51 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,16 +16,6 @@
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
-config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support"
- depends on TTY
- help
- This option will enhance the capabilities of the /dev/capi20
- interface. It will provide a means of moving a data connection,
- established via the usual /dev/capi20 interface to a special tty
- device. If you want to use pppd with pppdcapiplugin to dial up to
- your ISP, say Y here.
-
config ISDN_CAPI_CAPI20
tristate "CAPI2.0 /dev/capi support"
help
@@ -34,6 +24,16 @@
standardized libcapi20 to access this functionality. You should say
Y/M here.
+config ISDN_CAPI_MIDDLEWARE
+ bool "CAPI2.0 Middleware support"
+ depends on ISDN_CAPI_CAPI20 && TTY
+ help
+ This option will enhance the capabilities of the /dev/capi20
+ interface. It will provide a means of moving a data connection,
+ established via the usual /dev/capi20 interface to a special tty
+ device. If you want to use pppd with pppdcapiplugin to dial up to
+ your ISP, say Y here.
+
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
depends on ISDN_I4L
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9a06fe8..95ad936 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -254,16 +254,6 @@
---help---
Provides thin provisioning and snapshots that share a data store.
-config DM_DEBUG_BLOCK_STACK_TRACING
- boolean "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
- select STACKTRACE
- ---help---
- Enable this for messages that may help debug problems with the
- block manager locking used by thin provisioning and caching.
-
- If unsure, say N.
-
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 1e018e9..0e385e4 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -872,7 +872,7 @@
{
struct mq_policy *mq = to_mq_policy(p);
- kfree(mq->table);
+ vfree(mq->table);
epool_exit(&mq->cache_pool);
epool_exit(&mq->pre_cache_pool);
kfree(mq);
@@ -1245,7 +1245,7 @@
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
- mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
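
The kzalloc-to-vzalloc switch matters because kzalloc needs physically contiguous
pages, which large hash tables quickly exhaust; vzalloc only needs virtually
contiguous memory. A rough size check under hypothetical geometry (pointer-sized
bucket heads assumed; the driver sizes the table at roughly cache_size / 2 buckets
via next_power(..., 16)):

#include <stdio.h>

int main(void)
{
	/* Hypothetical: 1 TiB cache with 256 KiB blocks -> 4M cache blocks */
	unsigned long long cache_blocks = (1ULL << 40) / (256ULL << 10);
	unsigned long long nr_buckets = cache_blocks / 2;  /* 2^21, a power of two */
	unsigned long long bytes = nr_buckets * sizeof(void *);

	printf("hash table: %llu buckets, %llu KiB\n", nr_buckets, bytes >> 10);
	/* 16 MiB of physically contiguous memory is an unreasonable kzalloc;
	 * vzalloc satisfies it from scattered pages. */
	return 0;
}
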
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ffd472e..074b9c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -289,6 +289,7 @@
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+ struct dm_hook_info hook_info;
/*
* writethrough fields. These MUST remain at the end of this
@@ -297,7 +298,6 @@
*/
struct cache *cache;
dm_cblock_t cblock;
- struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -671,15 +671,16 @@
dm_cblock_t cblock)
{
sector_t bi_sector = bio->bi_iter.bi_sector;
+ sector_t block = from_cblock(cblock);
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector =
- (from_cblock(cblock) * cache->sectors_per_block) +
+ (block * cache->sectors_per_block) +
sector_div(bi_sector, cache->sectors_per_block);
else
bio->bi_iter.bi_sector =
- (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (block << cache->sectors_per_block_shift) |
(bi_sector & (cache->sectors_per_block - 1));
}
@@ -978,12 +979,13 @@
int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
+ sector_t cblock = from_cblock(mg->cblock);
o_region.bdev = cache->origin_dev->bdev;
o_region.count = cache->sectors_per_block;
c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.sector = cblock * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
if (mg->writeback || mg->demote) {
@@ -1010,13 +1012,15 @@
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
unsigned long flags;
+ dm_unhook_bio(&pb->hook_info, bio);
+
if (err)
mg->err = true;
+ mg->requeue_holder = false;
+
spin_lock_irqsave(&cache->lock, flags);
list_add_tail(&mg->list, &cache->completed_migrations);
- dm_unhook_bio(&pb->hook_info, bio);
- mg->requeue_holder = false;
spin_unlock_irqrestore(&cache->lock, flags);
wake_worker(cache);
@@ -2461,20 +2465,18 @@
bool discarded_block;
struct dm_bio_prison_cell *cell;
struct policy_result lookup_result;
- struct per_bio_data *pb;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
* the end of the origin device. We don't cache these.
* Just remap to the origin and carry on.
*/
- remap_to_origin_clear_discard(cache, bio, block);
+ remap_to_origin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
-
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
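
remap_to_cache above computes the cache-device sector either with a multiply plus
sector_div remainder or, when the block size is a power of two, with a shift and
mask. A worked standalone version of the power-of-two path (hypothetical geometry):

#include <stdio.h>

int main(void)
{
	unsigned long sectors_per_block = 128;  /* 64 KiB blocks, power of two */
	unsigned shift = 7;                     /* log2(128) */
	unsigned long cblock = 5;               /* cache block chosen by the policy */
	unsigned long bi_sector = 1000;         /* original bio sector */

	/* (block << shift) | (bi_sector & (sectors_per_block - 1)) */
	unsigned long remapped = (cblock << shift) |
				 (bi_sector & (sectors_per_block - 1));

	/* 5 * 128 + (1000 % 128) = 640 + 104 = 744 */
	printf("remapped sector = %lu\n", remapped);
	return 0;
}
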
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b2b8a10..3842ac7 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,29 +201,28 @@
/*
* Functions for getting the pages from a bvec.
*/
-static void bio_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+ unsigned long *len, unsigned *offset)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
- *p = bvec.bv_page;
- *len = bvec.bv_len;
- *offset = bvec.bv_offset;
+ struct bio_vec *bvec = dp->context_ptr;
+ *p = bvec->bv_page;
+ *len = bvec->bv_len - dp->context_u;
+ *offset = bvec->bv_offset + dp->context_u;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
-
- bio_advance(bio, bvec.bv_len);
+ struct bio_vec *bvec = dp->context_ptr;
+ dp->context_ptr = bvec + 1;
+ dp->context_u = 0;
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = bio;
+ dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ dp->context_u = bio->bi_iter.bi_bvec_done;
}
/*
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6eb9dc9..422a9fde 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1626,8 +1626,11 @@
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+ int err = scsi_verify_blk_ioctl(NULL, cmd);
+ if (err)
+ r = err;
+ }
if (r == -ENOTCONN && !fatal_signal_pending(current))
queue_work(kmultipathd, &m->process_queued_ios);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index f284e0b..7dfdb5c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1244,6 +1244,9 @@
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
+
+ atomic_inc(&bio->bi_remaining);
+
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index afc3d01..d6e8817 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -546,6 +546,9 @@
r = insert_exceptions(ps, area, callback, callback_context,
&full);
+ if (!full)
+ memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
dm_bufio_release(bp);
dm_bufio_forget(client, chunk);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 7da3476..fb9efc8 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -76,7 +76,7 @@
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
@@ -483,7 +483,7 @@
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
- disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
@@ -651,7 +651,7 @@
{
int r;
- pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+ pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_METADATA_CACHE_SIZE,
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
@@ -1489,6 +1489,23 @@
return r;
}
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
+{
+ bool r = false;
+ struct dm_thin_device *td, *tmp;
+
+ down_read(&pmd->root_lock);
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (td->changed) {
+ r = td->changed;
+ break;
+ }
+ }
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
bool r;
@@ -1738,3 +1755,38 @@
return r;
}
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ down_write(&pmd->root_lock);
+ pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+ r = superblock_lock(pmd, &sblock);
+ if (r) {
+ DMERR("couldn't read superblock");
+ goto out;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+ dm_bm_unlock(sblock);
+out:
+ up_write(&pmd->root_lock);
+ return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+ bool needs_check;
+
+ down_read(&pmd->root_lock);
+ needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+ up_read(&pmd->root_lock);
+
+ return needs_check;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 9a36856..e3c857d 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -9,16 +9,14 @@
#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-metadata.h"
-#define THIN_METADATA_BLOCK_SIZE 4096
+#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
/*
* The metadata device is currently limited in size.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
*/
-#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
/*
* A metadata device larger than 16GB triggers a warning.
@@ -27,6 +25,11 @@
/*----------------------------------------------------------------*/
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
struct dm_pool_metadata;
struct dm_thin_device;
@@ -161,6 +164,8 @@
*/
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
+
bool dm_thin_aborted_changes(struct dm_thin_device *td);
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
@@ -202,6 +207,12 @@
dm_sm_threshold_fn fn,
void *context);
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
/*----------------------------------------------------------------*/
#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index faaf944..be70d38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -130,10 +130,11 @@
struct dm_thin_new_mapping;
/*
- * The pool runs in 3 modes. Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes. Ordered in degraded order for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
+ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
PM_READ_ONLY, /* metadata may not be changed */
PM_FAIL, /* all I/O fails */
};
@@ -198,7 +199,6 @@
};
static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
/*
@@ -226,6 +226,7 @@
struct pool *pool;
struct dm_thin_device *td;
+ bool requeue_mode:1;
};
/*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@
struct dm_thin_new_mapping *overwrite_mapping;
};
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
struct bio *bio;
struct bio_list bios;
+ unsigned long flags;
bio_list_init(&bios);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
bio_list_merge(&bios, master);
bio_list_init(master);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@
static void requeue_io(struct thin_c *tc)
{
struct pool *pool = tc->pool;
+
+ requeue_bio_list(tc, &pool->deferred_bios);
+ requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+ struct bio *bio;
unsigned long flags;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
spin_lock_irqsave(&pool->lock, flags);
- __requeue_bio_list(tc, &pool->deferred_bios);
- __requeue_bio_list(tc, &pool->retry_on_resume_list);
+ bio_list_merge(&bios, &pool->retry_on_resume_list);
+ bio_list_init(&pool->retry_on_resume_list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
}
/*
@@ -925,13 +944,15 @@
}
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
dm_block_t free_blocks;
struct pool *pool = tc->pool;
- if (get_pool_mode(pool) != PM_WRITE)
+ if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
return -EINVAL;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -958,7 +979,7 @@
}
if (!free_blocks) {
- out_of_data_space(pool);
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
return -ENOSPC;
}
}
@@ -988,15 +1009,32 @@
spin_unlock_irqrestore(&pool->lock, flags);
}
+static bool should_error_unserviceable_bio(struct pool *pool)
+{
+ enum pool_mode m = get_pool_mode(pool);
+
+ switch (m) {
+ case PM_WRITE:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+ return true;
+
+ case PM_OUT_OF_DATA_SPACE:
+ return pool->pf.error_if_no_space;
+
+ case PM_READ_ONLY:
+ case PM_FAIL:
+ return true;
+ default:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+ return true;
+ }
+}
+
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
- /*
- * When pool is read-only, no cell locking is needed because
- * nothing is changing.
- */
- WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
-
- if (pool->pf.error_if_no_space)
+ if (should_error_unserviceable_bio(pool))
bio_io_error(bio);
else
retry_on_resume(bio);
@@ -1007,11 +1045,20 @@
struct bio *bio;
struct bio_list bios;
+ if (should_error_unserviceable_bio(pool)) {
+ cell_error(pool, cell);
+ return;
+ }
+
bio_list_init(&bios);
cell_release(pool, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- handle_unserviceable_bio(pool, bio);
+ if (should_error_unserviceable_bio(pool))
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ else
+ while ((bio = bio_list_pop(&bios)))
+ retry_on_resume(bio);
}
static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1296,6 +1343,11 @@
}
}
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+ bio_endio(bio, 0);
+}
+
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
bio_io_error(bio);
@@ -1328,6 +1380,11 @@
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ continue;
+ }
+
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
@@ -1357,7 +1414,8 @@
bio_list_init(&pool->deferred_flush_bios);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+ if (bio_list_empty(&bios) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
@@ -1393,51 +1451,134 @@
/*----------------------------------------------------------------*/
+struct noflush_work {
+ struct work_struct worker;
+ struct thin_c *tc;
+
+ atomic_t complete;
+ wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+ atomic_set(&w->complete, 1);
+ wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = true;
+ requeue_io(w->tc);
+ complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = false;
+ complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+ struct noflush_work w;
+
+ INIT_WORK(&w.worker, fn);
+ w.tc = tc;
+ atomic_set(&w.complete, 0);
+ init_waitqueue_head(&w.wait);
+
+ queue_work(tc->pool->wq, &w.worker);
+
+ wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s mode",
+ dm_device_name(pool->pool_md), new_mode);
+}
+
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
- int r;
- enum pool_mode old_mode = pool->pf.mode;
+ struct pool_c *pt = pool->ti->private;
+ bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+ enum pool_mode old_mode = get_pool_mode(pool);
+
+ /*
+ * Never allow the pool to transition to PM_WRITE mode if user
+ * intervention is required to verify metadata and data consistency.
+ */
+ if (new_mode == PM_WRITE && needs_check) {
+ DMERR("%s: unable to switch pool to write mode until repaired.",
+ dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ new_mode = old_mode;
+ else
+ new_mode = PM_READ_ONLY;
+ }
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode.
+ */
+ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
switch (new_mode) {
case PM_FAIL:
if (old_mode != new_mode)
- DMERR("%s: switching pool to failure mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
pool->process_prepared_discard = process_prepared_discard_fail;
+
+ error_retry_list(pool);
break;
case PM_READ_ONLY:
if (old_mode != new_mode)
- DMERR("%s: switching pool to read-only mode",
- dm_device_name(pool->pool_md));
- r = dm_pool_abort_metadata(pool->pmd);
- if (r) {
- DMERR("%s: aborting transaction failed",
- dm_device_name(pool->pool_md));
- new_mode = PM_FAIL;
- set_pool_mode(pool, new_mode);
- } else {
- dm_pool_metadata_read_only(pool->pmd);
- pool->process_bio = process_bio_read_only;
- pool->process_discard = process_discard;
- pool->process_prepared_mapping = process_prepared_mapping_fail;
- pool->process_prepared_discard = process_prepared_discard_passdown;
- }
+ notify_of_pool_mode_change(pool, "read-only");
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_bio_success;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ error_retry_list(pool);
+ break;
+
+ case PM_OUT_OF_DATA_SPACE:
+ /*
+ * Ideally we'd never hit this state; the low water mark
+ * would trigger userland to extend the pool before we
+ * completely run out of data space. However, many small
+ * IOs to unprovisioned space can consume data space at an
+ * alarming rate. Adjust your low water mark if you're
+ * frequently seeing this mode.
+ */
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "out-of-data-space");
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
break;
case PM_WRITE:
if (old_mode != new_mode)
- DMINFO("%s: switching pool to write mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "write");
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1447,32 +1588,35 @@
}
pool->pf.mode = new_mode;
+ /*
+ * The pool mode may have changed, sync it so bind_control_target()
+ * doesn't cause an unexpected mode transition on resume.
+ */
+ pt->adjusted_pf.mode = new_mode;
}
-/*
- * Rather than calling set_pool_mode directly, use these which describe the
- * reason for mode degradation.
- */
-static void out_of_data_space(struct pool *pool)
+static void abort_transaction(struct pool *pool)
{
- DMERR_LIMIT("%s: no free data space available.",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
+ const char *dev_name = dm_device_name(pool->pool_md);
+
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_pool_abort_metadata(pool->pmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
+
+ if (dm_pool_metadata_set_needs_check(pool->pmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
}
static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
- dm_block_t free_blocks;
-
DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
dm_device_name(pool->pool_md), op, r);
- if (r == -ENOSPC &&
- !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
- !free_blocks)
- DMERR_LIMIT("%s: no free metadata space available.",
- dm_device_name(pool->pool_md));
-
+ abort_transaction(pool);
set_pool_mode(pool, PM_READ_ONLY);
}
@@ -1523,6 +1667,11 @@
thin_hook_bio(tc, bio);
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ return DM_MAPIO_SUBMITTED;
+ }
+
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
@@ -1686,7 +1835,7 @@
/*
* We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
- enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode old_mode = get_pool_mode(pool);
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
@@ -1700,16 +1849,6 @@
pool->pf = pt->adjusted_pf;
pool->low_water_blocks = pt->low_water_blocks;
- /*
- * If we were in PM_FAIL mode, rollback of metadata failed. We're
- * not going to recover without a thin_repair. So we never let the
- * pool move out of the old mode. On the other hand a PM_READ_ONLY
- * may have been due to a lack of metadata or data space, and may
- * now work (ie. if the underlying devices have been resized).
- */
- if (old_mode == PM_FAIL)
- new_mode = old_mode;
-
set_pool_mode(pool, new_mode);
return 0;
@@ -1999,16 +2138,27 @@
dm_table_event(pool->ti->table);
}
-static sector_t get_metadata_dev_size(struct block_device *bdev)
+static sector_t get_dev_size(struct block_device *bdev)
{
- sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static void warn_if_metadata_device_too_big(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_dev_size(bdev);
char buffer[BDEVNAME_SIZE];
- if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
- metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
- }
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_dev_size(bdev);
+
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
+ metadata_dev_size = THIN_METADATA_MAX_SECTORS;
return metadata_dev_size;
}
@@ -2017,7 +2167,7 @@
{
sector_t metadata_dev_size = get_metadata_dev_size(bdev);
- sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
return metadata_dev_size;
}
@@ -2095,12 +2245,7 @@
ti->error = "Error opening metadata block device";
goto out_unlock;
}
-
- /*
- * Run for the side-effect of possibly issuing a warning if the
- * device is too big.
- */
- (void) get_metadata_dev_size(metadata_dev->bdev);
+ warn_if_metadata_device_too_big(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -2246,6 +2391,12 @@
return -EINVAL;
} else if (data_size > sb_data_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the data device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
if (sb_data_size)
DMINFO("%s: growing the data device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
@@ -2287,6 +2438,13 @@
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the metadata device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
+ warn_if_metadata_device_too_big(pool->md_dev);
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
@@ -2673,7 +2831,9 @@
else
DMEMIT("- ");
- if (pool->pf.mode == PM_READ_ONLY)
+ if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ DMEMIT("out_of_data_space ");
+ else if (pool->pf.mode == PM_READ_ONLY)
DMEMIT("ro ");
else
DMEMIT("rw ");
@@ -2787,7 +2947,7 @@
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2894,6 +3054,7 @@
if (get_pool_mode(tc->pool) == PM_FAIL) {
ti->error = "Couldn't open thin device, Pool is in fail mode";
+ r = -EINVAL;
goto bad_thin_open;
}
@@ -2905,7 +3066,7 @@
r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
if (r)
- goto bad_thin_open;
+ goto bad_target_max_io_len;
ti->num_flush_bios = 1;
ti->flush_supported = true;
@@ -2926,6 +3087,8 @@
return 0;
+bad_target_max_io_len:
+ dm_pool_close_thin_device(tc->td);
bad_thin_open:
__pool_dec(tc->pool);
bad_pool_lookup:
@@ -2986,10 +3149,23 @@
return 0;
}
+static void thin_presuspend(struct dm_target *ti)
+{
+ struct thin_c *tc = ti->private;
+
+ if (dm_noflush_suspending(ti))
+ noflush_work(tc, do_noflush_start);
+}
+
static void thin_postsuspend(struct dm_target *ti)
{
- if (dm_noflush_suspending(ti))
- requeue_io((struct thin_c *)ti->private);
+ struct thin_c *tc = ti->private;
+
+ /*
+ * The dm_noflush_suspending flag has been cleared by now, so
+ * unfortunately we must always run this.
+ */
+ noflush_work(tc, do_noflush_stop);
}
/*
@@ -3074,12 +3250,13 @@
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
.map = thin_map,
.end_io = thin_endio,
+ .presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
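
The enum pool_mode comment at the top of this diff says the modes are kept "in
degraded order for comparisons"; inserting PM_OUT_OF_DATA_SPACE between PM_WRITE and
PM_READ_ONLY preserves that, so a plain numeric compare still answers "is this
transition an upgrade?". A small sketch of the idiom (names mirror the driver,
heavily simplified):

#include <stdio.h>

enum pool_mode {            /* degraded order: higher == more degraded */
	PM_WRITE,
	PM_OUT_OF_DATA_SPACE,
	PM_READ_ONLY,
	PM_FAIL,
};

static const char *name(enum pool_mode m)
{
	static const char *names[] = {
		"write", "out-of-data-space", "read-only", "fail",
	};
	return names[m];
}

int main(void)
{
	enum pool_mode old_mode = PM_OUT_OF_DATA_SPACE;
	enum pool_mode new_mode = PM_WRITE;

	if (new_mode < old_mode)        /* numerically lower == less degraded */
		printf("upgrading pool from %s to %s\n",
		       name(old_mode), name(new_mode));
	return 0;
}
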
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 19b2687..0c2dec7 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -6,3 +6,13 @@
---help---
Library providing immutable on-disk data structure support for
device-mapper targets such as the thin provisioning target.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+ boolean "Keep stack trace of persistent data block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
+ select STACKTRACE
+ ---help---
+ Enable this for messages that may help debug problems with the
+ block manager locking used by thin provisioning and caching.
+
+ If unsure, say N.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 536782e..786b689 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -91,6 +91,69 @@
dm_block_t block;
};
+struct bop_ring_buffer {
+ unsigned begin;
+ unsigned end;
+ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+ brb->begin = 0;
+ brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+ return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+ unsigned r = old + 1;
+ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+ enum block_op_type type, dm_block_t b)
+{
+ struct block_op *bop;
+ unsigned next = brb_next(brb, brb->end);
+
+ /*
+ * We don't allow the last bop to be filled; this way we can
+ * differentiate between full and empty.
+ */
+ if (next == brb->begin)
+ return -ENOMEM;
+
+ bop = brb->bops + brb->end;
+ bop->type = type;
+ bop->block = b;
+
+ brb->end = next;
+
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+ struct block_op *bop;
+
+ if (brb_empty(brb))
+ return -ENODATA;
+
+ bop = brb->bops + brb->begin;
+ result->type = bop->type;
+ result->block = bop->block;
+
+ brb->begin = brb_next(brb, brb->begin);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
struct sm_metadata {
struct dm_space_map sm;
@@ -101,25 +164,20 @@
unsigned recursion_count;
unsigned allocated_this_transaction;
- unsigned nr_uncommitted;
- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+ struct bop_ring_buffer uncommitted;
struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
- struct block_op *op;
+ int r = brb_push(&smm->uncommitted, type, b);
- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+ if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
}
- op = smm->uncommitted + smm->nr_uncommitted++;
- op->type = type;
- op->block = b;
-
return 0;
}
@@ -158,11 +216,17 @@
return -ENOMEM;
}
- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
- while (smm->nr_uncommitted && !r) {
- smm->nr_uncommitted--;
- r = commit_bop(smm, smm->uncommitted +
- smm->nr_uncommitted);
+ if (smm->recursion_count == 1) {
+ while (!brb_empty(&smm->uncommitted)) {
+ struct block_op bop;
+
+ r = brb_pop(&smm->uncommitted, &bop);
+ if (r) {
+ DMERR("bug in bop ring buffer");
+ break;
+ }
+
+ r = commit_bop(smm, &bop);
if (r)
break;
}
@@ -217,7 +281,8 @@
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
- int r, i;
+ int r;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
unsigned adjustment = 0;
@@ -225,8 +290,10 @@
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -254,7 +321,8 @@
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
- int r, i, adjustment = 0;
+ int r, adjustment = 0;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -262,8 +330,11 @@
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -671,7 +742,7 @@
smm->begin = superblock + 1;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -680,6 +751,8 @@
if (r)
return r;
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
r = sm_ll_extend(&smm->ll, nr_blocks);
if (r)
return r;
@@ -713,7 +786,7 @@
smm->begin = 0;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
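
brb_push/brb_pop above replace the fixed counter-indexed array with a ring buffer
that deliberately never fills its last slot, so begin == end unambiguously means
empty. A self-contained model of that discipline (4 slots here, mirroring
MAX_RECURSIVE_ALLOCATIONS + 1 in spirit):

#include <stdio.h>

#define SLOTS 4   /* usable capacity is SLOTS - 1 */

struct ring { unsigned begin, end; int slot[SLOTS]; };

static unsigned ring_next(unsigned i) { return (i + 1 == SLOTS) ? 0 : i + 1; }

static int ring_push(struct ring *r, int v)
{
	unsigned next = ring_next(r->end);

	if (next == r->begin)          /* would make begin == end: reject */
		return -1;
	r->slot[r->end] = v;
	r->end = next;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	if (r->begin == r->end)        /* begin == end always means empty */
		return -1;
	*v = r->slot[r->begin];
	r->begin = ring_next(r->begin);
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int v, i;

	for (i = 1; i <= 4; i++)        /* fourth push fails: one slot reserved */
		printf("push %d -> %s\n", i, ring_push(&r, i) ? "full" : "ok");
	while (!ring_pop(&r, &v))
		printf("pop %d\n", v);
	return 0;
}
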
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 39bba08..64df923 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -9,6 +9,17 @@
#include "dm-transaction-manager.h"
+#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about ~16k metadata blocks.
+ */
+#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))
+#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)
+
/*
* Unfortunately we have to use two-phase construction due to the cycle
* between the tm and sm.
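
With DM_SM_METADATA_BLOCK_SIZE at 4 KiB (eight 512-byte sectors), the new constants
bound the usable thin-pool metadata device at just under 16 GiB, which matches the
existing "larger than 16GB" warning. A worked check of the arithmetic:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)        /* 8 sectors */
#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))      /* 4161600 */
#define DM_SM_METADATA_MAX_SECTORS \
	(DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)

int main(void)
{
	unsigned long long bytes =
		(unsigned long long)DM_SM_METADATA_MAX_SECTORS << SECTOR_SHIFT;

	printf("max metadata blocks:  %d\n", DM_SM_METADATA_MAX_BLOCKS);
	printf("max metadata sectors: %d\n", DM_SM_METADATA_MAX_SECTORS);
	printf("max metadata size:    %.3f GiB\n",
	       bytes / (double)(1ULL << 30));   /* ~15.875 GiB */
	return 0;
}
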
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 13af7e5..8103e43 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -53,17 +53,25 @@
return 0;
}
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the PMIC and CODEC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 ids for PMIC
+ * and CODEC, which must be different to operate together.
+ */
static struct i2c_device_id da9055_i2c_id[] = {
- {"da9055", 0},
+ {"da9055-pmic", 0},
{ }
};
+MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
- .name = "da9055",
+ .name = "da9055-pmic",
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb..71aa14a 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@
};
MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
+#ifdef CONFIG_PM_SLEEP
static int max14577_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static struct of_device_id max14577_dt_match[] = {
{ .compatible = "maxim,max14577", },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3b..5adede0 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@
return pd;
}
-static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 612ca40..5d5e186 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@
return pd;
}
-static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (int)(long)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8998_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index a139798..714e213 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -315,6 +315,7 @@
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sec_pmic_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -349,6 +350,7 @@
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 966cf65..3cc4c70 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@
{
struct tps65217 *tps;
unsigned int version;
- unsigned int chip_id = ids->driver_data;
+ unsigned long chip_id = ids->driver_data;
const struct of_device_id *match;
bool status_off = false;
int ret;
@@ -170,7 +170,7 @@
"Failed to find matching dt id\n");
return -EINVAL;
}
- chip_id = (unsigned int)(unsigned long)match->data;
+ chip_id = (unsigned long)match->data;
status_off = of_property_read_bool(client->dev.of_node,
"ti,pmic-shutdown-controller");
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1b..e6fab94 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (int)of_id->data;
+ wm8994->type = (enum wm8994_type)of_id->data;
} else {
wm8994->type = id->driver_data;
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9b809cf..89a5579 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -666,7 +666,6 @@
goto err;
cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_flow_control_req(dev, cl)) {
@@ -678,6 +677,9 @@
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
+
+ cl->read_cb = cb;
+
return rets;
err:
mei_io_cb_free(cb);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000..95c8944 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@
nid = cpu_to_node(cpu);
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 59eba5d..9715a7b 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1584,7 +1584,7 @@
}
if (mtd->ecc_stats.failed - ecc_failures) {
- if (retry_mode + 1 <= chip->read_retries) {
+ if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
ret = nand_setup_read_retry(mtd,
retry_mode);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ef4190a..bf642ce 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1633,6 +1633,7 @@
int i;
dma_cap_mask_t mask;
unsigned sig;
+ unsigned oob_index;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1826,11 +1827,14 @@
(mtd->writesize /
nand_chip->ecc.size);
if (nand_chip->options & NAND_BUSWIDTH_16)
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ oob_index = BADBLOCK_MARKER_LENGTH;
else
- ecclayout->eccpos[0] = 1;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1847,9 +1851,15 @@
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1883,9 +1893,12 @@
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* This ECC scheme requires ELM H/W block */
if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
pr_err("nand: error: could not initialize ELM\n");
@@ -1913,9 +1926,15 @@
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1956,9 +1975,12 @@
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
#else
pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1972,11 +1994,8 @@
goto return_error;
}
- /* populate remaining ECC layout data */
- ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
- ecclayout->eccbytes);
- for (i = 1; i < ecclayout->eccbytes; i++)
- ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* all OOB bytes from oobfree->offset till the end of OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
pr_err("not enough OOB bytes required = %d, available=%d\n",
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index ead8613..c5dad65 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -463,8 +463,8 @@
}
}
if (found_orphan) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
@@ -846,16 +846,16 @@
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
return ret;
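
Every fastmap hunk above reorders list_del before kmem_cache_free because unlinking
touches the node's list pointers; doing it after the free is a use-after-free. A
minimal doubly-linked unlink showing why the order matters (simplified list, not
the kernel's list.h):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; int v; };

static void node_del(struct node *n)
{
	/* reads and writes n->prev / n->next: n must still be live */
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node *n = malloc(sizeof(*n));

	n->v = 42;
	n->prev = &head; n->next = &head;
	head.next = n; head.prev = n;

	/* Correct order, as in the fixed driver: unlink, then free. */
	node_del(n);
	free(n);   /* free(n) before node_del(n) would touch freed memory */

	printf("list empty: %d\n", head.next == &head);
	return 0;
}
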
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6d20fbd..dcde5605 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -181,7 +181,7 @@
*/
static inline void __disable_port(struct port *port)
{
- bond_set_slave_inactive_flags(port->slave);
+ bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -193,7 +193,7 @@
struct slave *slave = port->slave;
if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -2062,6 +2062,7 @@
struct list_head *iter;
struct slave *slave;
struct port *port;
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
read_lock(&bond->lock);
rcu_read_lock();
@@ -2119,8 +2120,19 @@
}
re_arm:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
rcu_read_unlock();
read_unlock(&bond->lock);
+
+ if (should_notify_rtnl && rtnl_trylock()) {
+ bond_slave_state_notify(bond);
+ rtnl_unlock();
+ }
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c4747..e8f133e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -730,7 +730,7 @@
client_info->ntt = 0;
}
- if (!vlan_get_tag(skb, &client_info->vlan_id))
+ if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1c6104d..e5628fc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -829,21 +829,25 @@
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active)
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
bool should_notify_peers = false;
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (bond->params.fail_over_mac)
bond_do_fail_over_mac(bond, new_active,
@@ -1193,6 +1197,11 @@
return -EBUSY;
}
+ if (bond_dev == slave_dev) {
+ pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+ return -EPERM;
+ }
+
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
@@ -1463,14 +1472,15 @@
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave,
+ BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
@@ -1488,7 +1498,7 @@
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1654,9 +1664,6 @@
return -EINVAL;
}
- /* release the slave from its bond */
- bond->slave_cnt--;
-
bond_sysfs_slave_del(slave);
bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1738,6 +1745,7 @@
unblock_netpoll_tx();
synchronize_rcu();
+ bond->slave_cnt--;
if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
@@ -2015,7 +2023,8 @@
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
bond->params.mode == BOND_MODE_8023AD)
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2562,7 +2571,8 @@
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
- bond->current_arp_slave);
+ bond->current_arp_slave,
+ BOND_SLAVE_NOTIFY_NOW);
bond->current_arp_slave = NULL;
}
@@ -2582,7 +2592,8 @@
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2615,17 +2626,17 @@
/*
* Send ARP probes for active-backup mode ARP monitor.
+ *
+ * Called with rcu_read_lock held.
*/
static bool bond_ab_arp_probe(struct bonding *bond)
{
struct slave *slave, *before = NULL, *new_slave = NULL,
- *curr_arp_slave, *curr_active_slave;
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+ *curr_active_slave = rcu_dereference(bond->curr_active_slave);
struct list_head *iter;
bool found = false;
-
- rcu_read_lock();
- curr_arp_slave = rcu_dereference(bond->current_arp_slave);
- curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
if (curr_arp_slave && curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
@@ -2634,32 +2645,23 @@
if (curr_active_slave) {
bond_arp_send_all(bond, curr_active_slave);
- rcu_read_unlock();
- return true;
+ return should_notify_rtnl;
}
- rcu_read_unlock();
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
- if (!rtnl_trylock())
- return false;
- /* curr_arp_slave might have gone away */
- curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
-
if (!curr_arp_slave) {
- curr_arp_slave = bond_first_slave(bond);
- if (!curr_arp_slave) {
- rtnl_unlock();
- return true;
- }
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
+ return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2677,7 +2679,8 @@
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_LATER);
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
@@ -2689,26 +2692,31 @@
if (!new_slave && before)
new_slave = before;
- if (!new_slave) {
- rtnl_unlock();
- return true;
- }
+ if (!new_slave)
+ goto check_state;
new_slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(new_slave);
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
- rtnl_unlock();
- return true;
+check_state:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
+ return should_notify_rtnl;
}
static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
- bool should_notify_peers = false, should_commit = false;
+ bool should_notify_peers = false;
+ bool should_notify_rtnl = false;
int delta_in_ticks;
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2717,11 +2725,12 @@
goto re_arm;
rcu_read_lock();
- should_notify_peers = bond_should_notify_peers(bond);
- should_commit = bond_ab_arp_inspect(bond);
- rcu_read_unlock();
- if (should_commit) {
+ should_notify_peers = bond_should_notify_peers(bond);
+
+ if (bond_ab_arp_inspect(bond)) {
+ rcu_read_unlock();
+
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
delta_in_ticks = 1;
@@ -2730,23 +2739,28 @@
}
bond_ab_arp_commit(bond);
+
rtnl_unlock();
+ rcu_read_lock();
}
- if (!bond_ab_arp_probe(bond)) {
- /* rtnl locking failed, re-arm */
- delta_in_ticks = 1;
- should_notify_peers = false;
- }
+ should_notify_rtnl = bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- if (should_notify_peers) {
+ if (should_notify_peers || should_notify_rtnl) {
if (!rtnl_trylock())
return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
+ if (should_notify_rtnl)
+ bond_slave_state_notify(bond);
+
rtnl_unlock();
}
}
@@ -3046,9 +3060,11 @@
bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
}
}
read_unlock(&bond->curr_slave_lock);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c378784..298c265 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -121,6 +121,7 @@
static struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
};
static struct bond_option bond_opts[] = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 86ccfb9..2b0fdec 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -195,7 +195,8 @@
s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
- inactive:1; /* indicates inactive slave */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1; /* indicates whether the state changed */
u8 duplex;
u32 original_mtu;
u32 link_failure_count;
@@ -303,6 +304,24 @@
}
}
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ slave->should_notify = 0;
+ } else {
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
static inline void bond_slave_state_change(struct bonding *bond)
{
struct list_head *iter;
@@ -316,6 +335,19 @@
}
}
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ tmp->should_notify = 0;
+ }
+ }
+}
+
static inline int bond_slave_state(struct slave *slave)
{
return slave->backup;
@@ -343,6 +375,9 @@
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
@@ -394,17 +429,19 @@
}
#endif
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
{
if (!bond_is_lb(slave->bond))
- bond_set_backup_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
-static inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
{
- bond_set_active_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
}
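
The notify-now/notify-later machinery above exists because bond_ab_arp_probe() now runs entirely under rcu_read_lock(), where rtmsg_ifinfo() cannot be called (it needs RTNL and may sleep). A state change is instead recorded in the new should_notify bit and flushed later via bond_slave_state_notify() under rtnl_trylock(). A minimal userspace sketch of the flag logic, using stand-in types and names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

#define BOND_SLAVE_NOTIFY_NOW   true
#define BOND_SLAVE_NOTIFY_LATER false

struct slave {
    unsigned int backup:1;
    unsigned int should_notify:1;
};

static void set_slave_state(struct slave *s, int state, bool notify)
{
    if (s->backup == state)
        return;

    s->backup = state;
    if (notify) {
        /* the kernel calls rtmsg_ifinfo() here; that needs RTNL */
        printf("notify userspace: backup=%d\n", state);
        s->should_notify = 0;
    } else {
        /* deferred: two changes cancel out, one leaves a pending notify */
        s->should_notify = !s->should_notify;
    }
}

int main(void)
{
    struct slave s = { .backup = 1 };

    set_slave_state(&s, 0, BOND_SLAVE_NOTIFY_LATER);
    set_slave_state(&s, 1, BOND_SLAVE_NOTIFY_LATER);
    printf("pending notify: %u\n", s.should_notify); /* 0: net change is nil */
    return 0;
}
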
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 320bef2..61376ab 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,8 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+#define FLEXCAN_TIMEOUT_US (50)
+
/*
* FLEXCAN hardware feature flags
*
@@ -262,6 +264,22 @@
}
#endif
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
u32 reg_esr)
{
@@ -269,26 +287,95 @@
(reg_esr & FLEXCAN_ESR_ERR_BUS);
}
-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
- udelay(10);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+static int flexcan_chip_disable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_freeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(100, 200);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ return -ETIMEDOUT;
+
+ return 0;
}
static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -709,19 +796,14 @@
u32 reg_mcr, reg_ctrl;
/* enable module */
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
/* soft reset */
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- udelay(10);
-
- reg_mcr = flexcan_read(&regs->mcr);
- if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
- netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
- reg_mcr);
- err = -ENODEV;
- goto out;
- }
+ err = flexcan_chip_softreset(priv);
+ if (err)
+ goto out_chip_disable;
flexcan_set_bittiming(dev);
@@ -788,16 +870,14 @@
if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
flexcan_write(0x0, &regs->rxfgmask);
- if (priv->reg_xceiver) {
- err = regulator_enable(priv->reg_xceiver);
- if (err)
- goto out;
- }
+ err = flexcan_transceiver_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* synchronize with the can bus */
- reg_mcr = flexcan_read(&regs->mcr);
- reg_mcr &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg_mcr, &regs->mcr);
+ err = flexcan_chip_unfreeze(priv);
+ if (err)
+ goto out_transceiver_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -810,7 +890,9 @@
return 0;
- out:
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
+ out_chip_disable:
flexcan_chip_disable(priv);
return err;
}
@@ -825,18 +907,17 @@
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg;
+
+ /* freeze + disable module */
+ flexcan_chip_freeze(priv);
+ flexcan_chip_disable(priv);
/* Disable all interrupts */
flexcan_write(0, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
- /* Disable + halt module */
- reg = flexcan_read(&regs->mcr);
- reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
-
- if (priv->reg_xceiver)
- regulator_disable(priv->reg_xceiver);
+ flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return;
@@ -866,7 +947,7 @@
/* start chip and queuing */
err = flexcan_chip_start(dev);
if (err)
- goto out_close;
+ goto out_free_irq;
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -875,6 +956,8 @@
return 0;
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_close:
close_candev(dev);
out_disable_per:
@@ -945,12 +1028,16 @@
goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ goto out_disable_per;
reg = flexcan_read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
flexcan_write(reg, &regs->ctrl);
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
reg = flexcan_read(&regs->mcr);
@@ -967,14 +1054,15 @@
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out_disable_per;
+ goto out_chip_disable;
}
err = register_candev(dev);
- out_disable_per:
/* disable core and turn off clocks */
+ out_chip_disable:
flexcan_chip_disable(priv);
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
@@ -1104,9 +1192,10 @@
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
-
+ netif_napi_del(&priv->napi);
free_candev(dev);
return 0;
@@ -1117,8 +1206,11 @@
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
if (netif_running(dev)) {
netif_stop_queue(dev);
@@ -1139,9 +1231,7 @@
netif_device_attach(dev);
netif_start_queue(dev);
}
- flexcan_chip_enable(priv);
-
- return 0;
+ return flexcan_chip_enable(priv);
}
#endif /* CONFIG_PM_SLEEP */
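
All five flexcan helpers above share one shape: write the control bit, poll for the ACK with a bounded number of usleep_range() naps, then re-read once more so a success that lands on the final iteration is not misreported as a timeout. A standalone sketch of that shape; read_ack() is a hypothetical stand-in for the flexcan_read() checks:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_US 50

static int polls_left = 3;

static bool read_ack(void)      /* pretend the ACK appears after 3 polls */
{
    return --polls_left <= 0;
}

static int wait_for_ack(void)
{
    unsigned int timeout = TIMEOUT_US / 10;

    while (timeout-- && !read_ack())
        usleep(10);             /* the kernel uses usleep_range(10, 20) */

    /* re-check after the loop: the ACK may have raced the last poll */
    if (!read_ack())
        return -ETIMEDOUT;
    return 0;
}

int main(void)
{
    printf("wait_for_ack() = %d\n", wait_for_ack());    /* 0 */
    return 0;
}
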
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 2e45f6e..380d249 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1248,19 +1248,13 @@
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used for descriptors.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA config, aborting\n");
- goto out_pci_disable;
- }
+ dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+ goto out_pci_disable;
}
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d5c2d3e..422aab2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2436,7 +2436,7 @@
err_register:
err_sw_init:
err_eeprom:
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
@@ -2474,7 +2474,7 @@
unregister_netdev(netdev);
atl1e_free_ring_resources(adapter);
atl1e_force_ps(&adapter->hw);
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1f7b5aa..8a7bf7d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1484,6 +1484,10 @@
add_timer(&bp->timer);
b44_enable_ints(bp);
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_start(bp->phydev);
+
netif_start_queue(dev);
out:
return err;
@@ -1646,6 +1650,9 @@
netif_stop_queue(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_stop(bp->phydev);
+
napi_disable(&bp->napi);
del_timer_sync(&bp->timer);
@@ -2222,7 +2229,12 @@
}
if (status_changed) {
- b44_check_phy(bp);
+ u32 val = br32(bp, B44_TX_CTRL);
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
phy_print_status(phydev);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac..6c9e1c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2507,6 +2507,7 @@
bp->fw_wr_seq++;
msg_data |= bp->fw_wr_seq;
+ bp->fw_last_msg = msg_data;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
@@ -4000,8 +4001,23 @@
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+ u32 val;
+
+ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ return;
+ }
+ /* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+ val | BNX2_PORT_FEATURE_ASF_ENABLED);
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+ }
}
@@ -4033,9 +4049,22 @@
if (bp->wol)
pci_set_power_state(bp->pdev, PCI_D3hot);
- } else {
- pci_set_power_state(bp->pdev, PCI_D3hot);
+ break;
+
}
+ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ u32 val;
+
+ /* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ val &= ~BNX2_CONDITION_PM_STATE_MASK;
+ val |= BNX2_CONDITION_PM_STATE_UNPREP;
+ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+ }
+ pci_set_power_state(bp->pdev, PCI_D3hot);
/* No more memory access after this point until
* device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f1cf2c4..e341bc3 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6900,6 +6900,7 @@
u16 fw_wr_seq;
u16 fw_drv_pulse_wr_seq;
+ u32 fw_last_msg;
int rx_max_ring;
int rx_ring_size;
@@ -7406,6 +7407,10 @@
#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
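
Both bnx2 hunks apply the same save/modify/sync/restore idiom: stash the shared-memory word, set a flag telling the firmware to keep the PHY powered across the sync, then write the original value back. A sketch with a plain variable standing in for the shmem word (all names here are illustrative):

#include <stdio.h>

#define ASF_ENABLED 0x80    /* stand-in for BNX2_PORT_FEATURE_ASF_ENABLED */

static unsigned int shmem_port_feature = 0x13;  /* whatever firmware left there */

static void fw_sync(void)
{
    printf("fw_sync sees PORT_FEATURE=0x%x\n", shmem_port_feature);
}

static void setup_wol(void)
{
    unsigned int val = shmem_port_feature;      /* save */

    shmem_port_feature = val | ASF_ENABLED;     /* keep the PHY powered */
    fw_sync();
    shmem_port_feature = val;                   /* restore */
}

int main(void)
{
    setup_wol();
    printf("restored PORT_FEATURE=0x%x\n", shmem_port_feature);
    return 0;
}
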
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 66c0df7..dbcff50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3875,7 +3875,9 @@
xmit_type);
}
- /* Add the macs to the parsing BD this is a vf */
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
if (IS_VF(bp)) {
/* override GRE parameters in BD */
bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3887,6 +3889,11 @@
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
}
SET_FLAG(pbd_e2_parsing_data,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index fcf9105a5..09f3fef 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@
while (retry < 3) {
rc = 0;
rcu_read_lock();
- ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
- dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+ dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@@ -785,7 +785,7 @@
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@
if (!use_pg_tbl)
return 0;
- dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
- ~(BNX2_PAGE_SIZE - 1);
+ dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+ ~(CNIC_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@
if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
- cp->ctx_blk_size = BNX2_PAGE_SIZE;
- cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+ cp->ctx_blk_size = CNIC_PAGE_SIZE;
+ cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@
if (udev->l2_ring)
return 0;
- udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+ udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1113,7 +1113,7 @@
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
- pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
- PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+ CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
return -ENOMEM;
- n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+ n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
@@ -1296,7 +1296,7 @@
goto error;
}
- pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
@@ -1466,8 +1466,8 @@
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
- hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+ hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@
}
ctx->cid = cid;
- pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
@@ -1760,7 +1760,7 @@
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
- req2->rq_page_table_addr_lo & PAGE_MASK;
+ req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@
/* CSTORM and USTORM initialization is different, CSTORM requires
* CQ DB base & not PTE addr */
ictx->cstorm_st_context.cq_db_base.lo =
- req1->cq_page_table_addr_lo & PAGE_MASK;
+ req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -3244,7 +3244,8 @@
int rc;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+ lockdep_is_held(&cnic_lock));
if (ulp_ops && ulp_ops->cnic_get_stats)
rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
else
@@ -4384,7 +4385,7 @@
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
- memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+ memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+ rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -4709,10 +4710,10 @@
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
- if (BNX2_PAGE_BITS > 12)
+ if (CNIC_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
- val |= (BNX2_PAGE_BITS - 8) << 4;
+ val |= (CNIC_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@@ -4742,13 +4743,13 @@
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
- memset(txbd, 0, BNX2_PAGE_SIZE);
+ memset(txbd, 0, CNIC_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
- BNX2_PAGE_SIZE);
+ CNIC_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@@ -5265,8 +5266,8 @@
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
- memset(rx_ring, 0, BNX2_PAGE_SIZE);
+ rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+ memset(rx_ring, 0, CNIC_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0d6b13f..d535ae4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b..dcbca69 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b192..5f4d557 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.19"
-#define CNIC_MODULE_RELDATE "December 19, 2013"
+#define CNIC_MODULE_VERSION "2.5.20"
+#define CNIC_MODULE_RELDATE "March 14, 2014"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -24,6 +24,16 @@
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS 14
+#else
+#define CNIC_PAGE_BITS PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
+
struct kwqe {
u32 kwqe_op_flag;
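
These CNIC_PAGE_* macros decouple the chip's ring-page geometry from the CPU page size, so a 64K-page host (for example ppc64) still lays the rings out in 16K pages. A quick userspace check of the arithmetic, with PAGE_SHIFT hard-coded for illustration:

#include <stdio.h>

#define PAGE_SHIFT 16                           /* pretend 64K pages */

#if (PAGE_SHIFT > 14)
#define CNIC_PAGE_BITS 14
#else
#define CNIC_PAGE_BITS PAGE_SHIFT
#endif
#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))

int main(void)
{
    printf("CNIC_PAGE_SIZE = %d\n", CNIC_PAGE_SIZE);    /* 16384, not 65536 */
    printf("align(20000)   = %lu\n", (unsigned long)CNIC_PAGE_ALIGN(20000UL));
    printf("mask           = 0x%lx\n", (unsigned long)CNIC_PAGE_MASK);
    return 0;
}
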
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3167ed6..70a225c8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6843,8 +6843,7 @@
work_mask |= opaque_key;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ if (desc->err_vlan & RXD_ERR_MASK) {
drop_it:
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -17650,8 +17649,6 @@
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17683,7 +17680,8 @@
features |= NETIF_F_TSO_ECN;
}
- dev->features |= features;
+ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ef47238..04321e5 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2608,7 +2608,11 @@
#define RXD_ERR_TOO_SMALL 0x00400000
#define RXD_ERR_NO_RESOURCES 0x00800000
#define RXD_ERR_HUGE_FRAME 0x01000000
-#define RXD_ERR_MASK 0xffff0000
+
+#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
+ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
+ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
+ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
u32 reserved;
u32 opaque;
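
The tg3.c hunk earlier in this series replaces a blanket 0xffff0000 test with this enumerated mask. The old escape clause compared err_vlan for exact equality with RXD_ERR_ODD_NIBBLE_RCVD_MII, but err_vlan also carries the VLAN tag in its low bits, so the escape never fired for tagged frames and benign MII noise dropped them. A sketch of both checks; bit values not visible in this hunk follow the driver header and should be treated as illustrative:

#include <stdio.h>

#define RXD_ERR_BAD_CRC             0x00010000
#define RXD_ERR_COLLISION           0x00020000
#define RXD_ERR_LINK_LOST           0x00040000
#define RXD_ERR_PHY_DECODE          0x00080000
#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000
#define RXD_ERR_MAC_ABRT            0x00200000
#define RXD_ERR_TOO_SMALL           0x00400000
#define RXD_ERR_NO_RESOURCES        0x00800000
#define RXD_ERR_HUGE_FRAME          0x01000000

#define OLD_RXD_ERR_MASK 0xffff0000
#define NEW_RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
                          RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
                          RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
                          RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)

int main(void)
{
    /* benign MII condition on a VLAN-tagged frame: low bits hold the tag */
    unsigned int err_vlan = RXD_ERR_ODD_NIBBLE_RCVD_MII | 0x0064;

    int old_drop = (err_vlan & OLD_RXD_ERR_MASK) != 0 &&
                   err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII;
    int new_drop = (err_vlan & NEW_RXD_ERR_MASK) != 0;

    printf("old logic drops: %d\n", old_drop);  /* 1: the tag defeats the == */
    printf("new logic drops: %d\n", new_drop);  /* 0 */
    return 0;
}
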
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 1803c39..354ae979 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1704,7 +1704,7 @@
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cf64f3d..4ad1187 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -707,7 +707,8 @@
else
skb_checksum_none_assert(skb);
- if (flags & BNA_CQ_EF_VLAN)
+ if ((flags & BNA_CQ_EF_VLAN) &&
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
@@ -2094,7 +2095,9 @@
rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
}
- rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rx_config->vlan_strip_status =
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
@@ -3245,11 +3248,6 @@
BNA_RXMODE_ALLMULTI;
bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- if (bnad->cfg_flags & BNAD_CF_PROMISC)
- bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
- else
- bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
-
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3374,6 +3372,27 @@
return 0;
}
+static int bnad_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnad *bnad = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
@@ -3421,6 +3440,7 @@
.ndo_change_mtu = bnad_change_mtu,
.ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+ .ndo_set_features = bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnad_netpoll
#endif
@@ -3433,14 +3453,14 @@
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
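
bnad_set_features() above follows the standard ndo_set_features pattern: XOR the requested feature set against the current one to isolate what changed, then touch hardware only for the toggled bits. A standalone sketch of the pattern (the flag value is a stand-in):

#include <stdio.h>

typedef unsigned long long netdev_features_t;
#define NETIF_F_HW_VLAN_CTAG_RX (1ULL << 8)     /* stand-in value */

static netdev_features_t cur_features = NETIF_F_HW_VLAN_CTAG_RX;

static void set_features(netdev_features_t wanted)
{
    netdev_features_t changed = wanted ^ cur_features;

    if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
        if (wanted & NETIF_F_HW_VLAN_CTAG_RX)
            printf("enable hw vlan stripping\n");
        else
            printf("disable hw vlan stripping\n");
    }
    cur_features = wanted;
}

int main(void)
{
    set_features(0);                            /* prints "disable ..." */
    set_features(0);                            /* no change: silent */
    set_features(NETIF_F_HW_VLAN_CTAG_RX);      /* prints "enable ..." */
    return 0;
}
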
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3190d38e..d0c38e0 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -632,11 +632,16 @@
"Unable to allocate sk_buff\n");
break;
}
- bp->rx_skbuff[entry] = skb;
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -725,7 +730,7 @@
skb_put(skb, len);
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
- len, DMA_FROM_DEVICE);
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
skb_checksum_none_assert(skb);
@@ -1036,11 +1041,15 @@
}
entry = macb_tx_ring_wrap(bp->tx_head);
- bp->tx_head++;
netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto unlock;
+ }
+ bp->tx_head++;
tx_skb = &bp->tx_skb[entry];
tx_skb->skb = skb;
tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
+unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_OK;
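
The macb fixes above are about ordering: check dma_mapping_error() before publishing any ring state (tx_head, rx_skbuff[]), so a failed mapping leaves the ring exactly as it was. A sketch of the idiom; map_buffer() is a hypothetical stand-in for dma_map_single() plus dma_mapping_error():

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

static unsigned int tx_head;
static const void *ring[RING_SIZE];

static bool map_buffer(const void *buf)        /* pretend mapping can fail */
{
    return buf != NULL;
}

static int xmit(const void *buf)
{
    unsigned int entry = tx_head % RING_SIZE;

    if (!map_buffer(buf))
        return -1;      /* bail out with the ring state untouched */
    tx_head++;          /* only advance after the mapping succeeded */
    ring[entry] = buf;
    return 0;
}

int main(void)
{
    int data = 42;

    xmit(NULL);         /* fails: tx_head stays 0 */
    xmit(&data);        /* succeeds */
    printf("tx_head = %u\n", tx_head);          /* 1 */
    return 0;
}
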
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 43ab35f..34e2488 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6179,6 +6179,7 @@
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
+ .shutdown = remove_one,
.err_handler = &cxgb4_eeh,
};
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d09615..05529e2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -350,11 +350,13 @@
u32 roce_drops_crc;
};
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID 0xFFFF
+
struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
- u16 def_vid;
u16 vlan_tag;
u32 tx_rate;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 04ac9c6..36c8061 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -913,24 +913,14 @@
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
-static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
- struct sk_buff *skb,
- bool *skip_hw_vlan)
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
- * may cause a transmit stall on that port. So the work-around is to
- * pad short packets (<= 32 bytes) to a 36-byte length.
- */
- if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
- goto tx_drop;
- skb->len = 36;
- }
-
/* For padded packets, BE HW modifies tot_len field in IP header
* incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
@@ -959,7 +949,7 @@
vlan_tx_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
/* HW may lockup when VLAN HW tagging is requested on
@@ -981,15 +971,39 @@
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
return skb;
tx_drop:
dev_kfree_skb_any(skb);
+err:
return NULL;
}
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
+{
+ /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
+ * less may cause a transmit stall on that port. So the work-around is
+ * to pad short packets (<= 32 bytes) to a 36-byte length.
+ */
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+ if (skb_padto(skb, 36))
+ return NULL;
+ skb->len = 36;
+ }
+
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+ if (!skb)
+ return NULL;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1157,6 +1171,14 @@
return status;
}
+static void be_clear_promisc(struct be_adapter *adapter)
+{
+ adapter->promiscuous = false;
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+}
+
static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1170,9 +1192,7 @@
/* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
- adapter->promiscuous = false;
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
+ be_clear_promisc(adapter);
if (adapter->vlans_added)
be_vid_config(adapter);
}
@@ -1287,24 +1307,20 @@
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan) {
- /* If this is new value, program it. Else skip. */
- vf_cfg->vlan_tag = vlan;
+ if (vf_cfg->vlan_tag != vlan)
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
vf_cfg->if_handle, 0);
- }
} else {
/* Reset Transparent Vlan Tagging. */
- vf_cfg->vlan_tag = 0;
- vlan = vf_cfg->def_vid;
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
+ vf + 1, vf_cfg->if_handle, 0);
}
-
- if (status)
+ if (!status)
+ vf_cfg->vlan_tag = vlan;
+ else
dev_info(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed\n", vlan, vf);
+ "VLAN %d config on VF %d failed\n", vlan, vf);
return status;
}
@@ -3013,11 +3029,11 @@
static int be_vf_setup(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg;
- u16 def_vlan, lnk_speed;
- int status, old_vfs, vf;
struct device *dev = &adapter->pdev->dev;
+ struct be_vf_cfg *vf_cfg;
+ int status, old_vfs, vf;
u32 privileges;
+ u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3084,12 +3100,6 @@
if (!status)
vf_cfg->tx_rate = lnk_speed;
- status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle, NULL);
- if (status)
- goto err;
- vf_cfg->def_vid = def_vlan;
-
if (!old_vfs)
be_cmd_enable_vf(adapter, vf + 1);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 903362a..03a3513 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -389,12 +389,6 @@
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
- /* Send it on its way. Tell FEC it's ready, interrupt when done,
- * it's the last BD of the frame, and to put the CRC on the end.
- */
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
if (fep->bufdesc_ex) {
@@ -416,6 +410,13 @@
}
}
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
@@ -527,13 +528,6 @@
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Setup multicast filter. */
- set_multicast_list(ndev);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
@@ -654,6 +648,13 @@
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4be9715..1fc8334 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -522,10 +522,21 @@
return rc;
}
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+ int i;
+ u64 encoded = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ encoded = (encoded << 8) | mac[i];
+
+ return encoded;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- u64 mac_address = 0;
+ u64 mac_address;
int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
@@ -579,8 +590,7 @@
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
- mac_address = mac_address >> 16;
+ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -1183,8 +1193,8 @@
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
- unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
+ u64 mcast_addr;
+ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1372,9 +1382,6 @@
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
-
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1383,7 +1390,7 @@
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
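
ibmveth_encode_mac_addr() builds the right-aligned 64-bit value the hypervisor expects byte by byte, so the result is independent of host endianness; the memcpy-and-shift code it replaces was only correct on big-endian. The same function, compilable standalone:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint64_t encode_mac_addr(const uint8_t *mac)
{
    uint64_t encoded = 0;
    int i;

    /* most significant MAC byte ends up in the highest occupied bits */
    for (i = 0; i < ETH_ALEN; i++)
        encoded = (encoded << 8) | mac[i];

    return encoded;
}

int main(void)
{
    uint8_t mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

    /* prints 0x021122334455 regardless of host endianness */
    printf("0x%012llx\n", (unsigned long long)encode_mac_addr(mac));
    return 0;
}
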
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 451ba79..1f37499 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -138,7 +138,6 @@
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
- unsigned long mac_addr;
void * buffer_list_addr;
void * filter_list_addr;
dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f..8d76fca 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -161,7 +163,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@@ -710,35 +712,6 @@
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PSC_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2756,12 +2729,15 @@
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ else
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
- mvneta_gmac_rgmii_set(pp, 1);
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
@@ -2774,6 +2750,7 @@
static int mvneta_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram_target_info;
+ struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
u32 phy_addr;
@@ -2838,9 +2815,15 @@
clk_prepare_enable(pp->clk);
- pp->base = of_iomap(dn, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto err_clk;
+ }
+
+ pp->base = devm_ioremap_resource(&pdev->dev, res);
- if (pp->base == NULL) {
+ if (IS_ERR(pp->base)) {
- err = -ENOMEM;
+ err = PTR_ERR(pp->base);
goto err_clk;
}
@@ -2848,7 +2831,7 @@
pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
- goto err_unmap;
+ goto err_clk;
}
for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@
mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
-err_unmap:
- iounmap(pp->base);
err_clk:
clk_disable_unprepare(pp->clk);
err_free_irq:
@@ -2934,7 +2915,6 @@
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
- iounmap(pp->base);
irq_dispose_mapping(dev->irq);
free_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad4531..84a96f7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -742,6 +742,14 @@
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
+ if (err)
+ return err;
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+ err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
+ &priv->tunnel_reg_id);
return err;
}
}
@@ -1792,6 +1800,8 @@
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
+ if (mclist->tunnel_reg_id)
+ mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 91b69ff..7e2995e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -129,13 +129,14 @@
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support",
+ [3] = "Device managed flow steering support",
[4] = "Automatic MAC reassignment support",
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support",
- [9] = "TCP/IP offloads/flow-steering for VXLAN support"
+ [9] = "Device managed flow steering IPoIB support",
+ [10] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -859,7 +860,7 @@
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
/* For guests, disable vxlan tunneling */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
field &= 0xf7;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
@@ -869,7 +870,7 @@
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* For guests, disable mw type 2 */
- MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
@@ -883,7 +884,7 @@
}
/* turn off ipoib managed steering for guests */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
field &= ~0x80;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158..d413e60 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -150,6 +150,8 @@
struct pci_dev *pdev;
};
+static atomic_t pf_loading = ATOMIC_INIT(0);
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -749,7 +751,7 @@
has_eth_port = true;
}
- if (has_ib_port)
+ if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
request_module_nowait(IB_DRV_NAME);
if (has_eth_port)
request_module_nowait(EN_DRV_NAME);
@@ -1407,6 +1409,11 @@
u32 slave_read;
u32 cmd_channel_ver;
+ if (atomic_read(&pf_loading)) {
+ mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
mlx4_warn(dev, "Sending reset\n");
@@ -2319,7 +2326,11 @@
if (num_vfs) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
+
+ atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, num_vfs);
+ atomic_dec(&pf_loading);
+
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
@@ -2670,7 +2681,11 @@
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- int ret = __mlx4_init_one(pdev, 0);
+ const struct pci_device_id *id;
+ int ret;
+
+ id = pci_match_id(mlx4_pci_table, pdev);
+ ret = __mlx4_init_one(pdev, id->driver_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
@@ -2684,6 +2699,7 @@
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
+ .shutdown = mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
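
The pf_loading counter above closes a probe-ordering race: pci_enable_sriov() probes the VFs synchronously while the PF is still initializing, so a VF probe that observes the counter set returns -EPROBE_DEFER and the driver core retries it later. A sketch using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

#define EPROBE_DEFER 517    /* kernel-private errno; not in userspace headers */

static atomic_int pf_loading;

static int vf_probe(void)
{
    if (atomic_load(&pf_loading)) {
        printf("PF not ready, deferring VF probe\n");
        return -EPROBE_DEFER;
    }
    printf("VF probed\n");
    return 0;
}

static void pf_enable_sriov(void)
{
    atomic_fetch_add(&pf_loading, 1);
    vf_probe();             /* probed synchronously here: must defer */
    atomic_fetch_sub(&pf_loading, 1);
}

int main(void)
{
    pf_enable_sriov();
    vf_probe();             /* retried later: succeeds */
    return 0;
}
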
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6b65f77..7aec6c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -51,8 +51,8 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "1.1"
-#define DRV_RELDATE "Dec, 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb, 2014"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9ca223b..b57e8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -57,8 +57,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "Dec 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a064f06..23b7e2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,8 +46,8 @@
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a..e0c92e0 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -83,6 +84,7 @@
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
};
static int msg_enable;
@@ -1414,6 +1417,21 @@
ks->spidev = spi;
ks->tx_space = 6144;
+ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ if (ret == -EPROBE_DEFER)
+ goto err_reg;
+ } else {
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator enable fail: %d\n",
+ ret);
+ goto err_reg_en;
+ }
+ }
+
+
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1508,8 +1526,14 @@
err_netdev:
free_irq(ndev->irq, ks);
-err_id:
err_irq:
+err_id:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_disable(ks->vdd_reg);
+err_reg_en:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_put(ks->vdd_reg);
+err_reg:
free_netdev(ndev);
return ret;
}
@@ -1523,6 +1547,10 @@
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
+ if (!IS_ERR(priv->vdd_reg)) {
+ regulator_disable(priv->vdd_reg);
+ regulator_put(priv->vdd_reg);
+ }
free_netdev(priv->netdev);
return 0;
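
regulator_get_optional() returns an ERR_PTR() when no "vdd" supply is wired
up, so every later use has to stay behind an IS_ERR() check, and
-EPROBE_DEFER must be propagated so the probe is retried once the regulator
driver appears. A condensed sketch of the acquire side (hypothetical helper,
not the driver's code):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static struct regulator *toy_get_vdd(struct device *dev)
{
	struct regulator *vdd = regulator_get_optional(dev, "vdd");

	/* ERR_PTR cases: -EPROBE_DEFER must reach the caller unchanged;
	 * anything else simply means "no supply", which is fine here. */
	if (IS_ERR(vdd))
		return vdd;

	if (regulator_enable(vdd)) {
		regulator_put(vdd);
		return ERR_PTR(-EIO);
	}
	return vdd;
}

Teardown mirrors this in reverse - regulator_disable() then regulator_put() -
and, as in the remove path above, both calls stay behind an IS_ERR() test
because the supply may legitimately be absent.
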
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4146664..27c4f13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -340,6 +340,7 @@
if (qlcnic_sriov_vf_check(adapter))
return -EINVAL;
num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 77f1bce..7d4f549 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -807,7 +807,7 @@
!type->tc_param_valid)
return;
- if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
return;
tc_cfg = &type->tc_cfg[tc];
@@ -843,7 +843,7 @@
!type->tc_param_valid)
return;
- if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
return;
pgcfg = &type->pg_cfg[pgid];
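
Both hunks fix the classic off-by-one: with an array of QLC_DCB_MAX_TC (or
QLC_DCB_MAX_PG) entries the valid indices run from 0 to max - 1, so the
reject test must use >=, not >. In miniature (illustrative values):

#include <linux/errno.h>

#define MAX_TC 8
static int tc_cfg[MAX_TC];	/* valid indices: 0 .. MAX_TC - 1 */

static int toy_get_tc(int tc)
{
	if (tc < 0 || tc >= MAX_TC)	/* 'tc > MAX_TC' would let 8 through */
		return -EINVAL;
	return tc_cfg[tc];
}
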
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ba78c74..1222865 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -816,9 +816,10 @@
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (err)
return err;
}
}
@@ -3863,7 +3864,7 @@
strcpy(buf, "Tx");
}
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 09acf15..e5277a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -13,8 +13,6 @@
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
#define QLC_MAC_OPCODE_MASK 0x7
-#define QLC_MAC_STAR_ADD 6
-#define QLC_MAC_STAR_DEL 7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
@@ -1206,13 +1204,6 @@
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
- if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
- ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
- netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
- vf->pci_func);
- return -EINVAL;
- }
-
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfdd..656c65d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,7 +4765,9 @@
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91a67ae..3ff7bc3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -209,7 +209,7 @@
[RTL_GIGA_MAC_VER_16] =
_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_18] =
_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_19] =
@@ -7118,6 +7118,8 @@
}
mutex_init(&tp->wk.mutex);
+ u64_stats_init(&tp->rx_stats.syncp);
+ u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
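
u64_stats_init() matters on 32-bit kernels, where struct u64_stats_sync wraps
a seqcount so that 64-bit counters are never read torn; on 64-bit builds it
compiles away. A sketch of the full write/read protocol the driver relies on
(toy_* names are hypothetical):

#include <linux/u64_stats_sync.h>

struct toy_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void toy_stats_setup(struct toy_stats *s)
{
	u64_stats_init(&s->syncp);	/* the call this patch adds */
}

/* writer side, e.g. in the RX/TX paths */
static void toy_stats_add(struct toy_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry until no writer interleaved */
static u64 toy_stats_read_bytes(struct toy_stats *s)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return bytes;
}
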
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index eb75fbd..d7a3682 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1668,6 +1668,13 @@
struct efx_ptp_data *ptp = efx->ptp_data;
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+ if (!ptp) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ return;
+ }
+
if (!ptp->enabled)
return;
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 72d282b..c553f6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -151,7 +151,7 @@
sizeof(struct dma_desc)));
}
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
.init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7834a39..74610f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -419,20 +419,13 @@
unsigned int data; /* MII Data */
};
-struct stmmac_ring_mode_ops {
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*init_desc3) (struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
- int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
void (*clean_desc3) (void *priv, struct dma_desc *p);
};
@@ -441,8 +434,7 @@
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
- const struct stmmac_ring_mode_ops *ring;
- const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
@@ -460,7 +452,7 @@
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a96c7c2..650a4be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -100,10 +100,9 @@
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- if (unlikely(priv->plat->has_gmac))
- /* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
/* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@
return ret;
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a2e7d2c..8543e1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -92,8 +92,8 @@
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
-static int buf_sz = DMA_BUFFER_SIZE;
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -136,8 +136,8 @@
dma_rxsize = DMA_RX_SIZE;
if (unlikely(dma_txsize < 0))
dma_txsize = DMA_TX_SIZE;
- if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
flow_ctrl = FLOW_AUTO;
else if (likely(flow_ctrl < 0))
@@ -286,10 +286,25 @@
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
- /* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
- goto out;
+ int tx_lpi_timer = priv->tx_lpi_timer;
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1)) {
+			/* Handle at run-time the case where EEE can no
+			 * longer be supported (for example because the
+			 * link partner's caps have changed).
+			 * In that case the driver disables its own timers.
+			 */
+ if (priv->eee_active) {
+ pr_debug("stmmac: disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+ tx_lpi_timer);
+ }
+ priv->eee_active = 0;
+ goto out;
+ }
+ /* Activate the EEE and start timers */
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -300,13 +315,13 @@
priv->hw->mac->set_eee_timer(priv->ioaddr,
STMMAC_DEFAULT_LIT_LS,
- priv->tx_lpi_timer);
+ tx_lpi_timer);
} else
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->ioaddr,
priv->phydev->link);
- pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
ret = true;
}
@@ -886,10 +901,10 @@
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
- else if (mtu >= DMA_BUFFER_SIZE)
+ else if (mtu > DEFAULT_BUFSIZE)
ret = BUF_SIZE_2KiB;
else
- ret = DMA_BUFFER_SIZE;
+ ret = DEFAULT_BUFSIZE;
return ret;
}
@@ -951,9 +966,9 @@
p->des2 = priv->rx_skbuff_dma[i];
- if ((priv->mode == STMMAC_RING_MODE) &&
+ if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->ring->init_desc3(p);
+ priv->hw->mode->init_desc3(p);
return 0;
}
@@ -984,11 +999,8 @@
unsigned int bfsize = 0;
int ret = -ENOMEM;
- /* Set the max buffer size according to the DESC mode
- * and the MTU. Note that RING mode allows 16KiB bsize.
- */
- if (priv->mode == STMMAC_RING_MODE)
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ if (priv->hw->mode->set_16kib_bfsize)
+ bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1029,15 +1041,15 @@
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
- priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
- priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
} else {
- priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
- priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
}
}
@@ -1288,7 +1300,7 @@
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = 0;
}
- priv->hw->ring->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
@@ -1705,7 +1717,7 @@
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
@@ -1844,6 +1856,7 @@
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int enh_desc = priv->plat->enh_desc;
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1871,27 +1884,19 @@
first = desc;
/* To program the descriptors according to the size of the frame */
- if (priv->mode == STMMAC_RING_MODE) {
- is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->ring->jumbo_frm(priv, skb,
- csum_insertion);
- } else {
- is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->chain->jumbo_frm(priv, skb,
- csum_insertion);
- }
+ if (enh_desc)
+ is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
- } else
+ } else {
desc = first;
+ entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ }
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2029,7 +2034,7 @@
p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->ring->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(priv, p);
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
@@ -2633,11 +2638,11 @@
/* To use the chained or ring mode */
if (chain_mode) {
- priv->hw->chain = &chain_mode_ops;
+ priv->hw->mode = &chain_mode_ops;
pr_info(" Chain mode enabled\n");
priv->mode = STMMAC_CHAIN_MODE;
} else {
- priv->hw->ring = &ring_mode_ops;
+ priv->hw->mode = &ring_mode_ops;
pr_info(" Ring mode enabled\n");
priv->mode = STMMAC_RING_MODE;
}
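
After this refactor the core no longer branches on priv->mode for the
descriptor helpers: chain and ring mode fill in the same struct
stmmac_mode_ops, the core calls through priv->hw->mode, and the hooks that
only one mode implements (set_16kib_bfsize, init_desc3) are probed for NULL
instead. The shape of that design, reduced to a sketch (toy_* names are
hypothetical):

#include <linux/types.h>

struct toy_mode_ops {
	void (*init)(void *desc, dma_addr_t phy, unsigned int size);
	int (*set_16kib_bfsize)(int mtu);	/* optional: may be NULL */
};

static const struct toy_mode_ops toy_ring_ops;	/* filled in elsewhere */
static const struct toy_mode_ops toy_chain_ops;

/* selected once at init time, exactly as in the probe path above */
static const struct toy_mode_ops *toy_pick_mode(bool chain_mode)
{
	return chain_mode ? &toy_chain_ops : &toy_ring_ops;
}

static int toy_bufsize(const struct toy_mode_ops *mode, int mtu, int dflt)
{
	/* optional hook: test the pointer instead of a mode flag */
	if (mode->set_16kib_bfsize)
		return mode->set_16kib_bfsize(mtu);
	return dflt;
}
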
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index c61bc72b..8fb32a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -36,7 +36,7 @@
#ifdef CONFIG_DWMAC_STI
{ .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
- { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 651087b..7d6d8ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1164,11 +1164,17 @@
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
+ u32 slave_port;
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -2223,10 +2229,6 @@
goto clean_ale_ret;
}
- if (cpts_register(&pdev->dev, priv->cpts,
- data->cpts_clock_mult, data->cpts_clock_shift))
- dev_err(priv->dev, "error registering cpts device\n");
-
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 364d0c7..88ef270 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -355,7 +355,7 @@
int i;
spin_lock_irqsave(&ctlr->lock, flags);
- if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ if (ctlr->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@@ -891,7 +891,7 @@
unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
- if (chan->state != CPDMA_STATE_ACTIVE) {
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164..8f0e69c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
- int ret;
+ int q, m, ret;
+ int res_num = 0, irq_num = 0;
int i = 0;
- int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@
}
/* Request IRQ */
-	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
-		for (i = res->start; i <= res->end; i++) {
-			if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
-					0, ndev->name, ndev))
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+			res_num))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+			if (request_irq(irq_num, emac_irq, 0, ndev->name,
+					ndev)) {
+				dev_err(emac_dev,
+					"DaVinci EMAC: request_irq() failed\n");
+				ret = -EBUSY;
 				goto rollback;
+			}
 		}
-		k++;
+		res_num++;
}
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@
return 0;
-rollback:
-
- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
- ret = -EBUSY;
err:
+ emac_int_disable(priv);
+ napi_disable(&priv->napi);
+
+rollback:
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
+
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
+ cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
return ret;
}
@@ -1659,6 +1680,9 @@
*/
static int emac_dev_stop(struct net_device *ndev)
{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@@ -1674,6 +1698,13 @@
if (priv->phydev)
phy_disconnect(priv->phydev);
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc..6ac20a6 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@
if (rc) {
dev_err(&pdev->dev,
"32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out;
+ goto err_out_pci_disable;
}
/* sanity check */
@@ -931,7 +931,7 @@
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out;
+ goto err_out_pci_disable;
}
pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out;
+ goto err_out_pci_disable;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1084,6 +1084,8 @@
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
+err_out_pci_disable:
+ pci_disable_device(pdev);
err_out:
return rc;
}
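
The three redirected gotos all enforce the same rule: each error label undoes
exactly the steps that succeeded before the failure, in reverse order, so
pci_enable_device() now has a matching pci_disable_device() on every early
exit. The canonical shape (hypothetical probe, trimmed to two steps):

#include <linux/pci.h>

static int toy_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;			/* nothing to undo yet */

	rc = pci_request_regions(pdev, "toy");
	if (rc)
		goto err_out_pci_disable;	/* undo the enable only */

	/* ... each further step adds one more label above the others ... */
	return 0;

err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
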
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7141a19..d6fce97 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -442,6 +442,8 @@
if (!net)
return -ENOMEM;
+ netif_carrier_off(net);
+
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
@@ -473,6 +475,8 @@
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
free_netdev(net);
+ } else {
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
}
return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5d..b54fd25 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -243,6 +243,22 @@
return ret;
}
+static void rndis_set_link_state(struct rndis_device *rdev,
+ struct rndis_request *request)
+{
+ u32 link_status;
+ struct rndis_query_complete *query_complete;
+
+ query_complete = &request->response_msg.msg.query_complete;
+
+ if (query_complete->status == RNDIS_STATUS_SUCCESS &&
+ query_complete->info_buflen == sizeof(u32)) {
+ memcpy(&link_status, (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset), sizeof(u32));
+ rdev->link_state = link_status != 0;
+ }
+}
+
static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_message *resp)
{
@@ -272,6 +288,10 @@
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp,
resp->msg_len);
+ if (request->request_msg.ndis_msg_type ==
+ RNDIS_MSG_QUERY && request->request_msg.msg.
+ query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
+ rndis_set_link_state(dev, request);
} else {
netdev_err(ndev,
"rndis response buffer overflow "
@@ -620,7 +640,6 @@
ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
- dev->link_state = (link_status != 0) ? true : false;
return ret;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544..a30258a 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -546,12 +546,12 @@
int rc;
unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->irq_busy) {
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return -EBUSY;
}
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
might_sleep();
@@ -725,10 +725,11 @@
static irqreturn_t at86rf230_isr(int irq, void *data)
{
struct at86rf230_local *lp = data;
+ unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
lp->irq_busy = 1;
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
schedule_work(&lp->irqwork);
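
A bare spin_lock() on data that the hard-IRQ handler also takes can deadlock:
the interrupt may fire on the same CPU while the lock is held in process
context, and the handler then spins forever. spin_lock_irqsave() masks local
interrupts for the critical section, which is why both sides of lp->lock are
converted. The pattern in isolation (toy_* names are hypothetical):

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(toy_lock);
static int toy_irq_busy;

/* process context: plain spin_lock() here could deadlock against the
 * ISR below if the IRQ arrived on this CPU mid-critical-section */
static int toy_xmit_check(void)
{
	unsigned long flags;
	int busy;

	spin_lock_irqsave(&toy_lock, flags);
	busy = toy_irq_busy;
	spin_unlock_irqrestore(&toy_lock, flags);

	return busy ? -EBUSY : 0;
}

static irqreturn_t toy_isr(int irq, void *data)
{
	unsigned long flags;

	spin_lock_irqsave(&toy_lock, flags);
	toy_irq_busy = 1;
	spin_unlock_irqrestore(&toy_lock, flags);

	return IRQ_HANDLED;
}
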
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c14d39b..d7b2e94 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
- dev->vlan_features |= IFB_FEATURES;
+ dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a5d2189..1831fb7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -506,6 +506,9 @@
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
+#define ALWAYS_ON_FEATURES \
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -539,7 +542,7 @@
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= ALWAYS_ON_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
@@ -699,7 +702,7 @@
features = netdev_increment_features(vlan->lowerdev->features,
features,
mask);
- features |= NETIF_F_LLTX;
+ features |= ALWAYS_ON_FEATURES;
return features;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca0..76d96b9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -164,9 +164,9 @@
* of that setting. Returns the index of the last setting if
* none of the others match.
*/
-static inline int phy_find_setting(int speed, int duplex)
+static inline unsigned int phy_find_setting(int speed, int duplex)
{
- int idx = 0;
+ unsigned int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
(settings[idx].speed != speed || settings[idx].duplex != duplex))
@@ -185,7 +185,7 @@
* the mask in features. Returns the index of the last setting
* if nothing else matches.
*/
-static inline int phy_find_valid(int idx, u32 features)
+static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
idx++;
@@ -204,7 +204,7 @@
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
- int idx;
+ unsigned int idx;
/* Sanitize settings based on PHY capabilities */
if ((features & SUPPORTED_Autoneg) == 0)
@@ -954,7 +954,8 @@
(phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
- int idx, status;
+ int status;
+ unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 82514e7..2f6989b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -683,10 +683,9 @@
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
- struct ethtool_wolinfo wol;
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
/* If the device has WOL enabled, we cannot suspend the PHY */
- wol.cmd = ETHTOOL_GWOL;
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts)
return -EBUSY;
@@ -916,6 +915,8 @@
int err;
int lpa;
int lpagb = 0;
+ int common_adv;
+ int common_adv_gb = 0;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -937,7 +938,7 @@
phydev->lp_advertising =
mii_stat1000_to_ethtool_lpa_t(lpagb);
- lpagb &= adv << 2;
+ common_adv_gb = lpagb & adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
@@ -950,25 +951,25 @@
if (adv < 0)
return adv;
- lpa &= adv;
+ common_adv = lpa & adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
- if (lpagb & LPA_1000FULL)
+ if (common_adv_gb & LPA_1000FULL)
phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
- if (lpa & LPA_100FULL)
+ if (common_adv & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
- if (lpa & LPA_10FULL)
+ if (common_adv & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
if (phydev->duplex == DUPLEX_FULL) {
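
Masking lpa/lpagb in place conflated "what the link partner advertises" with
"what both ends agree on"; keeping the intersection in separate common_adv /
common_adv_gb variables lets the raw partner data survive for reporting while
speed/duplex resolution uses only the common subset. The resolution rule
itself is highest-common-denominator (illustrative helper, not the kernel's
genphy_read_status()):

#include <linux/mii.h>

static void toy_resolve(u16 common_adv, u16 common_adv_gb,
			int *speed, int *full_duplex)
{
	*speed = 10;
	*full_duplex = 0;

	if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
		*speed = 1000;
		*full_duplex = !!(common_adv_gb & LPA_1000FULL);
	} else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
		*speed = 100;
		*full_duplex = !!(common_adv & LPA_100FULL);
	} else if (common_adv & LPA_10FULL) {
		*full_duplex = 1;
	}
}
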
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8fe9cb7..26f8635 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1686,7 +1686,9 @@
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 433f0a0..e2797f1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,7 +11,7 @@
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 955df81..054e59c 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,20 +1029,12 @@
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- if (usb_device_no_sg_constraint(dev->udev))
- dev->can_dma_sg = 1;
-
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
- if (dev->can_dma_sg) {
- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- }
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1395,6 +1387,19 @@
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info dlink_dub1312_info = {
+ .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info sitecom_info = {
.description = "Sitecom USB 3.0 to Gigabit Adapter",
.bind = ax88179_bind,
@@ -1421,6 +1426,19 @@
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info lenovo_info = {
+ .description = "Lenovo OneLinkDock Gigabit LAN",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1431,6 +1449,10 @@
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
+ USB_DEVICE(0x2001, 0x4a00),
+ .driver_info = (unsigned long)&dlink_dub1312_info,
+}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
USB_DEVICE(0x0df6, 0x0072),
.driver_info = (unsigned long)&sitecom_info,
@@ -1438,6 +1460,10 @@
/* Samsung USB Ethernet Adapter */
USB_DEVICE(0x04e8, 0xa100),
.driver_info = (unsigned long)&samsung_info,
+}, {
+ /* Lenovo OneLinkDock Gigabit LAN */
+ USB_DEVICE(0x17ef, 0x304b),
+ .driver_info = (unsigned long)&lenovo_info,
},
{ },
};
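
Adding hardware support here is purely declarative: a new driver_info with
the shared ax88179 callbacks, plus a USB_DEVICE() match entry pointing at it
through .driver_info. The mechanism in miniature (toy_* names are
hypothetical; the VID/PID is the DUB-1312's from the patch):

#include <linux/module.h>
#include <linux/usb.h>

struct toy_info {
	const char *description;
};

static const struct toy_info toy_dub1312 = {
	.description = "hypothetical gigabit adapter",
};

static const struct usb_device_id toy_table[] = {
	{ USB_DEVICE(0x2001, 0x4a00),
	  .driver_info = (unsigned long)&toy_dub1312 },
	{ }
};
MODULE_DEVICE_TABLE(usb, toy_table);

/* probe() recovers the per-device data from the matched table entry */
static int toy_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	const struct toy_info *info = (const void *)id->driver_info;

	dev_info(&intf->dev, "bound: %s\n", info->description);
	return 0;
}
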
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 42e1769..bd363b2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -652,6 +652,13 @@
.driver_info = 0,
},
+/* Samsung USB Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dbff290..d350d27 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -68,7 +68,6 @@
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
@@ -82,22 +81,22 @@
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ncm_parm,
- sizeof(ncm_parm));
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
/* there are some minor differences in NCM and MBIM defaults */
if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@
dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
- /* Adding a pad byte here simplifies the handling in
- * cdc_ncm_fill_tx_frame, by making tx_max always
- * represent the real skb max size.
- */
- if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
-
}
/*
@@ -439,6 +430,10 @@
goto error2;
}
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev))
+ goto error2;
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
@@ -453,12 +448,6 @@
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev)) {
- dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
- goto error2;
- }
-
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
@@ -475,6 +464,15 @@
dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
+ /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+ * outside the sane range. Adding a pad byte here if necessary
+ * simplifies the handling in cdc_ncm_fill_tx_frame, making
+ * tx_max always represent the real skb max size.
+ */
+ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
return 0;
error2:
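
The pad-byte logic relocated above keeps its original purpose: when the NTB
size is an exact multiple of the endpoint's wMaxPacketSize, a full transfer
would have to be terminated by a zero-length packet, so one extra byte
guarantees a short final packet and tx_max then equals the real skb maximum.
The check in isolation (illustrative helper, not the driver's code):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static u32 toy_fix_tx_max(struct usbnet *dev, u32 tx_max)
{
	/* exact multiple of wMaxPacketSize -> the last packet would be
	 * full-size and need a ZLP; one pad byte forces a short packet */
	if (tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
		tx_max++;

	return tx_max;
}
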
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d89dbe3..adb12f3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -449,9 +449,6 @@
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
-#define REALTEK_USB_DEVICE(vend, prod) \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
-
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
@@ -2739,6 +2736,12 @@
struct net_device *netdev;
int ret;
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
dev_err(&intf->dev, "Out of memory\n");
@@ -2819,9 +2822,9 @@
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
- {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
deleted file mode 100644
index f0a8791..0000000
--- a/drivers/net/usb/r815x.c
+++ /dev/null
@@ -1,248 +0,0 @@
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/usb/usbnet.h>
-
-#define RTL815x_REQT_READ 0xc0
-#define RTL815x_REQT_WRITE 0x40
-#define RTL815x_REQ_GET_REGS 0x05
-#define RTL815x_REQ_SET_REGS 0x05
-
-#define MCU_TYPE_PLA 0x0100
-#define OCP_BASE 0xe86c
-#define BASE_MII 0xa400
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-
-#define R815x_PHY_ID 32
-#define REALTEK_VENDOR_ID 0x0bda
-
-
-static int pla_read_word(struct usb_device *udev, u16 index)
-{
- int ret;
- u8 shift = index & 2;
- __le32 *tmp;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- index &= ~3;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out2;
-
- ret = __le32_to_cpu(*tmp);
- ret >>= (shift * 8);
- ret &= 0xffff;
-
-out2:
- kfree(tmp);
- return ret;
-}
-
-static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
-{
- __le32 *tmp;
- u32 mask = 0xffff;
- u16 byen = BYTE_EN_WORD;
- u8 shift = index & 2;
- int ret;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- data &= mask;
-
- if (shift) {
- byen <<= shift;
- mask <<= (shift * 8);
- data <<= (shift * 8);
- index &= ~3;
- }
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out3;
-
- data |= __le32_to_cpu(*tmp) & ~mask;
- *tmp = __cpu_to_le32(data);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
- 500);
-
-out3:
- kfree(tmp);
- return ret;
-}
-
-static int ocp_reg_read(struct usbnet *dev, u16 addr)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_read_word(dev->udev, ocp_index);
-
-out:
- return ret;
-}
-
-static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out1;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_write_word(dev->udev, ocp_index, data);
-
-out1:
- return ret;
-}
-
-static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
-{
- struct usbnet *dev = netdev_priv(netdev);
- int ret;
-
- if (phy_id != R815x_PHY_ID)
- return -EINVAL;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return -ENODEV;
-
- ret = ocp_reg_read(dev, BASE_MII + reg * 2);
-
- usb_autopm_put_interface(dev->intf);
- return ret;
-}
-
-static
-void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
-
- if (phy_id != R815x_PHY_ID)
- return;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return;
-
- ocp_reg_write(dev, BASE_MII + reg * 2, val);
-
- usb_autopm_put_interface(dev->intf);
-}
-
-static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 1;
-
- return status;
-}
-
-static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 0;
-
- return status;
-}
-
-static const struct driver_info r8152_info = {
- .description = "RTL8152 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8152_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct driver_info r8153_info = {
- .description = "RTL8153 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8153_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct usb_device_id products[] = {
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8152_info,
-},
-
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8153_info,
-},
-
- { }, /* END */
-};
-MODULE_DEVICE_TABLE(usb, products);
-
-static struct usb_driver r815x_driver = {
- .name = "r815x",
- .id_table = products,
- .probe = usbnet_probe,
- .disconnect = usbnet_disconnect,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .reset_resume = usbnet_resume,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(r815x_driver);
-
-MODULE_AUTHOR("Hayes Wang");
-MODULE_DESCRIPTION("Realtek USB ECM device");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dd10d58..f9e96c4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -752,14 +752,12 @@
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
- add_wait_queue(&unlink_wakeup, &wait);
+ add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@@ -773,15 +771,14 @@
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
- dev->wait = NULL;
- remove_wait_queue(&unlink_wakeup, &wait);
+ remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval;
+ int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -791,6 +788,8 @@
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
+ /* to not race resume */
+ pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@@ -817,6 +816,9 @@
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
- wake_up (dev->wait);
- }
+ /* waiting for all pending urbs to complete?
+ * only then can we forgo submitting anew
+ */
+ if (waitqueue_active(&dev->wait)) {
+ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+ wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- /* handle remote wakeup ASAP */
- if (!dev->wait &&
- netif_device_present(dev->net) &&
+ /* handle remote wakeup ASAP
+ * we cannot race against stop
+ */
+ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
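
The root cause fixed here was lifetime, not logic: the old code published an
on-stack DECLARE_WAIT_QUEUE_HEAD_ONSTACK() through dev->wait, so a waker
could dereference the pointer after usbnet_terminate_urbs() had returned and
its stack frame was gone. Embedding the waitqueue in struct usbnet and
gating wakeups on waitqueue_active() removes that window. The safe shape (a
sketch - real code also serializes 'pending' with its own locking, and a
full treatment pairs waitqueue_active() with a memory barrier):

#include <linux/wait.h>

struct toy_dev {
	wait_queue_head_t wait;		/* lives as long as the device */
	int pending;
};

static void toy_init(struct toy_dev *d)
{
	init_waitqueue_head(&d->wait);
}

static void toy_wait_idle(struct toy_dev *d)
{
	wait_event(d->wait, d->pending == 0);
}

/* completion path: cheap test first, then wake every waiter */
static void toy_complete(struct toy_dev *d)
{
	if (--d->pending == 0 && waitqueue_active(&d->wait))
		wake_up_all(&d->wait);
}
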
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2ec2041..c0e7c64 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -285,7 +285,11 @@
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75f8ed..841b608 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -671,8 +671,7 @@
if (err)
break;
} while (rq->vq->num_free);
- if (unlikely(!virtqueue_kick(rq->vq)))
- return false;
+ virtqueue_kick(rq->vq);
return !oom;
}
@@ -877,7 +876,7 @@
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+ if (unlikely(err)) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -886,6 +885,7 @@
kfree_skb(skb);
return NETDEV_TX_OK;
}
+ virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -1711,7 +1711,8 @@
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786f..0fa3b44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1762,11 +1762,20 @@
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_all_intrs(adapter);
-
- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
- vmxnet3_enable_all_intrs(adapter);
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b0f705c..1236812 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1318,6 +1318,9 @@
neigh_release(n);
+ if (reply == NULL)
+ goto out;
+
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1339,15 +1342,103 @@
}
#if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ struct neighbour *n, bool isrouter)
+{
+ struct net_device *dev = request->dev;
+ struct sk_buff *reply;
+ struct nd_msg *ns, *na;
+ struct ipv6hdr *pip6;
+ u8 *daddr;
+ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+ int ns_olen;
+ int i, len;
+
+ if (dev == NULL)
+ return NULL;
+
+ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+ sizeof(*na) + na_olen + dev->needed_tailroom;
+ reply = alloc_skb(len, GFP_ATOMIC);
+ if (reply == NULL)
+ return NULL;
+
+ reply->protocol = htons(ETH_P_IPV6);
+ reply->dev = dev;
+ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+ skb_push(reply, sizeof(struct ethhdr));
+ skb_set_mac_header(reply, 0);
+
+ ns = (struct nd_msg *)skb_transport_header(request);
+
+ daddr = eth_hdr(request)->h_source;
+ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }
+
+ /* Ethernet header */
+ ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+ ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+ reply->protocol = htons(ETH_P_IPV6);
+
+ skb_pull(reply, sizeof(struct ethhdr));
+ skb_set_network_header(reply, 0);
+ skb_put(reply, sizeof(struct ipv6hdr));
+
+ /* IPv6 header */
+
+ pip6 = ipv6_hdr(reply);
+ memset(pip6, 0, sizeof(struct ipv6hdr));
+ pip6->version = 6;
+ pip6->priority = ipv6_hdr(request)->priority;
+ pip6->nexthdr = IPPROTO_ICMPV6;
+ pip6->hop_limit = 255;
+ pip6->daddr = ipv6_hdr(request)->saddr;
+ pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+ skb_pull(reply, sizeof(struct ipv6hdr));
+ skb_set_transport_header(reply, 0);
+
+ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na)+na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ na->icmph.icmp6_router = isrouter;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+ ether_addr_copy(&na->opt[2], n->ha);
+ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+ na->opt[1] = na_olen >> 3;
+
+ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+ csum_partial(na, sizeof(*na)+na_olen, 0));
+
+ pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+ skb_push(reply, sizeof(struct ipv6hdr));
+
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return reply;
+}
+
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct neighbour *n;
- union vxlan_addr ipa;
+ struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
- struct nd_msg *msg;
- struct inet6_dev *in6_dev = NULL;
+ struct neighbour *n;
+ struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@@ -1360,19 +1451,20 @@
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
- if (ipv6_addr_loopback(daddr) ||
- ipv6_addr_is_multicast(daddr))
- goto out;
-
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
+ struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@@ -1386,13 +1478,23 @@
goto out;
}
- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
- !!in6_dev->cnf.forwarding,
- true, false, false);
+ reply = vxlan_na_create(skb, n,
+ !!(f ? f->flags & NTF_ROUTER : 0));
+
neigh_release(n);
+
+ if (reply == NULL)
+ goto out;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+
} else if (vxlan->flags & VXLAN_F_L3MISS) {
- ipa.sin6.sin6_addr = *daddr;
- ipa.sa.sa_family = AF_INET6;
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = msg->target,
+ .sa.sa_family = AF_INET6,
+ };
+
vxlan_ip_miss(dev, &ipa);
}
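
The checksum at the heart of vxlan_na_create() follows the standard ICMPv6
recipe: the sum covers an IPv6 pseudo-header (addresses, length, next
header) plus the message body, with csum_partial() folding the body and
csum_ipv6_magic() adding the pseudo-header. Factored out for clarity
(illustrative helper, not the driver's code):

#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>

static __sum16 toy_icmp6_cksum(const struct ipv6hdr *pip6,
			       struct icmp6hdr *icmph, int len)
{
	icmph->icmp6_cksum = 0;		/* must be zero while summing */

	return csum_ipv6_magic(&pip6->saddr, &pip6->daddr, len,
			       IPPROTO_ICMPV6,
			       csum_partial(icmph, len, 0));
}
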
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1cc1356..1b6b4d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -57,7 +57,7 @@
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -96,7 +96,7 @@
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 11eab9f..9078a6c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1534,7 +1534,7 @@
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
- u32 reg;
+ u32 reg, last_val;
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
@@ -1542,9 +1542,14 @@
if (AR_SREV_9285_12_OR_LATER(ah))
return true;
+ last_val = REG_READ(ah, AR_OBS_BUS_1);
do {
reg = REG_READ(ah, AR_OBS_BUS_1);
+ if (reg != last_val)
+ return true;
+ udelay(1);
+ last_val = reg;
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
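
The new check treats any movement in AR_OBS_BUS_1 between two reads as proof
the MAC is alive, rather than matching a single magic "hung" value; udelay(1)
gives the observation bus time to advance. Schematically (a sketch of the
idea - REG_READ, AR_OBS_BUS_1 and struct ath_hw are ath9k's own, and the
real function also keeps the 0x00702400 pattern test):

static bool toy_check_alive(struct ath_hw *ah)
{
	int count = 50;
	u32 last_val = REG_READ(ah, AR_OBS_BUS_1);
	u32 reg;

	do {
		reg = REG_READ(ah, AR_OBS_BUS_1);
		if (reg != last_val)	/* state changed: not hung */
			return true;
		udelay(1);
		last_val = reg;
	} while (count-- > 0);

	return false;			/* register frozen: assume hang */
}
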
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a0ebdd0..82e340d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -732,11 +732,18 @@
return NULL;
/*
- * mark descriptor as zero-length and set the 'more'
- * flag to ensure that both buffers get discarded
+ * Re-check previous descriptor, in case it has been filled
+	 * in the meantime.
*/
- rs->rs_datalen = 0;
- rs->rs_more = true;
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ /*
+ * mark descriptor as zero-length and set the 'more'
+ * flag to ensure that both buffers get discarded
+ */
+ rs->rs_datalen = 0;
+ rs->rs_more = true;
+ }
}
list_del(&bf->list);
@@ -985,32 +992,32 @@
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
- int ret = 0;
/*
* Discard corrupt descriptors which are marked in
* ath_get_next_rx_buf().
*/
- sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
- return -EINVAL;
+ goto corrupt;
+
+ sc->rx.discard_next = false;
/*
* Discard zero-length packets.
*/
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
+ /*
+ * rs_status follows rs_datalen so if rs_datalen is too large
+ * we can take a hint that hardware corrupted it, so ignore
+ * those frames.
+ */
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
/* Only use status info from the last fragment */
@@ -1024,10 +1031,8 @@
* This is different from the other corrupt descriptor
* condition handled above.
*/
- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
- ret = -EINVAL;
- goto exit;
- }
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+ goto corrupt;
hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
@@ -1043,18 +1048,15 @@
if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
RX_STAT_INC(rx_beacons);
@@ -1064,15 +1066,11 @@
/*
* This shouldn't happen, but have a safety check anyway.
*/
- if (WARN_ON(!ah->curchan)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (WARN_ON(!ah->curchan))
+ return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
- ret =-EINVAL;
- goto exit;
- }
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ return -EINVAL;
ath9k_process_rssi(common, hw, rx_stats, rx_status);
@@ -1087,9 +1085,11 @@
sc->rx.num_pkts++;
#endif
-exit:
- sc->rx.discard_next = false;
- return ret;
+ return 0;
+
+corrupt:
+ sc->rx.discard_next = rx_stats->rs_more;
+ return -EINVAL;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
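The recv.c rewrite above replaces the shared "exit:" label, which
unconditionally cleared sc->rx.discard_next, with plain early returns for
ordinary rejects plus a dedicated "corrupt:" label, so only a genuinely
corrupt descriptor can poison the buffer that follows it. A condensed,
hypothetical sketch of the resulting control flow (field and helper names
are invented, not the ath9k ones):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct rx_stats {               /* abbreviated stand-in */
            int datalen;
            bool more;
            bool corrupt_desc;
            bool rejected;
    };

    static bool discard_next;

    static int process_rx(const struct rx_stats *rs, bool discard_current)
    {
            if (discard_current)
                    goto corrupt;
            discard_next = false;

            if (!rs->datalen || rs->corrupt_desc)
                    goto corrupt;
            if (rs->rejected)
                    return -EINVAL; /* ordinary reject: next buffer is fine */
            return 0;

    corrupt:
            /* poison the follow-up buffer only if this frame spans into it */
            discard_next = rs->more;
            return -EINVAL;
    }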
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0a75e2f..55897d5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1444,14 +1444,16 @@
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- if (!tid->sched)
- continue;
-
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
+ if (!tid->sched) {
+ ath_txq_unlock(sc, txq);
+ continue;
+ }
+
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
@@ -2061,7 +2063,7 @@
ATH_TXBUF_RESET(bf);
- if (tid) {
+ if (tid && ieee80211_is_data_present(hdr->frame_control)) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2184,14 +2186,15 @@
txq->stopped = true;
}
+ if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
+
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an &&
ieee80211_is_data_present(hdr->frame_control)) {
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
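The xmit.c change is a test-under-lock fix: tid->sched was previously read
before the txq lock was taken, racing against a concurrent scheduler that
could flip it. The general shape of the fix, with an invented queue
structure standing in for the ath9k types:

    #include <linux/spinlock.h>

    struct tx_queue {               /* illustrative only */
            spinlock_t lock;
            bool scheduled;
    };

    static void drain_one(struct tx_queue *q)
    {
            spin_lock(&q->lock);
            if (!q->scheduled) {    /* decide only while holding the lock */
                    spin_unlock(&q->lock);
                    return;
            }
            q->scheduled = false;
            /* ... flush buffered frames ... */
            spin_unlock(&q->lock);
    }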
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3e99189..ddaa9ef 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -457,7 +457,6 @@
u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */
- struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */
u16 head_align; /* buffer pointer alignment */
u16 sgentry_align; /* scatter-gather buffer alignment */
};
@@ -1944,19 +1943,21 @@
if (lastfrm && chain_pad)
tail_pad += blksize - chain_pad;
if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
- pkt_pad = bus->txglom_sgpad;
- if (pkt_pad == NULL)
- brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+ bus->head_align);
if (pkt_pad == NULL)
return -ENOMEM;
ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ kfree_skb(pkt_pad);
return ret;
+ }
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
} else {
ntail = pkt->data_len + tail_pad -
@@ -2011,7 +2012,7 @@
return ret;
head_pad = (u16)ret;
if (head_pad)
- memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
+ memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
total_len += pkt_next->len;
@@ -3486,10 +3487,6 @@
bus->txglom = false;
value = 1;
pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
- bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
- if (!bus->txglom_sgpad)
- brcmf_err("allocating txglom padding skb failed, reduced performance\n");
-
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@@ -4053,7 +4050,6 @@
brcmf_sdio_chip_detach(&bus->ci);
}
- brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
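The brcmfmac change retires the long-lived txglom_sgpad buffer in favour of
allocating a padding packet per transfer, which in turn obliges the error
path to free it and the success path to trim it to exactly the pad length
before queueing. A sketch of the sequence; alloc_pad_pkt() and
align_pkt_header() are hypothetical stand-ins for brcmu_pkt_buf_get_skb()
and brcmf_sdio_txpkt_hdalign():

    #include <linux/skbuff.h>

    struct sk_buff *alloc_pad_pkt(unsigned int len);        /* hypothetical */
    int align_pkt_header(struct sk_buff *pad);              /* hypothetical */

    static int add_tail_pad(struct sk_buff_head *q, struct sk_buff *pkt,
                            int tail_pad, int tail_chop, int head_align)
    {
            struct sk_buff *pad;

            pad = alloc_pad_pkt(tail_pad + tail_chop + head_align);
            if (!pad)
                    return -ENOMEM;

            if (align_pkt_header(pad) < 0) {
                    kfree_skb(pad); /* this path now owns the packet */
                    return -EIO;
            }

            skb_trim(pad, tail_pad + tail_chop);    /* exactly the pad payload */
            __skb_queue_after(q, pkt, pad);
            return 0;
    }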
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d36e252..5965255 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -147,7 +147,7 @@
if (!sta->ap && sta->u.sta.challenge)
kfree(sta->u.sta.challenge);
- del_timer(&sta->timer);
+ del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
kfree(sta);
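The one-line hostap fix matters because del_timer() only removes a pending
timer; a handler already executing on another CPU keeps running and can
dereference sta after the kfree() that follows. del_timer_sync() also waits
for a running handler to finish. Teardown ordering in miniature (the struct
is illustrative):

    #include <linux/slab.h>
    #include <linux/timer.h>

    struct session {
            struct timer_list timer;
            /* ... */
    };

    static void session_free(struct session *s)
    {
            /* del_timer(&s->timer) would be insufficient: a handler
             * still running on another CPU could touch *s below */
            del_timer_sync(&s->timer);
            kfree(s);       /* safe: no handler can run any more */
    }

The usual caveat applies: del_timer_sync() must not be called from the timer
handler itself, nor while holding a lock the handler takes.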
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c0d070c..9cdd91c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -590,6 +590,7 @@
sizeof(priv->tid_data[sta_id][tid]));
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
priv->num_stations--;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a6839df..398dd09 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1291,8 +1291,6 @@
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
- struct ieee80211_tx_info *info;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id;
int tid;
@@ -1379,22 +1377,28 @@
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* The packet was transmitted successfully; failures arrive as
+ * single frames, because the firmware transmits a frame at least
+ * once without aggregation before failing it.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
info = IEEE80211_SKB_CB(skb);
memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_resp->txed_2_done;
info->status.ampdu_len = ba_resp->txed;
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6c..18a895a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -872,8 +872,11 @@
lockdep_assert_held(&mvm->mutex);
- /* Rssi update while not associated ?! */
- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ /*
+ * RSSI update while not associated - can happen, since the
+ * statistics are handled asynchronously.
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
/* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index e4ead86..2b0ba1f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -152,7 +152,7 @@
IWL_POWER_SCHEME_LP
};
-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4df12fa..76ee486 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -822,16 +822,12 @@
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
- struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id, tid, freed;
-
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
@@ -888,22 +884,26 @@
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* The packet was transmitted successfully; failures arrive as
+ * single frames, because the firmware transmits a frame at least
+ * once without aggregation before failing it.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- info = IEEE80211_SKB_CB(skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
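The dvm and mvm hunks apply identical logic: every frame reclaimed inside a
block-ack window was by definition acknowledged, so IEEE80211_TX_STAT_ACK is
now set on each skb rather than only on the first of the batch. The
per-frame walk, condensed into a standalone helper:

    #include <net/mac80211.h>

    static void ack_reclaimed_frames(struct sk_buff_head *reclaimed)
    {
            struct sk_buff *skb;

            skb_queue_walk(reclaimed, skb) {
                    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                    memset(&info->status, 0, sizeof(info->status));
                    /* a frame that failed would have been retried without
                     * aggregation first, so everything here was acked */
                    info->flags |= IEEE80211_TX_STAT_ACK;
            }
    }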
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f47bcbe..3872ead 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,13 +359,12 @@
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 32f7500..cb6d189 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -621,7 +621,7 @@
id = *pos++;
elen = *pos++;
left -= 2;
- if (elen > left || elen == 0) {
+ if (elen > left) {
lbs_deb_scan("scan response: invalid IE fmt\n");
goto done;
}
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4..5d9a808 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -189,8 +189,7 @@
vht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c..7db1a89 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -308,8 +308,7 @@
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 03688aa..7fe7b53 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1211,6 +1211,12 @@
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
+ /* If skb allocation failed earlier for this Rx packet,
+ * rx_buf_list[rd_index] was left set to NULL.
+ */
+ if (!skb_data)
+ return -ENOMEM;
+
MWIFIEX_SKB_PACB(skb_data, &buf_pa);
pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE);
@@ -1525,6 +1531,14 @@
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
+ mwifiex_pcie_enable_host_int(adapter);
+ if (mwifiex_write_reg(adapter,
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
+ return -1;
+ }
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1993,23 +2007,9 @@
adapter->int_status |= pcie_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
- if (pcie_ireg & HOST_INTR_CMD_DONE) {
- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
- (adapter->ps_state == PS_STATE_SLEEP)) {
- mwifiex_pcie_enable_host_int(adapter);
- if (mwifiex_write_reg(adapter,
- PCIE_CPU_INT_EVENT,
- CPU_INTR_SLEEP_CFM_DONE)
- ) {
- dev_warn(adapter->dev,
- "Write register failed\n");
- return;
-
- }
- }
- } else if (!adapter->pps_uapsd_mode &&
- adapter->ps_state == PS_STATE_SLEEP &&
- mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP &&
+ mwifiex_pcie_ok_to_access_hw(adapter)) {
/* Potentially for PCIe we could get other
* interrupts like shared. Don't change power
* state until cookie is set */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e..668547c 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2101,12 +2101,12 @@
curr_bss->ht_info_offset);
if (curr_bss->bcn_vht_cap)
- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_cap_offset);
+ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_cap_offset);
if (curr_bss->bcn_vht_oper)
- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_info_offset);
+ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_info_offset);
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index e8ebbd4..2087488 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,8 +22,6 @@
#define USB_VERSION "1.0"
-static const char usbdriver_name[] = "usb8xxx";
-
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_card_rec *usb_card;
@@ -527,13 +525,6 @@
MWIFIEX_BSS_ROLE_ANY),
MWIFIEX_ASYNC_CMD);
-#ifdef CONFIG_PM
- /* Resume handler may be called due to remote wakeup,
- * force to exit suspend anyway
- */
- usb_disable_autosuspend(card->udev);
-#endif /* CONFIG_PM */
-
return 0;
}
@@ -567,13 +558,12 @@
}
static struct usb_driver mwifiex_usb_driver = {
- .name = usbdriver_name,
+ .name = "mwifiex_usb",
.probe = mwifiex_usb_probe,
.disconnect = mwifiex_usb_disconnect,
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
- .supports_autosuspend = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 13eaeed..981cf6e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -559,7 +559,8 @@
mwifiex_wmm_delete_all_ralist(priv);
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
- if (priv->adapter->if_ops.clean_pcie_ring)
+ if (priv->adapter->if_ops.clean_pcie_ring &&
+ !priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 7f8b5d1..41d4a81 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -5460,14 +5460,15 @@
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- rt2800_bbp_write(rt2x00dev, 69, 0x0d);
- rt2800_bbp_write(rt2x00dev, 70, 0x06);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
- rt2800_bbp_write(rt2x00dev, 148, 0x84);
}
rt2800_disable_unused_dac_adc(rt2x00dev);
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 123c4bb..cde0eaf 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -180,7 +180,7 @@
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
/* The actual length doesn't include the target's alignment */
- skb->len = desc->length - PLCP_HEADER_LENGTH;
+ skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
fc = (u16 *)skb->data;
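The wl1251 fix swaps a raw skb->len assignment for skb_trim(). Writing
skb->len directly leaves skb->tail stale, so a later skb_tailroom() or
skb_put() operates on an inconsistent buffer; skb_trim() shrinks len and
tail together. Side by side:

    #include <linux/skbuff.h>

    static void shorten(struct sk_buff *skb, unsigned int new_len)
    {
            /* Buggy: skb->len = new_len; tail would still point past
             * new_len, desynchronising skb_tailroom()/skb_put(). */
            skb_trim(skb, new_len); /* adjusts len and tail consistently */
    }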
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7669d49..301cc03 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -132,8 +132,7 @@
/* If the skb is GSO then we'll also need an extra slot for the
* metadata.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb))
min_slots_needed++;
/* If the skb can't possibly fit in the remaining slots
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e5284bc..438d0c0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@
}
/* Leave a gap for the GSO descriptor. */
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- else
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
@@ -338,19 +338,15 @@
int head = 1;
int old_meta_prod;
int gso_type;
- int gso_size;
old_meta_prod = npo->meta_prod;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- gso_size = skb_shinfo(skb)->gso_size;
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- gso_size = skb_shinfo(skb)->gso_size;
- } else {
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- gso_size = 0;
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -368,7 +364,7 @@
if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
} else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
@@ -500,8 +496,9 @@
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++;
/* If the skb may not fit then bail out now */
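All three xen-netback hunks converge on skb_is_gso() - a test that
skb_shinfo(skb)->gso_size is non-zero - as the gate before inspecting
gso_type, which also makes it safe to read meta->gso_size straight from the
skb, since it is only consumed once a GSO type was actually selected. The
mapping in isolation (the XEN_NETIF_* constants come from the Xen netif
interface header):

    #include <linux/skbuff.h>
    #include <xen/interface/io/netif.h>

    static int pick_gso_type(const struct sk_buff *skb)
    {
            int gso_type = XEN_NETIF_GSO_TYPE_NONE;

            if (skb_is_gso(skb)) {  /* true iff gso_size != 0 */
                    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                            gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
                    else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                            gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
            }
            return gso_type;
    }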
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f9daa9e..e30d800 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -907,6 +907,7 @@
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
+ skb_reset_network_header(skb);
if (checksum_setup(dev, skb)) {
kfree_skb(skb);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 10b5110..89e888a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -342,27 +342,72 @@
}
EXPORT_SYMBOL(of_get_cpu_node);
-/** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
+/**
+ * __of_device_is_compatible() - Check if the node matches given constraints
+ * @device: pointer to node
+ * @compat: required compatible string, NULL or "" for any match
+ * @type: required device_type value, NULL or "" for any match
+ * @name: required node name, NULL or "" for any match
+ *
+ * Checks if the given @compat, @type and @name strings match the
+ * properties of the given @device. A constraint can be skipped by
+ * passing NULL or an empty string as the constraint.
+ *
+ * Returns 0 for no match, and a positive integer on match. The return
+ * value is a relative score with larger values indicating better
+ * matches. The score is weighted for the most specific compatible value
+ * to get the highest score. Matching type is next, followed by matching
+ * name. Practically speaking, this results in the following priority
+ * order for matches:
+ *
+ * 1. specific compatible && type && name
+ * 2. specific compatible && type
+ * 3. specific compatible && name
+ * 4. specific compatible
+ * 5. general compatible && type && name
+ * 6. general compatible && type
+ * 7. general compatible && name
+ * 8. general compatible
+ * 9. type && name
+ * 10. type
+ * 11. name
*/
static int __of_device_is_compatible(const struct device_node *device,
- const char *compat)
+ const char *compat, const char *type, const char *name)
{
- const char* cp;
- int cplen, l;
+ struct property *prop;
+ const char *cp;
+ int index = 0, score = 0;
- cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ /* Compatible match has highest priority */
+ if (compat && compat[0]) {
+ prop = __of_find_property(device, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp), index++) {
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
+ score = INT_MAX/2 - (index << 2);
+ break;
+ }
+ }
+ if (!score)
+ return 0;
}
- return 0;
+ /* Matching type is better than matching name */
+ if (type && type[0]) {
+ if (!device->type || of_node_cmp(type, device->type))
+ return 0;
+ score += 2;
+ }
+
+ /* Matching name is a bit better than not */
+ if (name && name[0]) {
+ if (!device->name || of_node_cmp(name, device->name))
+ return 0;
+ score++;
+ }
+
+ return score;
}
/** Checks if the given "compat" string matches one of the strings in
@@ -375,7 +420,7 @@
int res;
raw_spin_lock_irqsave(&devtree_lock, flags);
- res = __of_device_is_compatible(device, compat);
+ res = __of_device_is_compatible(device, compat, NULL, NULL);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
@@ -681,10 +726,7 @@
raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
- if (__of_device_is_compatible(np, compatible) &&
+ if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
}
@@ -730,65 +772,26 @@
}
EXPORT_SYMBOL(of_find_node_with_property);
-static const struct of_device_id *
-of_match_compatible(const struct of_device_id *matches,
- const struct device_node *node)
-{
- const char *cp;
- int cplen, l;
- const struct of_device_id *m;
-
- cp = __of_get_property(node, "compatible", &cplen);
- while (cp && (cplen > 0)) {
- m = matches;
- while (m->name[0] || m->type[0] || m->compatible[0]) {
- /* Only match for the entries without type and name */
- if (m->name[0] || m->type[0] ||
- of_compat_cmp(m->compatible, cp,
- strlen(m->compatible)))
- m++;
- else
- return m;
- }
-
- /* Get node's next compatible string */
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
- }
-
- return NULL;
-}
-
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
- const struct of_device_id *m;
+ const struct of_device_id *best_match = NULL;
+ int score, best_score = 0;
if (!matches)
return NULL;
- m = of_match_compatible(matches, node);
- if (m)
- return m;
-
- while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
- int match = 1;
- if (matches->name[0])
- match &= node->name
- && !strcmp(matches->name, node->name);
- if (matches->type[0])
- match &= node->type
- && !strcmp(matches->type, node->type);
- if (matches->compatible[0])
- match &= __of_device_is_compatible(node,
- matches->compatible);
- if (match)
- return matches;
- matches++;
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
+ score = __of_device_is_compatible(node, matches->compatible,
+ matches->type, matches->name);
+ if (score > best_score) {
+ best_match = matches;
+ best_score = score;
+ }
}
- return NULL;
+
+ return best_match;
}
/**
@@ -796,12 +799,7 @@
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
- * Low level utility function used by device matching. We have two ways
- * of matching:
- * - Try to find the best compatible match by comparing each compatible
- * string of device node with all the given matches respectively.
- * - If the above method failed, then try to match the compatible by using
- * __of_device_is_compatible() besides the match in type and name.
+ * Low level utility function used by device matching.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
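To see the new scoring in action, take a hypothetical node with
compatible = "acme,ether-v2", "acme,ether", device_type = "network" and
node name "eth". Per the code above, the first-listed compatible scores
INT_MAX/2, each later position subtracts 4, a matching type adds 2 and a
matching name adds 1:

    #include <linux/mod_devicetable.h>

    static const struct of_device_id table[] = {
            { .compatible = "acme,ether" },                 /* INT_MAX/2 - 4     */
            { .compatible = "acme,ether",
              .type = "network" },                          /* INT_MAX/2 - 4 + 2 */
            { .compatible = "acme,ether-v2" },              /* INT_MAX/2 <- wins */
            { .type = "network", .name = "eth" },           /* 2 + 1             */
            { }
    };

of_match_node(table, np) would return the "acme,ether-v2" entry: a more
specific compatible outranks any combination of type and name bonuses,
exactly the priority order documented in the kernel-doc above.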
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index e21012b..6643d19 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -300,6 +300,72 @@
of_node_put(np);
}
+static struct of_device_id match_node_table[] = {
+ { .data = "A", .name = "name0", }, /* Name alone is lowest priority */
+ { .data = "B", .type = "type1", }, /* followed by type alone */
+
+ { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
+ { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
+ { .data = "Cc", .name = "name2", .type = "type2", },
+
+ { .data = "E", .compatible = "compat3" },
+ { .data = "G", .compatible = "compat2", },
+ { .data = "H", .compatible = "compat2", .name = "name5", },
+ { .data = "I", .compatible = "compat2", .type = "type1", },
+ { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
+ { .data = "K", .compatible = "compat2", .name = "name9", },
+ {}
+};
+
+static struct {
+ const char *path;
+ const char *data;
+} match_node_tests[] = {
+ { .path = "/testcase-data/match-node/name0", .data = "A", },
+ { .path = "/testcase-data/match-node/name1", .data = "B", },
+ { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
+ { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
+ { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
+ { .path = "/testcase-data/match-node/name3", .data = "E", },
+ { .path = "/testcase-data/match-node/name4", .data = "G", },
+ { .path = "/testcase-data/match-node/name5", .data = "H", },
+ { .path = "/testcase-data/match-node/name6", .data = "G", },
+ { .path = "/testcase-data/match-node/name7", .data = "I", },
+ { .path = "/testcase-data/match-node/name8", .data = "J", },
+ { .path = "/testcase-data/match-node/name9", .data = "K", },
+};
+
+static void __init of_selftest_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
+ np = of_find_node_by_path(match_node_tests[i].path);
+ if (!np) {
+ selftest(0, "missing testcase node %s\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ match = of_match_node(match_node_table, np);
+ if (!match) {
+ selftest(0, "%s didn't match anything\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ if (strcmp(match->data, match_node_tests[i].data) != 0) {
+ selftest(0, "%s got wrong match. expected %s, got %s\n",
+ match_node_tests[i].path, match_node_tests[i].data,
+ (const char *)match->data);
+ continue;
+ }
+ selftest(1, "passed");
+ }
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -316,6 +382,7 @@
of_selftest_property_match_string();
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
+ of_selftest_match_node();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644
index 0000000..3a5b75a
--- /dev/null
+++ b/drivers/of/testcase-data/testcases.dtsi
@@ -0,0 +1,3 @@
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
similarity index 100%
rename from arch/arm/boot/dts/testcases/tests-interrupts.dtsi
rename to drivers/of/testcase-data/tests-interrupts.dtsi
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/testcase-data/tests-match.dtsi
new file mode 100644
index 0000000..c9e5411
--- /dev/null
+++ b/drivers/of/testcase-data/tests-match.dtsi
@@ -0,0 +1,19 @@
+
+/ {
+ testcase-data {
+ match-node {
+ name0 { };
+ name1 { device_type = "type1"; };
+ a { name2 { device_type = "type1"; }; };
+ b { name2 { }; };
+ c { name2 { device_type = "type2"; }; };
+ name3 { compatible = "compat3"; };
+ name4 { compatible = "compat2", "compat3"; };
+ name5 { compatible = "compat2", "compat3"; };
+ name6 { compatible = "compat1", "compat2", "compat3"; };
+ name7 { compatible = "compat2"; device_type = "type1"; };
+ name8 { compatible = "compat2"; device_type = "type1"; };
+ name9 { compatible = "compat2"; };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
similarity index 100%
rename from arch/arm/boot/dts/testcases/tests-phandle.dtsi
rename to drivers/of/testcase-data/tests-phandle.dtsi
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc..3890166 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -162,8 +162,6 @@
avail = *r;
pci_clip_resource_to_region(bus, &avail, region);
- if (!resource_size(&avail))
- continue;
/*
* "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13478ec..0e79665 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -60,14 +60,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-/*
- * This product ID is registered by Marvell, and used when the Marvell
- * SoC is not the root complex, but an endpoint on the PCIe bus. It is
- * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
- * bridge.
- */
-#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
-
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
@@ -388,7 +380,8 @@
bridge->class = PCI_CLASS_BRIDGE_PCI;
bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->cache_line_size = 0x10;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a0fec6..955ab79 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -545,9 +545,15 @@
return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
char *name = kmalloc(20, GFP_KERNEL);
- msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
+ if (!name)
goto error_attrs;
+
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr) {
+ kfree(name);
+ goto error_attrs;
+ }
+
sprintf(name, "%d", entry->irq);
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = name;
@@ -589,6 +595,7 @@
++count;
msi_attr = msi_attrs[count];
}
+ kfree(msi_attrs);
return ret;
}
@@ -959,7 +966,6 @@
/**
* pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
-
* This function returns the number of device's MSI-X table entries and
* therefore the number of MSI-X vectors device is capable of sending.
* It returns a negative errno if the device is not capable of sending MSI-X
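The msi.c sysfs fix is the standard pairing rule for back-to-back
allocations: check each one immediately, and on failure release whatever the
same iteration already allocated before taking the common error path (the
trailing kfree(msi_attrs) closes the remaining leak). Generic shape:

    #include <linux/slab.h>
    #include <linux/sysfs.h>

    static int build_entry(struct attribute **out)
    {
            char *name = kmalloc(20, GFP_KERNEL);
            struct attribute *attr;

            if (!name)
                    return -ENOMEM;

            attr = kzalloc(sizeof(*attr), GFP_KERNEL);
            if (!attr) {
                    kfree(name);    /* free the earlier allocation first */
                    return -ENOMEM;
            }

            attr->name = name;      /* attr now owns the name */
            *out = attr;
            return 0;
    }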
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1febe90..fdbc294 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1181,6 +1181,8 @@
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
+ u16 cmd;
+ u8 pin;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
@@ -1190,6 +1192,17 @@
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
return 0;
}
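The pci.c hunk re-enables legacy INTx at enable time, but only for functions
that actually route an interrupt pin (PCI_INTERRUPT_PIN reads 0 otherwise)
and that have not switched to MSI/MSI-X. The config-space sequence on its
own, as a sketch:

    #include <linux/pci.h>

    static void intx_ensure_enabled(struct pci_dev *dev)
    {
            u16 cmd;
            u8 pin;

            pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
            if (!pin)               /* no INTx line on this function */
                    return;

            pci_read_config_word(dev, PCI_COMMAND, &cmd);
            if (cmd & PCI_COMMAND_INTX_DISABLE)
                    pci_write_config_word(dev, PCI_COMMAND,
                                          cmd & ~PCI_COMMAND_INTX_DISABLE);
    }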
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index afa2354..c7a551c 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,7 +5,7 @@
menu "PHY Subsystem"
config GENERIC_PHY
- tristate "PHY Core"
+ bool "PHY Core"
help
Generic PHY support.
@@ -61,6 +61,7 @@
config BCM_KONA_USB2_PHY
tristate "Broadcom Kona USB2 PHY Driver"
depends on GENERIC_PHY
+ depends on HAS_IOMEM
help
Enable this to support the Broadcom Kona USB 2.0 PHY.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 5f5b0f4..6c73837 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -176,6 +176,8 @@
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->init_count;
@@ -232,6 +234,8 @@
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->power_count;
mutex_unlock(&phy->mutex);
@@ -404,17 +408,11 @@
index = of_property_match_string(dev->of_node, "phy-names",
string);
phy = of_phy_get(dev, index);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
} else {
phy = phy_lookup(dev, string);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
}
+ if (IS_ERR(phy))
+ return phy;
if (!try_module_get(phy->ops->owner))
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 1dbe6ce..0786fef 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -76,10 +76,6 @@
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
@@ -87,6 +83,10 @@
}
phy_set_drvdata(phy, state);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 0c5efab..7f13932 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -134,11 +134,6 @@
dev_set_drvdata(dev, state);
spin_lock_init(&state->slock);
- phy_provider = devm_of_phy_provider_register(dev,
- exynos_mipi_video_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev,
&exynos_mipi_video_phy_ops, NULL);
@@ -152,6 +147,11 @@
phy_set_drvdata(phy, &state->phys[i]);
}
+ phy_provider = devm_of_phy_provider_register(dev,
+ exynos_mipi_video_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d43786f..d70ecd6 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -99,17 +99,17 @@
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- phy_provider = devm_of_phy_provider_register(&pdev->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* The boot loader may have left it on. Turn it off. */
phy_mvebu_sata_power_off(phy);
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index bfc5c33..7699752 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -177,11 +177,6 @@
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
- phy_provider = devm_of_phy_provider_register(phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev, "Failed to get control device phandle\n");
@@ -214,6 +209,11 @@
phy_set_drvdata(generic_phy, phy);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index daf65e6..c3ace1d 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -695,11 +695,6 @@
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
- phy_provider = devm_of_phy_provider_register(twl->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(twl->dev, &ops, init_data);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
@@ -708,6 +703,11 @@
phy_set_drvdata(phy, twl);
+ phy_provider = devm_of_phy_provider_register(twl->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
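Every phy-* hunk in this run makes the same move: devm_of_phy_provider_register()
is pushed to the end of probe, after devm_phy_create() and phy_set_drvdata(),
so a consumer resolving a phandle can never be handed a provider whose PHY
does not exist yet. A hedged probe skeleton following the same order (ops
and driver data are placeholders, and devm_phy_create()'s argument list has
varied across kernel versions - this follows the form used above):

    #include <linux/err.h>
    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>

    static struct phy_ops sample_phy_ops;   /* placeholder ops */

    static int sample_phy_probe(struct platform_device *pdev)
    {
            struct phy_provider *provider;
            struct phy *phy;

            phy = devm_phy_create(&pdev->dev, &sample_phy_ops, NULL);
            if (IS_ERR(phy))
                    return PTR_ERR(phy);
            phy_set_drvdata(phy, platform_get_drvdata(pdev));

            /* register last: from here on, consumer lookups can succeed */
            provider = devm_of_phy_provider_register(&pdev->dev,
                                                     of_phy_simple_xlate);
            return PTR_ERR_OR_ZERO(provider);
    }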
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be361b7..1e4e693 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,7 +217,7 @@
select PINCTRL_MXS
config PINCTRL_MSM
- tristate
+ bool
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53..eb25002 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@
}
static struct of_device_id capri_pinctrl_of_match[] = {
- { .compatible = "brcm,capri-pinctrl", },
+ { .compatible = "brcm,bcm11351-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 9ccf681..f9fabe9 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -584,7 +585,7 @@
spin_lock_irqsave(&pctl->lock, flags);
regval = readl(pctl->membase + reg);
- regval &= ~IRQ_CFG_IRQ_MASK;
+ regval &= ~(IRQ_CFG_IRQ_MASK << index);
writel(regval | (mode << index), pctl->membase + reg);
spin_unlock_irqrestore(&pctl->lock, flags);
@@ -665,6 +666,7 @@
static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
@@ -674,10 +676,12 @@
if (reg) {
int irqoffset;
+ chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
generic_handle_irq(pin_irq);
}
+ chained_irq_exit(chip, desc);
}
}
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index 01c494f..552b0e9 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -511,7 +511,7 @@
static inline u32 sunxi_irq_cfg_reg(u16 irq)
{
- u8 reg = irq / IRQ_CFG_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
return reg + IRQ_CFG_REG;
}
@@ -523,7 +523,7 @@
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
{
- u8 reg = irq / IRQ_CTRL_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
return reg + IRQ_CTRL_REG;
}
@@ -535,7 +535,7 @@
static inline u32 sunxi_irq_status_reg(u16 irq)
{
- u8 reg = irq / IRQ_STATUS_IRQ_PER_REG;
+ u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
return reg + IRQ_STATUS_REG;
}
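The three sunxi.h fixes correct the same bug: irq / IRQ_*_PER_REG yields a
register index, but the result is added to a byte-addressed base, so it must
be scaled by the 4-byte register stride. For instance, assuming eight IRQs
per config register (an illustrative value, not necessarily the sunxi
constant), irq 10 lives in register 10 / 8 = 1, i.e. byte offset
1 * 0x04 = 0x04 from IRQ_CFG_REG:

    #include <linux/types.h>

    /* per_reg = how many IRQs one 32-bit register describes */
    static inline u32 irq_reg_offset(u16 irq, u16 per_reg, u32 base)
    {
            return base + (irq / per_reg) * 0x04;   /* 4 bytes per register */
    }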
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 77d103f..567d691 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -89,7 +89,8 @@
/* GPSR6 */
FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
- FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+ FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
+ FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@
PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+ PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
/* IPSR0 */
PINMUX_IPSR_DATA(IP0_0, D0),
@@ -3825,7 +3827,7 @@
GP_6_11_FN, FN_IP13_25,
GP_6_10_FN, FN_IP13_24_23,
GP_6_9_FN, FN_IP13_22,
- 0, 0,
+ GP_6_8_FN, FN_SD1_CLK,
GP_6_7_FN, FN_IP13_21_19,
GP_6_6_FN, FN_IP13_18_16,
GP_6_5_FN, FN_IP13_15,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index a0d6152..617a491 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -598,7 +598,7 @@
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
dev_err(bank->chip.gc.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -611,7 +611,7 @@
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
sirfsoc_gpio_irq_mask(d);
- gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+ gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
}
static struct irq_chip sirfsoc_irq_chip = {
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 167f3d0..66977eb 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -183,9 +183,7 @@
struct resource r = {0};
int i, flags;
- if (acpi_dev_resource_memory(res, &r)
- || acpi_dev_resource_io(res, &r)
- || acpi_dev_resource_address_space(res, &r)
+ if (acpi_dev_resource_address_space(res, &r)
|| acpi_dev_resource_ext_address_space(res, &r)) {
pnp_add_resource(dev, &r);
return AE_OK;
@@ -217,6 +215,17 @@
}
switch (res->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (acpi_dev_resource_memory(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
+ case ACPI_RESOURCE_TYPE_IO:
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ if (acpi_dev_resource_io(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 769d265..deb7f4b 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -21,7 +21,7 @@
#include "pnpbios.h"
-static struct {
+__visible struct {
u16 offset;
u16 segment;
} pnp_bios_callpoint;
@@ -41,6 +41,7 @@
__asm__(".text \n"
__ALIGN_STR "\n"
+ ".globl pnp_bios_callfunc\n"
"pnp_bios_callfunc:\n"
" pushl %edx \n"
" pushl %ecx \n"
@@ -66,9 +67,9 @@
* after PnP BIOS oopses.
*/
-u32 pnp_bios_fault_esp;
-u32 pnp_bios_fault_eip;
-u32 pnp_bios_is_utter_crap = 0;
+__visible u32 pnp_bios_fault_esp;
+__visible u32 pnp_bios_fault_eip;
+__visible u32 pnp_bios_is_utter_crap = 0;
static spinlock_t pnp_bios_lock;
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 8a843a0..a40b9c3 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -52,8 +52,10 @@
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
- if (test_and_set_bit(offset, &lp3943->pin_used))
+ if (test_and_set_bit(offset, &lp3943->pin_used)) {
+ kfree(pwm_map);
return ERR_PTR(-EBUSY);
+ }
}
return pwm_map;
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index b4b0d83..7061ac0 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -678,6 +678,7 @@
struct list_head free_list;
dma_cookie_t completed_cookie;
struct tasklet_struct tasklet;
+ bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 502663f..91245f5d 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -206,8 +206,8 @@
{
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
- tasklet_schedule(&bdma_chan->tasklet);
+ if (bdma_chan->active)
+ tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@
}
#endif /* CONFIG_PCI_MSI */
- tasklet_enable(&bdma_chan->tasklet);
+ bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@
BUG_ON(!list_empty(&bdma_chan->active_list));
BUG_ON(!list_empty(&bdma_chan->queue));
- tasklet_disable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector);
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector);
+ } else
+#endif
+ synchronize_irq(priv->pdev->irq);
+
+ tasklet_kill(&bdma_chan->tasklet);
spin_lock_bh(&bdma_chan->lock);
list_splice_init(&bdma_chan->free_list, &list);
spin_unlock_bh(&bdma_chan->lock);
- tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@
bdma_chan->dchan.cookie = 1;
bdma_chan->dchan.chan_id = i;
bdma_chan->id = i;
+ bdma_chan->active = false;
spin_lock_init(&bdma_chan->lock);
@@ -799,7 +809,6 @@
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
(unsigned long)bdma_chan);
- tasklet_disable(&bdma_chan->tasklet);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 16a309e..afca1bc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -953,6 +953,8 @@
return 0;
}
+static int _regulator_do_enable(struct regulator_dev *rdev);
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1013,10 +1015,9 @@
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
- ops->enable) {
- ret = ops->enable(rdev);
- if (ret < 0) {
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
rdev_err(rdev, "failed to enable\n");
goto out;
}
@@ -1359,7 +1360,7 @@
goto found;
/* Don't log an error when called from regulator_get_optional() */
} else if (!have_full_constraints() || exclusive) {
- dev_err(dev, "dummy supplies not allowed\n");
+ dev_warn(dev, "dummy supplies not allowed\n");
}
mutex_unlock(&regulator_list_mutex);
@@ -1907,8 +1908,6 @@
trace_regulator_disable_complete(rdev_get_name(rdev));
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
return 0;
}
@@ -1932,6 +1931,8 @@
rdev_err(rdev, "failed to disable\n");
return ret;
}
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
rdev->use_count = 0;
@@ -1984,20 +1985,16 @@
{
int ret = 0;
- /* force disable */
- if (rdev->desc->ops->disable) {
- /* ah well, who wants to live forever... */
- ret = rdev->desc->ops->disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
- return ret;
- }
- /* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
- REGULATOR_EVENT_DISABLE, NULL);
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to force disable\n");
+ return ret;
}
- return ret;
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
+
+ return 0;
}
/**
@@ -3630,23 +3627,18 @@
mutex_lock(&regulator_list_mutex);
list_for_each_entry(rdev, &regulator_list, list) {
- struct regulator_ops *ops = rdev->desc->ops;
-
mutex_lock(&rdev->mutex);
- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
- ops->enable) {
- error = ops->enable(rdev);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ error = _regulator_do_enable(rdev);
if (error)
ret = error;
} else {
if (!have_full_constraints())
goto unlock;
- if (!ops->disable)
- goto unlock;
if (!_regulator_is_enabled(rdev))
goto unlock;
- error = ops->disable(rdev);
+ error = _regulator_do_disable(rdev);
if (error)
ret = error;
}
@@ -3820,7 +3812,7 @@
ops = rdev->desc->ops;
c = rdev->constraints;
- if (!ops->disable || (c && c->always_on))
+ if (c && c->always_on)
continue;
mutex_lock(&rdev->mutex);
@@ -3841,7 +3833,7 @@
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
- ret = ops->disable(rdev);
+ ret = _regulator_do_disable(rdev);
if (ret != 0)
rdev_err(rdev, "couldn't disable: %d\n", ret);
} else {
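The regulator/core.c series funnels every caller through
_regulator_do_enable()/_regulator_do_disable() instead of poking
ops->enable/ops->disable directly, and moves the DISABLE notification out of
the low-level helper into the use-count paths, so forced disable can raise
FORCE_DISABLE | DISABLE in one place and only after the hardware call
succeeded. Schematically, with invented helper names:

    /* schematic only; names are invented */
    static int do_disable(struct regulator_dev *rdev)
    {
            return low_level_disable(rdev);         /* no notification here */
    }

    static int force_disable(struct regulator_dev *rdev)
    {
            int ret = do_disable(rdev);

            if (ret < 0)
                    return ret;
            notify(rdev, REGULATOR_EVENT_FORCE_DISABLE |
                         REGULATOR_EVENT_DISABLE);  /* once, after success */
            return 0;
    }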
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 56727eb..91e99a2 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,3 +1,4 @@
+
/*
* Regulator driver for DA9063 PMIC series
*
@@ -60,7 +61,8 @@
.desc.ops = &da9063_ldo_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
- .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
+ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
+ + (DA9063_V##regl_name##_BIAS)), \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_LDO_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index 186df87..e061952 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -166,9 +166,10 @@
ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
MAX14577_REG_MAX);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- }
+ else
+ ret = 0;
of_node_put(np);
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d7164bb..d958dfa 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -535,7 +535,7 @@
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -591,6 +591,8 @@
rmode++;
}
+ of_node_put(regulators_np);
+
if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
pdata->buck2_gpiodvs = true;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7afd373..c4cde9c 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -580,10 +580,12 @@
clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
- ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+ } else {
+ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
}
s3c_rtc_enable(pdev, 0);
@@ -605,10 +607,15 @@
clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
- writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
- if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
- writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+ if (ticnt_en_save) {
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save,
+ s3c_rtc_base + S3C2410_RTCCON);
+ }
+ } else {
+ writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
}
if (device_may_wakeup(dev) && wake_en) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f6b9188..9f0ea6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -610,6 +610,7 @@
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index dc542e0..0bc91e4 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -311,7 +311,7 @@
} __packed * msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen;
+ int replylen, req_sumlen, resp_sumlen;
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
@@ -321,12 +321,34 @@
xcRB->request_data_length;
if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+
+ /* Overflow check:
+ * the sum must be greater than (or equal to) the largest operand */
+ req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if ((CEIL4(xcRB->request_control_blk_length) <=
+ xcRB->request_data_length) ?
+ (req_sumlen < xcRB->request_data_length) :
+ (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ return -EINVAL;
+ }
+
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if (replylen > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+ /* Overflow check:
+ * the sum must be greater than (or equal to) the largest operand */
+ resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+ (resp_sumlen < xcRB->reply_data_length) :
+ (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ return -EINVAL;
+ }
+
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
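The two zcrypt checks rely on the wraparound property of modular addition:
a + b overflowed exactly when the sum ends up smaller than either operand,
hence smaller than the larger one, which the patch selects with a ternary.
The idiom standalone - note it is only well-defined for unsigned types,
whereas the patch applies it to int sums, where C leaves signed overflow
undefined:

    #include <stdbool.h>

    /* true iff a + b wrapped around */
    static bool add_overflows(unsigned int a, unsigned int b)
    {
            unsigned int sum = a + b;

            return sum < (a > b ? a : b);
    }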
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bd5d4ab..a0aff2e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1660,7 +1660,6 @@
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -2605,6 +2604,7 @@
return 0;
out_qdio:
qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qdio_free(CARD_DDEV(card));
return rc;
}
@@ -4906,9 +4906,11 @@
if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
+ rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
rc = ccw_device_set_online(CARD_RDEV(card));
if (rc)
goto retriable;
@@ -4918,7 +4920,6 @@
rc = ccw_device_set_online(CARD_DDEV(card));
if (rc)
goto retriable;
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
retriable:
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break1");
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0710550..908d825 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1091,6 +1091,7 @@
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -1132,6 +1133,7 @@
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -1194,6 +1196,7 @@
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0f43042..3524d34 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3447,6 +3447,7 @@
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -3493,6 +3494,7 @@
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -3545,6 +3547,7 @@
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6b4678a..4ccb5d8 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -507,7 +507,6 @@
}
/* Let us be really paranoid for modifications to probing code. */
- /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */
if (sparc_cpu_model != sun4m) {
/* We must be on sun4m because we use MMU Bypass ASI. */
return -ENXIO;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f37505..5642a9b 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,7 +325,7 @@
if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
continue;
- if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+ if (sc->device->lun != abrt_task->sc->device->lun)
continue;
/* Invalidate WRB Posted for this Task */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed88089..e9279a8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@
return FAILED;
}
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
- memset(mp_req->req_buf, 0, PAGE_SIZE);
- memset(mp_req->resp_buf, 0, PAGE_SIZE);
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@@ -677,7 +677,7 @@
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177..d9bae56 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -673,7 +673,8 @@
/* Allocate and map SQ */
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
- tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
- tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
- tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@
}
memset(tgt->rq, 0, tgt->rq_mem_size);
- tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
- tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@
}
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
- num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -731,13 +735,13 @@
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map XFERQ */
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
- tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
- tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@
memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
- (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
- (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
@@ -777,7 +781,7 @@
}
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
- num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -786,7 +790,7 @@
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@
/* Allocate and map LCQ */
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
- tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);
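Throughout the bnx2fc and bnx2i hunks, queue sizing moves from the host's PAGE_SIZE to CNIC_PAGE_SIZE: the cnic firmware interface assumes a fixed page granularity, which no longer matches PAGE_SIZE on architectures with larger pages (e.g. 64K). The recurring expression is a round-up to a page boundary; a hedged sketch with illustrative constants standing in for the cnic definitions:

	#include <stdint.h>

	#define CNIC_PAGE_BITS	12	/* illustrative: 4K firmware pages */
	#define CNIC_PAGE_SIZE	(1UL << CNIC_PAGE_BITS)
	#define CNIC_PAGE_MASK	(~(CNIC_PAGE_SIZE - 1))

	/* Round a queue size up to a whole number of firmware pages,
	 * mirroring (size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK above. */
	static uint32_t cnic_page_align(uint32_t size)
	{
		return (size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
	}

The page-table fill loops then advance by CNIC_PAGE_SIZE per PTE, keeping num_pages consistent with what the firmware walks.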
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23d..b87a193 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@
* yield integral num of page buffers
*/
/* adjust SQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg;
else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@
~(num_elements_per_pg - 1);
/* adjust CQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
if (hba->max_cqes < num_elements_per_pg)
hba->max_cqes = num_elements_per_pg;
else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@
~(num_elements_per_pg - 1);
/* adjust RQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
if (hba->max_rqes < num_elements_per_pg)
hba->max_rqes = num_elements_per_pg;
else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@
/* SQ page table */
memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
- num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.sq_phys;
if (cnic_dev_10g)
@@ -973,7 +973,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -981,13 +981,13 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* RQ page table */
memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
- num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.rq_phys;
if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* CQ page table */
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
- num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.cq_phys;
if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
}
@@ -1064,11 +1064,11 @@
/* Allocate page table memory for SQ which is page aligned */
ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
ep->qp.sq_mem_size =
- (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@
/* Allocate page table memory for CQ which is page aligned */
ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
ep->qp.cq_mem_size =
- (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@
/* Allocate page table memory for RQ which is page aligned */
ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
ep->qp.rq_mem_size =
- (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
- ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
if (en_tcp_dack)
iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
iscsi_init.num_tasks_per_conn = hba->max_sqes;
- iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_num_wqes = hba->max_sqes;
iscsi_init.cq_log_wqes_per_page =
- (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+ (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
iscsi_init.cq_num_wqes = hba->max_cqes;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
iscsi_init.rq_num_wqes = hba->max_rqes;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7..c8b0aff 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@
struct iscsi_bd *mp_bdt;
u64 addr;
- hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@
goto out;
}
- hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ CNIC_PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
@@ -548,7 +549,7 @@
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
- mp_bdt->buffer_length = PAGE_SIZE;
+ mp_bdt->buffer_length = CNIC_PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
@@ -565,12 +566,12 @@
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
if (hba->mp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
}
if (hba->dummy_buffer) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
@@ -934,14 +935,14 @@
struct bnx2i_conn *bnx2i_conn)
{
if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.resp_bd_tbl,
bnx2i_conn->gen_pdu.resp_bd_dma);
bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (bnx2i_conn->gen_pdu.req_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
bnx2i_conn->gen_pdu.req_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
goto login_req_bd_tbl_failure;
bnx2i_conn->gen_pdu.resp_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@
return 0;
login_resp_bd_tbl_failure:
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 4911310..22a9bb1 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -311,9 +311,8 @@
}
#define for_each_isci_host(id, ihost, pdev) \
- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
- ihost = to_pci_info(pdev)->hosts[++id])
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
static inline void wait_for_start(struct isci_host *ihost)
{
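The old for_each_isci_host evaluated hosts[id] before checking id against the array bound, so the ++id of the final iteration could read one element past the array; the rewrite tests the index first and stops at the first NULL slot. The corrected shape, reduced to plain arrays (the names here are illustrative):

	#include <stdio.h>

	#define SCI_MAX_CONTROLLERS 2

	/* Bound-check the index before touching the slot, and stop at the
	 * first NULL entry -- same shape as the fixed macro above. */
	#define for_each_host(id, host, hosts)				\
		for ((id) = 0; (id) < SCI_MAX_CONTROLLERS &&		\
		     ((host) = (hosts)[(id)]); (id)++)

	int main(void)
	{
		const char *hosts[SCI_MAX_CONTROLLERS] = { "isci0", NULL };
		const char *host;
		int id;

		for_each_host(id, host, hosts)
			printf("%d: %s\n", id, host);
		return 0;
	}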
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 85c77f6..ac87974 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -615,13 +615,6 @@
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
- u32 port_state = iport->sm.current_state_id;
-
- /* if the PORT'S state is resetting then the link up is from
- * port hard reset in this case, we need to tell the port
- * that link up is recieved
- */
- BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 0d30ca8..5d6fda7 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -801,7 +801,7 @@
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1fe95e..266724b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2996,8 +2996,7 @@
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9bc86b9..0a1dcb4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2880,6 +2880,7 @@
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
+#define ATIO_VECTOR 2
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
@@ -2936,34 +2937,47 @@
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < ha->msix_count; i++) {
+ for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- ret = request_irq(qentry->vector,
- qla83xx_msix_entries[i].handler,
- 0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
- } else {
+ else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
0, msix_entries[i].name, rsp);
- }
- if (ret) {
- ql_log(ql_log_fatal, vha, 0x00cb,
- "MSI-X: unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
- qla24xx_disable_msix(ha);
- ha->mqenable = 0;
- goto msix_out;
- }
+ if (ret)
+ goto msix_register_fail;
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
+ /*
+	 * If target mode is enabled, also request the vector for the ATIO
+ * queue.
+ */
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ qentry = &ha->msix_entries[ATIO_VECTOR];
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[ATIO_VECTOR].handler,
+ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+msix_register_fail:
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2eb97d7..0cb7307 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -790,17 +790,32 @@
}
/* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&qla_tgt_mutex);
+ if (!vha->fc_vport) {
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ bool npiv_vports;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ npiv_vports = (fc_host->npiv_vports_inuse);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ if (npiv_vports) {
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+ }
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
- dump_stack();
- return;
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
}
ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -815,6 +830,7 @@
qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&qla_tgt_mutex);
flush_delayed_work(&tgt->sess_del_work);
@@ -841,6 +857,7 @@
/* Wait for sessions to clear out (just in case) */
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+ return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -3185,7 +3202,8 @@
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
"SRR cmd %p (se_cmd %p, tag %d, op %x), "
"sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
- se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm);
@@ -4181,6 +4199,9 @@
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+ if (base_vha->fc_vport)
+ return 0;
+
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
@@ -4194,6 +4215,10 @@
if (!vha->vha_tgt.qla_tgt)
return 0;
+ if (vha->fc_vport) {
+ qlt_release(vha->vha_tgt.qla_tgt);
+ return 0;
+ }
mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
@@ -4265,6 +4290,12 @@
spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue;
}
+ if (tgt->tgt_stop) {
+ pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!scsi_host_get(host)) {
@@ -4279,12 +4310,11 @@
scsi_host_put(host);
continue;
}
- mutex_unlock(&qla_tgt_mutex);
-
rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
if (rc != 0)
scsi_host_put(host);
+ mutex_unlock(&qla_tgt_mutex);
return rc;
}
mutex_unlock(&qla_tgt_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 66e755c..ce33d8c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1001,7 +1001,7 @@
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 75a141b..788c4fe 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -182,20 +182,6 @@
return 0;
}
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
- u64 wwpn, u64 wwnn)
-{
- u8 b[8], b2[8];
-
- put_unaligned_be64(wwpn, b);
- put_unaligned_be64(wwnn, b2);
- return snprintf(buf, len,
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
- b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
return "qla2xxx_npiv";
@@ -227,15 +213,6 @@
return lport->lport_naa_name;
}
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
-
- return &lport->lport_npiv_name[0];
-}
-
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -941,15 +918,41 @@
atomic_read(&tpg->lport_tpg_enabled));
}
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(base_vha);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 0);
+ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
- struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
- struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
- struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -964,19 +967,28 @@
pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL;
}
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
+ }
+ init_completion(&tpg->tpg_base_comp);
+ schedule_work(&tpg->tpg_base_work);
+ wait_for_completion(&tpg->tpg_base_comp);
if (op) {
- atomic_set(&tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(vha);
- } else {
- if (!vha->vha_tgt.qla_tgt) {
- pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
+ if (!atomic_read(&tpg->lport_tpg_enabled))
return -ENODEV;
- }
- atomic_set(&tpg->lport_tpg_enabled, 0);
- qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ } else {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EPERM;
}
-
return count;
}
@@ -1053,11 +1065,64 @@
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
- lport->tpg_1 = NULL;
-
+ lport->tpg_1 = NULL;
kfree(tpg);
}
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+ &tcm_qla2xxx_npiv_tpg_enable.attr,
+ NULL,
+};
+
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
@@ -1650,6 +1715,9 @@
struct scsi_qla_host *npiv_vha;
struct tcm_qla2xxx_lport *lport =
(struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct tcm_qla2xxx_lport *base_lport =
+ (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+ struct tcm_qla2xxx_tpg *base_tpg;
struct fc_vport_identifiers vport_id;
if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1657,6 +1725,13 @@
return -EPERM;
}
+ if (!base_lport || !base_lport->tpg_1 ||
+ !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+ pr_err("qla2xxx base_lport or tpg_1 not available\n");
+ return -EPERM;
+ }
+ base_tpg = base_lport->tpg_1;
+
memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn;
vport_id.node_name = npiv_wwnn;
@@ -1675,7 +1750,6 @@
npiv_vha = (struct scsi_qla_host *)vport->dd_data;
npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = npiv_vha;
-
scsi_host_get(npiv_vha->host);
return 0;
}
@@ -1714,8 +1788,6 @@
}
lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn;
- tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
- TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
ret = tcm_qla2xxx_init_lport(lport);
@@ -1824,7 +1896,7 @@
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
- .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
@@ -1935,7 +2007,7 @@
*/
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
- tcm_qla2xxx_tpg_attrs;
+ tcm_qla2xxx_npiv_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
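The store_enable path above defers configfs_depend_item()/configfs_undepend_item() to a work item and waits on a completion: calling configfs_depend_item() directly from a configfs attribute store on the same item can deadlock on configfs' internal locking, so the dependency is taken from process context instead. A schematic of the pattern (kernel APIs as used above; the struct and function names are shortened stand-ins):

	/* Sketch only: take a configfs dependency from a work item rather
	 * than from inside the attribute ->store() itself. */
	struct my_tpg {
		struct work_struct base_work;
		struct completion base_comp;
	};

	static void my_depend_work(struct work_struct *work)
	{
		struct my_tpg *tpg = container_of(work, struct my_tpg,
						  base_work);

		/* safe here: no longer inside the configfs store path */
		/* configfs_depend_item(subsys, item); */
		complete(&tpg->base_comp);
	}

	static ssize_t my_store_enable(struct my_tpg *tpg, size_t count)
	{
		INIT_WORK(&tpg->base_work, my_depend_work);
		init_completion(&tpg->base_comp);
		schedule_work(&tpg->base_work);
		wait_for_completion(&tpg->base_comp);
		return count;
	}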
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 275d8b9..33aaac8 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h"
@@ -43,6 +41,9 @@
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
+ /* Items for dealing with configfs_depend_item */
+ struct completion tpg_base_comp;
+ struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
@@ -62,8 +63,6 @@
char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted naa WWPN for VPD page 83 etc */
char lport_naa_name[TCM_QLA2XXX_NAMELEN];
- /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
- char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 17d7404..9969fa1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1419,6 +1419,9 @@
{
struct stor_mem_pools *memp = sdevice->hostdata;
+ if (!memp)
+ return;
+
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 31534b5..c3b2fb9 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -132,9 +132,9 @@
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_HIGH;
- else
flags |= GPIOF_INIT_LOW;
+ else
+ flags |= GPIOF_INIT_HIGH;
status = gpio_request_one(cdata->gpio, flags,
dev_name(&spi->dev));
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b0842f7..5d7b07f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1455,6 +1455,14 @@
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
clk_disable_unprepare(as->clk);
return 0;
@@ -1464,9 +1472,16 @@
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
clk_prepare_enable(as->clk);
- return 0;
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
}
static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cabed8f..28ae470 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -514,7 +514,8 @@
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_disable(mcfqspi->clk);
@@ -523,7 +524,8 @@
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_enable(mcfqspi->clk);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index ec79f72..a253920 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -420,7 +420,6 @@
static int dspi_resume(struct device *dev)
{
-
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
@@ -504,7 +503,7 @@
clk_prepare_enable(dspi->clk);
init_waitqueue_head(&dspi->waitq);
- platform_set_drvdata(pdev, dspi);
+ platform_set_drvdata(pdev, master);
ret = spi_bitbang_start(&dspi->bitbang);
if (ret != 0) {
@@ -525,7 +524,8 @@
static int dspi_remove(struct platform_device *pdev)
{
- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
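The coldfire-qspi and fsl-dspi changes fix the same mismatch: one code path stored the driver-private structure in drvdata while another expected the spi_master there. After the fix there is a single convention, store the master and derive the private data from it. A sketch of the paired accessors (struct my_priv and the function names are illustrative; the spi core calls are real):

	struct my_priv { struct clk *clk; };

	static int my_probe(struct platform_device *pdev)
	{
		struct spi_master *master;

		master = spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
		if (!master)
			return -ENOMEM;

		platform_set_drvdata(pdev, master);	/* master, not priv */
		return 0;
	}

	static int my_runtime_suspend(struct device *dev)
	{
		struct spi_master *master = dev_get_drvdata(dev);
		struct my_priv *priv = spi_master_get_devdata(master);

		clk_disable(priv->clk);
		return 0;
	}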
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a5474ef..47f15d9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -948,8 +948,8 @@
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_ipg);
- clk_disable_unprepare(spi_imx->clk_per);
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2e7f38c..88eb57e 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -915,7 +915,7 @@
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+	param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+	param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@
pch_spi_set_master_mode(master);
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@
goto err_spi_register_master;
}
- if (use_dma) {
- dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
- }
-
return 0;
err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index eaec1da..1432d95 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2904,7 +2904,7 @@
refs++;
if (!ref->death)
- goto out;
+ continue;
death++;
@@ -2917,7 +2917,6 @@
BUG();
}
-out:
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
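The binder fix replaces a goto that abandoned the ref walk at the first ref without a death notification; with continue, the loop keeps counting and notifying the remaining refs. In miniature (an illustrative loop, not binder's data structures):

	#include <stdio.h>

	int main(void)
	{
		int death[] = { 1, 0, 1 };	/* 0 = no death notification */
		int i, refs = 0, deaths = 0;

		for (i = 0; i < 3; i++) {
			refs++;
			if (!death[i])
				continue;	/* was: goto out -- skipped i = 2 */
			deaths++;
		}
		printf("refs %d, death %d\n", refs, deaths); /* refs 3, death 2 */
		return 0;
	}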
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 4a08e16..79206cb 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -866,6 +866,8 @@
_IOC_SIZE (iocmd));
#endif
iolen = _IOC_SIZE (iocmd);
+ if (iolen > sizeof(arg))
+ return -EFAULT;
data = ifr->ifr_data + sizeof (iocmd);
if (copy_from_user (&arg, data, iolen))
return -EFAULT;
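The added test bounds a user-controlled ioctl size by the destination buffer before copy_from_user(); without it, an _IOC_SIZE(iocmd) larger than sizeof(arg) would overflow the stack buffer. The guard in isolation:

	/* Never let a user-supplied length exceed the kernel destination;
	 * 'arg' is the fixed-size buffer the ioctl copies into. */
	iolen = _IOC_SIZE(iocmd);
	if (iolen > sizeof(arg))
		return -EFAULT;
	if (copy_from_user(&arg, data, iolen))
		return -EFAULT;

Returning -EINVAL would arguably be the more conventional errno for a bad length, but the patch mirrors the adjacent copy_from_user() failure path.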
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 7fc66a6..514844e 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -757,6 +757,7 @@
}
/* if it is released, wait for the next touch via IRQ */
+ lradc->cur_plate = LRADC_TOUCH;
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index a70dcef..2f40ff5 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -55,6 +55,7 @@
/****** 8188EUS ********/
{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7f1a7ce..b83ec37 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -785,7 +785,7 @@
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
@@ -3708,7 +3708,7 @@
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
iscsit_free_queue_reqs_for_conn(conn);
/*
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb..4ca8fd2 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@
}
cr = cmd->cr;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@@ -297,7 +297,7 @@
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_conn_node); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@
cmd->sess = conn->sess;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
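Every list_del(&cmd->i_conn_node) in these files becomes list_del_init(). list_del() leaves the node's next/prev pointers poisoned, so any later list_empty() test or second removal on the same node is a use-after-del bug; list_del_init() relinks the node to itself, which keeps such checks well defined. A sketch of the difference (kernel list API):

	/*
	 * list_del(&n)      -> n.next/n.prev set to LIST_POISON1/2; the
	 *                      node must not be inspected or deleted again.
	 * list_del_init(&n) -> n relinked to itself; list_empty(&n) is
	 *                      true and a later re-add is harmless.
	 */
	list_del_init(&cmd->i_conn_node);
	WARN_ON(!list_empty(&cmd->i_conn_node));	/* always empty now */

The ERL2 recovery paths can look at a command after it has left the per-connection list, which is why they need the _init variant.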
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 3976183..44a5471 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a448944..77e6531 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,31 +1074,36 @@
struct scatterlist *psg;
void *paddr, *addr;
unsigned int i, len, left;
- unsigned int offset = 0;
+ unsigned int offset = sg_off;
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
-
- len = min(psg->length, left);
- if (offset >= sg->length) {
- sg = sg_next(sg);
- offset = 0;
- sg_off = sg->offset;
- }
+ unsigned int psg_len, copied = 0;
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- addr = kmap_atomic(sg_page(sg)) + sg_off;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
- if (read)
- memcpy(paddr, addr, len);
- else
- memcpy(addr, paddr, len);
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
- left -= len;
- offset += len;
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ kunmap_atomic(addr);
+ }
kunmap_atomic(paddr);
- kunmap_atomic(addr);
}
}
@@ -1163,7 +1168,7 @@
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg;
+ struct scatterlist *dsg, *psg = sg;
sector_t sector = start;
void *daddr, *paddr;
int i, j, offset = sg_off;
@@ -1171,14 +1176,14 @@
for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + sg->offset;
for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
- if (offset >= sg->length) {
+ if (offset >= psg->length) {
kunmap_atomic(paddr);
- sg = sg_next(sg);
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
offset = 0;
}
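The rewritten copy loop walks the protection scatterlist and the data scatterlist at independent rates: one protection entry may span several data entries or only part of one, so each step copies min(remaining-in-psg, remaining-in-sg) bytes and advances whichever cursor is exhausted. The same two-cursor stepping, reduced to flat buffers with artificial chunk boundaries (a simplified sketch, not the sg API):

	#include <stddef.h>
	#include <string.h>

	/* Copy 'left' bytes between buffers whose chunk sizes differ,
	 * stepping by the smaller remaining-in-chunk amount each time --
	 * the scalar analogue of the scatterlist walk above. */
	static void copy_chunked(char *dst, size_t dst_chunk,
				 const char *src, size_t src_chunk,
				 size_t left)
	{
		size_t off = 0;

		while (left) {
			size_t len = left;
			size_t d = dst_chunk - (off % dst_chunk);
			size_t s = src_chunk - (off % src_chunk);

			if (len > d)
				len = d;
			if (len > s)
				len = s;
			memcpy(dst + off, src + off, len);
			off += len;
			left -= len;
		}
	}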
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 24b4f65d..2956250 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1601,6 +1601,9 @@
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 35c0664..5f88d76 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -136,6 +136,7 @@
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework.
@@ -210,8 +211,16 @@
tristate "ACPI INT3403 thermal driver"
depends on X86 && ACPI
help
- This driver uses ACPI INT3403 device objects. If present, it will
- register each INT3403 thermal sensor as a thermal zone.
+	  Newer laptops and tablets that use ACPI may have thermal sensors
+	  outside the core CPU/SoC, for thermal safety reasons. These
+	  temperature sensors are exposed to the OS via the so-called
+	  INT3403 ACPI objects. On devices that have such sensors, this
+	  driver exposes their readings to userspace through the normal
+	  thermal framework, so a wide range of applications and GUI
+	  widgets can display the temperatures or act on them. For
+	  example, the Intel Thermal Daemon can use this information to
+	  let the user run the laptop without turning on the fans.
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 338a88b..71b0ec0 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -56,10 +56,15 @@
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
+static struct thermal_governor *def_governor;
+
static struct thermal_governor *__find_governor(const char *name)
{
struct thermal_governor *pos;
+ if (!name || !name[0])
+ return def_governor;
+
list_for_each_entry(pos, &thermal_governor_list, governor_list)
if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
return pos;
@@ -82,17 +87,23 @@
if (__find_governor(governor->name) == NULL) {
err = 0;
list_add(&governor->governor_list, &thermal_governor_list);
+ if (!def_governor && !strncmp(governor->name,
+ DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+ def_governor = governor;
}
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
+ /*
+	 * Only thermal zones with a specified tz->tzp->governor_name
+	 * may run with tz->governor unset.
+ */
if (pos->governor)
continue;
- if (pos->tzp)
- name = pos->tzp->governor_name;
- else
- name = DEFAULT_THERMAL_GOVERNOR;
+
+ name = pos->tzp->governor_name;
+
if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
pos->governor = governor;
}
@@ -342,8 +353,8 @@
static void handle_non_critical_trips(struct thermal_zone_device *tz,
int trip, enum thermal_trip_type trip_type)
{
- if (tz->governor)
- tz->governor->throttle(tz, trip);
+ tz->governor ? tz->governor->throttle(tz, trip) :
+ def_governor->throttle(tz, trip);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
cdev->ops = ops;
- cdev->updated = true;
+ cdev->updated = false;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@
if (tz->tzp)
tz->governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+ tz->governor = def_governor;
mutex_unlock(&thermal_governor_lock);
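The core now remembers the first registered governor whose name matches DEFAULT_THERMAL_GOVERNOR in def_governor, and __find_governor() falls back to it for a NULL or empty name; zones registered before their named governor (or with no platform parameters) still get throttled via the default. The lookup-with-default shape in isolation (a simplified sketch; the table and names are illustrative):

	#include <stddef.h>
	#include <string.h>

	struct governor { const char *name; };

	static struct governor *def_governor;	/* cached at registration */
	static struct governor *table[8];

	static struct governor *find_governor(const char *name)
	{
		size_t i;

		/* No name given: hand back the default, as above. */
		if (!name || !name[0])
			return def_governor;
		for (i = 0; i < 8 && table[i]; i++)
			if (!strcmp(table[i]->name, name))
				return table[i];
		return NULL;
	}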
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 972e1c7..081fd7e 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@
struct thermal_zone_device *tzone;
};
+static const struct thermal_zone_params pkg_temp_tz_params = {
+ .no_hwmon = true,
+};
+
/* List maintaining number of package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@
int err;
u32 tj_max;
struct phy_dev_entry *phy_dev_entry;
- char buffer[30];
int thres_count;
u32 eax, ebx, ecx, edx;
u8 *temp;
@@ -440,13 +443,11 @@
phy_dev_entry->first_cpu = cpu;
phy_dev_entry->tj_max = tj_max;
phy_dev_entry->ref_cnt = 1;
- snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n",
- phy_dev_entry->phys_proc_id);
- phy_dev_entry->tzone = thermal_zone_device_register(buffer,
+ phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ?
0x03 : 0x01,
- phy_dev_entry, &tzone_ops, NULL, 0, 0);
+ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(phy_dev_entry->tzone)) {
err = PTR_ERR(phy_dev_entry->tzone);
goto err_ret_free;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index cf86e72..dc697ce 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,13 +433,10 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@
unsigned long flags;
int i, locked = 1;
- local_irq_save(flags);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@@ -507,8 +506,7 @@
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 380fb53..5faa8e9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,20 +844,16 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index db79b76..9a0f24f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,13 +1295,10 @@
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 45a8c6a..a2c40ed 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,20 +1195,16 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
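All four Sun console drivers converge on one locking idiom: acquire the port lock and disable interrupts in a single *_irqsave() operation, but only trylock when the caller may already hold the lock (sysrq) or an oops is being printed, so the console can still emit output rather than deadlock. The pattern in isolation (kernel spinlock API as used above):

	unsigned long flags;
	int locked = 1;

	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... write characters; proceeding unlocked is accepted
	 * during an oops so the report is not lost ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);

Folding local_irq_save() into the locked operation also keeps the irq state paired with the lock, which the -rt tree needs since a bare local_irq_save() around a sleeping lock is invalid there.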
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bd2715a..c74a00a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1267,17 +1267,16 @@
* @p: output buffer of at least 7 bytes
*
* Generate a name from a driver reference and write it to the output
- * buffer. Return the number of bytes written.
+ * buffer.
*
* Locking: None
*/
-static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+static void tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- return sprintf(p, "%s", driver->name);
+ strcpy(p, driver->name);
else
- return sprintf(p, "%s%d", driver->name,
- index + driver->name_base);
+ sprintf(p, "%s%d", driver->name, index + driver->name_base);
}
/**
@@ -3546,19 +3545,9 @@
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--) {
- struct tty_driver *driver;
- const char *name = cs[i]->name;
- int index = cs[i]->index;
-
- driver = cs[i]->device(cs[i], &index);
- if (driver) {
- count += tty_line_name(driver, index, buf + count);
- count += sprintf(buf + count, "%c", i ? ' ':'\n');
- } else
- count += sprintf(buf + count, "%s%d%c",
- name, index, i ? ' ':'\n');
- }
+ while (i--)
+ count += sprintf(buf + count, "%s%d%c",
+ cs[i]->name, cs[i]->index, i ? ' ':'\n');
console_unlock();
return count;
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index d8a55e8..0ffb0cb 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -39,17 +39,10 @@
lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i) \
lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# else
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# endif
+#define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
+#define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
+#define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
+#define lockdep_release(l, n, i) __rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i) do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 80de2f8..4ab2cb6 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -105,7 +105,7 @@
do {
/* flush any pending transfer */
- hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
cpu_relax();
} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
@@ -205,7 +205,7 @@
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
- hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
cpu_relax();
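
hw_write(ci, reg, mask, data) is a read-modify-write restricted to mask, so the old calls with mask == BIT(n) read the register back and rewrote every set bit, which on write-one-to-set registers such as ENDPTPRIME can re-prime endpoints that are already in flight. Passing ~0 as the mask turns the call into a plain write. A standalone sketch of the helper's semantics (register storage is an ordinary variable here, not MMIO):

#include <stdio.h>

/* Sketch of the chipidea read-modify-write helper. */
static unsigned int endptprime;

static void hw_write(unsigned int *reg, unsigned int mask, unsigned int data)
{
        if (mask == ~0u)
                *reg = data;                           /* plain write */
        else
                *reg = (*reg & ~mask) | (data & mask); /* RMW */
}

int main(void)
{
        endptprime = 1u << 3;   /* hardware is still priming EP3 */

        /* Old call: mask == data == BIT(5). The RMW reads back the
         * in-flight EP3 bit and writes it again, re-priming EP3. */
        hw_write(&endptprime, 1u << 5, 1u << 5);
        printf("RMW result:  %#x\n", endptprime);   /* 0x28 */

        endptprime = 1u << 3;
        /* New call: mask ~0 writes only BIT(5); set-to-prime hardware
         * treats 0 bits as "no change", so EP3 is left alone. */
        hw_write(&endptprime, ~0u, 1u << 5);
        printf("plain write: %#x\n", endptprime);   /* 0x20 */
        return 0;
}
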
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c..062967c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -717,6 +717,10 @@
result = -ENOMEM;
goto err;
}
+
+ if (dev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(100);
+
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (result < 0) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063..739ee8e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Pro Webcams C920 and C930e */
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
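
With these two hunks, usb_get_configuration() sleeps 100 ms before fetching the configuration descriptor whenever the device carries USB_QUIRK_DELAY_INIT, and the quirk table gains the two Logitech webcams. A minimal model of the VID:PID table lookup that drives this (table contents and the flag value are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define USB_QUIRK_DELAY_INIT 0x01   /* illustrative flag value */

struct quirk { uint16_t vid, pid; uint32_t flags; };

static const struct quirk quirks[] = {
        { 0x046d, 0x082d, USB_QUIRK_DELAY_INIT }, /* C920 */
        { 0x046d, 0x0843, USB_QUIRK_DELAY_INIT }, /* C930e */
};

static uint32_t lookup_quirks(uint16_t vid, uint16_t pid)
{
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (quirks[i].vid == vid && quirks[i].pid == pid)
                        return quirks[i].flags;
        return 0;
}

int main(void)
{
        if (lookup_quirks(0x046d, 0x082d) & USB_QUIRK_DELAY_INIT)
                puts("delay 100ms before reading config descriptor");
        return 0;
}
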
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 888fbb4..e969eb8 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -360,24 +360,30 @@
bcm_writel(val, udc->iudma_regs + off);
}
-static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
@@ -638,7 +644,7 @@
} while (!last_bd);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
- ENETDMAC_CHANCFG_REG(iudma->ch_idx));
+ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
/**
@@ -694,9 +700,9 @@
bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
/* stop DMA, then wait for the hardware to wrap up */
- usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
+ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
- while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
+ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
ENETDMAC_CHANCFG_EN_MASK) {
udelay(1);
@@ -713,10 +719,10 @@
dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
ch_idx);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
- ENETDMAC_CHANCFG_REG(ch_idx));
+ ENETDMAC_CHANCFG_REG, ch_idx);
}
}
- usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
+ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
/* don't leave "live" HW-owned entries for the next guy to step on */
for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
@@ -728,11 +734,11 @@
/* set up IRQs, UBUS burst size, and BD base for this channel */
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IRMASK_REG(ch_idx));
- usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
+ ENETDMAC_IRMASK_REG, ch_idx);
+ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
- usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
- usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
+ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
+ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
@@ -2035,7 +2041,7 @@
spin_lock(&udc->lock);
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IR_REG(iudma->ch_idx));
+ ENETDMAC_IR_REG, iudma->ch_idx);
bep = iudma->bep;
rc = iudma_read(udc, iudma);
@@ -2175,18 +2181,18 @@
seq_printf(s, " [ep%d]:\n",
max_t(int, iudma_defaults[ch_idx].ep_num, 0));
seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
- usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
+ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
- sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
- sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
+ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
+ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
- usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
+ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
sram2 >> 16, sram2 & 0xffff,
sram3 >> 16, sram3 & 0xffff,
- usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
+ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
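
The bcm63xx_udc change moves the per-channel register offset out of the ENETDMAC_*_REG(chan) macros and into the accessors, so the shared register definitions stay flat and the channel stride is applied in one place. A standalone sketch of the new accessor shape (the stride constant and a fake register array stand in for the real ENETDMA layout):

#include <stdio.h>

#define ENETDMA_CHAN_WIDTH   0x10  /* illustrative per-channel stride */
#define ENETDMAC_CHANCFG_REG 0x00  /* flat offset, no (chan) argument */

static unsigned int regs[64];      /* fake MMIO window */

static unsigned int usb_dmac_readl(unsigned int off, int chan)
{
        return regs[(off + ENETDMA_CHAN_WIDTH * chan) / 4];
}

static void usb_dmac_writel(unsigned int val, unsigned int off, int chan)
{
        regs[(off + ENETDMA_CHAN_WIDTH * chan) / 4] = val;
}

int main(void)
{
        usb_dmac_writel(1, ENETDMAC_CHANCFG_REG, 2);  /* enable ch 2 */
        printf("%u\n", usb_dmac_readl(ENETDMAC_CHANCFG_REG, 2));
        return 0;
}
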
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 306a2b5..2b43343 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -585,7 +585,6 @@
char __user *buf, size_t len, int read)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_gadget *gadget = epfile->ffs->gadget;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len;
@@ -622,6 +621,12 @@
/* Allocate & copy */
if (!halt) {
/*
+ * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+ * before the waiting completes, so do not assign to 'gadget' earlier
+ */
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+
+ /*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index bf7a56b..69b76ef 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1157,7 +1157,7 @@
usb_gadget_set_selfpowered(gadget);
- if (gadget->is_otg) {
+ if (gadget_is_otg(gadget)) {
otg_descriptor.bmAttributes |= USB_OTG_HNP;
printer_cfg_driver.descriptors = otg_desc;
printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index f04b2c3..dd9678f 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,7 +1629,7 @@
ep->ep.desc = NULL;
ep->halted = 0;
INIT_LIST_HEAD(&ep->queue);
- usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 4711427..81cda09 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -685,8 +685,15 @@
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * With the threadirqs option we use the spin_lock_irqsave() variant to
+ * prevent a deadlock with the ehci hrtimer callback, because hrtimer
+ * callbacks run in interrupt context even when threadirqs is specified.
+ * We can go back to the spin_lock() variant once hrtimer callbacks
+ * become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -704,7 +711,7 @@
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
- spin_unlock(&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -815,7 +822,7 @@
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 47b858f..7ae0c4d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -238,6 +238,7 @@
int port;
int mask;
int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
@@ -272,6 +273,7 @@
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
+ fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -300,16 +302,32 @@
}
if (t1 != t2) {
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
+
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
if (changed && ehci->has_tdi_phy_lpm) {
- spin_unlock_irq(&ehci->lock);
- msleep(5); /* 5 ms for HCD to enter low-power mode */
spin_lock_irq(&ehci->lock);
-
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -322,8 +340,8 @@
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ spin_unlock_irq(&ehci->lock);
}
- spin_unlock_irq(&ehci->lock);
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6fe577d..924a6cc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4733,6 +4733,9 @@
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
+ /* support to build packet from discontinuous buffers */
+ hcd->self.no_sg_constraint = 1;
+
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4757,14 +4760,6 @@
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
- xhci = hcd_to_xhci(hcd);
- /*
- * Support arbitrarily aligned sg-list entries on hosts without
- * TD fragment rules (which are currently unsupported).
- */
- if (xhci->hci_version < 0x100)
- hcd->self.no_sg_constraint = 1;
-
return 0;
}
@@ -4793,9 +4788,6 @@
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
- if (xhci->hci_version < 0x100)
- hcd->self.no_sg_constraint = 1;
-
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fc192ad..239ad0b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -477,8 +477,11 @@
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
schedule_delayed_work(
- &musb->finish_resume_work, 20);
+ &musb->finish_resume_work,
+ msecs_to_jiffies(20));
musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
@@ -2157,11 +2160,19 @@
void __iomem *musb_base = musb->mregs;
void __iomem *ep_target_regs;
void __iomem *epio;
+ u8 power;
musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
- musb_writeb(musb_base, MUSB_POWER, musb->context.power);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
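
On register restore, blindly writing the saved MUSB_POWER value could clobber SUSPENDM/RESUME while the controller is mid-transition; the fix keeps the live values of those two bits and merges everything else from the saved copy. The bit-merge in isolation (bit positions are illustrative):

#include <stdio.h>

#define MUSB_POWER_SUSPENDM 0x02
#define MUSB_POWER_RESUME   0x04

/* Keep the live SUSPENDM/RESUME bits, take the rest from the save. */
static unsigned int restore_power(unsigned int live, unsigned int saved)
{
        unsigned int keep = MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;

        return (live & keep) | (saved & ~keep);
}

int main(void)
{
        /* controller is mid-resume (RESUME set), saved copy is not */
        printf("%#x\n", restore_power(0x04, 0x61));  /* 0x65 */
        return 0;
}
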
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ed45572..abb38c3 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1183,6 +1183,9 @@
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index eb63443..e2d2d8c 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -135,7 +135,8 @@
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- schedule_delayed_work(&musb->finish_resume_work, 20);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
}
}
@@ -158,7 +159,6 @@
*/
power = musb_readb(mbase, MUSB_POWER);
if (do_reset) {
-
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
* Then we must clear RESUME and wait a bit to let musb start
@@ -167,11 +167,22 @@
* detected".
*/
if (power & MUSB_POWER_RESUME) {
- while (time_before(jiffies, musb->rh_timer))
- msleep(1);
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
musb_writeb(mbase, MUSB_POWER,
- power & ~MUSB_POWER_RESUME);
- msleep(1);
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
}
power &= 0xf0;
@@ -180,7 +191,8 @@
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- schedule_delayed_work(&musb->deassert_reset_work, 50);
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
} else {
dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
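
schedule_delayed_work() takes its delay in jiffies, so the old literal 20 and 50 were only correct on HZ=1000 kernels; converting with msecs_to_jiffies() makes the delay HZ-independent (and the busy-wait msleep loop becomes a rescheduled delayed work). A simplified integer model of the conversion, showing how far off the raw value is on HZ=250:

#include <stdio.h>

#define HZ 250  /* a common non-1000 config where the bug shows up */

/* Simplified msecs_to_jiffies(): round up so short delays never
 * become zero ticks. */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        /* old code: a delay of "20" meant 20 jiffies = 80 ms here */
        printf("raw 20 ticks = %d ms\n", 20 * 1000 / HZ);
        /* fixed code: 20 ms converted = 5 jiffies */
        printf("20 ms        = %lu jiffies\n", msecs_to_jiffies(20));
        return 0;
}
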
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2a408cd..8aa59a2 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -659,7 +659,6 @@
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
}
return 0;
@@ -674,7 +673,6 @@
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
- phy_power_on(musb->phy);
}
return 0;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8546c8d..d204f74 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -159,32 +159,6 @@
return rc;
}
-#ifdef CONFIG_PM_SLEEP
-#define USB_PHY_SUSP_DIG_VOL 500000
-static int msm_hsusb_config_vddcx(int high)
-{
- int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
- int min_vol;
- int ret;
-
- if (high)
- min_vol = USB_PHY_VDD_DIG_VOL_MIN;
- else
- min_vol = USB_PHY_SUSP_DIG_VOL;
-
- ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("%s: unable to set the voltage for regulator "
- "HSUSB_VDDCX\n", __func__);
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-#endif
-
static int msm_hsusb_ldo_set_mode(int on)
{
int ret = 0;
@@ -440,7 +414,32 @@
#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+
+#define USB_PHY_SUSP_DIG_VOL 500000
+static int msm_hsusb_config_vddcx(int high)
+{
+ int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
+ int min_vol;
+ int ret;
+
+ if (high)
+ min_vol = USB_PHY_VDD_DIG_VOL_MIN;
+ else
+ min_vol = USB_PHY_SUSP_DIG_VOL;
+
+ ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+ if (ret) {
+ pr_err("%s: unable to set the voltage for regulator "
+ "HSUSB_VDDCX\n", __func__);
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+
+ return ret;
+}
+
static int msm_otg_suspend(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1733,22 +1732,18 @@
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
msm_otg_runtime_idle)
};
-#endif
static struct platform_driver msm_otg_driver = {
.remove = msm_otg_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
-#endif
},
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ee1f00f..44ab129 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -907,6 +907,8 @@
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1e2d369..e599fbf 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1320,3 +1320,9 @@
* Manufacturer: Smart GSM Team
*/
#define FTDI_Z3X_PID 0x0011
+
+/*
+ * Product: Cressi PC Interface
+ * Manufacturer: Cressi
+ */
+#define FTDI_CRESSI_PID 0x87d0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 216d20a..68fc9fe 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1526,7 +1526,8 @@
/* Cinterion */
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f..54af4e9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a0fa5de..e1e22e0 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -505,9 +505,13 @@
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -532,6 +536,12 @@
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
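
get_rx_bufs() can now report that the available descriptors were too small for the packet by returning UIO_MAXIOV + 1, a value no valid headcount can reach, and the caller responds by truncating and discarding that packet rather than overrunning the iovec. The sentinel convention in miniature:

#include <stdio.h>

#define UIO_MAXIOV 1024

/* Any valid headcount is at most UIO_MAXIOV, so UIO_MAXIOV + 1
 * unambiguously means "buffers too small, packet would overrun". */
static int get_rx_bufs(int datalen_left_over)
{
        int headcount = 3;              /* pretend we used 3 heads */

        if (datalen_left_over > 0)
                return UIO_MAXIOV + 1;
        return headcount;
}

int main(void)
{
        int headcount = get_rx_bufs(512);

        if (headcount > UIO_MAXIOV)
                puts("overrun: truncate and discard this packet");
        return 0;
}
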
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0a025b8..e48d4a6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1001,6 +1001,12 @@
break;
}
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ if (unlikely(v_req.lun[0] != 1)) {
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
+ }
+
/* Extract the tpgt */
target = v_req.lun[1];
tpg = ACCESS_ONCE(vs_tpg[target]);
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index aaf2995..68b45fc 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -402,7 +402,7 @@
if (!found) {
pr_err("No W83697HF/HG could be found\n");
- ret = -EIO;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37d06ea..61a6ac8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -399,12 +399,26 @@
state = BP_EAGAIN;
break;
}
-
- pfn = page_to_pfn(page);
- frame_list[i] = pfn_to_mfn(pfn);
-
scrub_page(page);
+ frame_list[i] = page_to_pfn(page);
+ }
+
+ /*
+ * Ensure that ballooned highmem pages don't have kmaps.
+ *
+ * Do this before changing the p2m as kmap_flush_unused()
+ * reads PTEs to obtain pages (and hence needs the original
+ * p2m entry).
+ */
+ kmap_flush_unused();
+
+ /* Update direct mapping, invalidate P2M, and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = frame_list[i];
+ frame_list[i] = pfn_to_mfn(pfn);
+ page = pfn_to_page(pfn);
+
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
@@ -429,11 +443,9 @@
}
#endif
- balloon_append(pfn_to_page(pfn));
+ balloon_append(page);
}
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 2408473..80ef38c 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -41,19 +41,8 @@
static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- struct dentry *root;
- root = mount_pseudo(fs_type, "anon_inode:", NULL,
+ return mount_pseudo(fs_type, "anon_inode:", NULL,
&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
- if (!IS_ERR(root)) {
- struct super_block *s = root->d_sb;
- anon_inode_inode = alloc_anon_inode(s);
- if (IS_ERR(anon_inode_inode)) {
- dput(root);
- deactivate_locked_super(s);
- root = ERR_CAST(anon_inode_inode);
- }
- }
- return root;
}
static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@
static int __init anon_inode_init(void)
{
- int error;
-
- error = register_filesystem(&anon_inode_fs_type);
- if (error)
- goto err_exit;
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
- if (IS_ERR(anon_inode_mnt)) {
- error = PTR_ERR(anon_inode_mnt);
- goto err_unregister_filesystem;
- }
- return 0;
+ if (IS_ERR(anon_inode_mnt))
+ panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
-err_unregister_filesystem:
- unregister_filesystem(&anon_inode_fs_type);
-err_exit:
- panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+ anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ if (IS_ERR(anon_inode_inode))
+ panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+ return 0;
}
fs_initcall(anon_inode_init);
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0129b78..4f70f38 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -458,11 +458,10 @@
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
- unsigned int sectors, total, ret;
+ unsigned int sectors, ret = 0;
void *prot_buf = bio->bi_integrity->bip_buf;
int i;
- ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
@@ -484,8 +483,6 @@
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
- total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cf32f03..c0f3718 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -513,7 +513,7 @@
static inline unsigned int
get_rfc1002_length(void *buf)
{
- return be32_to_cpu(*((__be32 *)buf));
+ return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
static inline void
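
The RFC 1002 session header that precedes each SMB message is one type byte followed by a 24-bit big-endian length, so get_rfc1002_length() must mask off the top byte after the byte swap; the rqst_len() check added in transport.c below then verifies that the marshalled request really is the header-declared length plus the 4 header bytes. A standalone decoder matching the fixed helper (the sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Decode the 4-byte RFC 1002 session header: type byte, then a
 * 24-bit big-endian length. */
static unsigned int get_rfc1002_length(const void *buf)
{
        const uint8_t *p = buf;
        unsigned int word = ((unsigned int)p[0] << 24) | (p[1] << 16) |
                            (p[2] << 8) | p[3];

        return word & 0xffffff;         /* drop the type byte */
}

int main(void)
{
        uint8_t hdr[4] = { 0x85, 0x00, 0x01, 0x40 }; /* type 0x85, len 0x140 */

        /* 0x140, not 0x85000140 as the unmasked version would yield */
        printf("len = %#x\n", get_rfc1002_length(hdr));
        return 0;
}
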
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 53c1507..834fce7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,31 +2579,19 @@
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc = -EACCES;
+ loff_t lock_pos = pos;
- BUG_ON(iocb->ki_pos != pos);
-
+ if (file->f_flags & O_APPEND)
+ lock_pos = i_size_read(inode);
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
- if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+ if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP)) {
- mutex_lock(&inode->i_mutex);
- rc = __generic_file_aio_write(iocb, iov, nr_segs,
- &iocb->ki_pos);
- mutex_unlock(&inode->i_mutex);
- }
-
- if (rc > 0) {
- ssize_t err;
-
- err = generic_write_sync(file, iocb->ki_pos - rc, rc);
- if (err < 0)
- rc = err;
- }
-
+ CIFS_WRITE_OP))
+ rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
up_read(&cinode->lock_sem);
return rc;
}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b375709..18cd565 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -270,6 +270,26 @@
iov->iov_len = rqst->rq_pagesz;
}
+static unsigned long
+rqst_len(struct smb_rqst *rqst)
+{
+ unsigned int i;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned long buflen = 0;
+
+ /* total up iov array first */
+ for (i = 0; i < rqst->rq_nvec; i++)
+ buflen += iov[i].iov_len;
+
+ /* add in the page array if there is one */
+ if (rqst->rq_npages) {
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+ buflen += rqst->rq_tailsz;
+ }
+
+ return buflen;
+}
+
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
@@ -277,6 +297,7 @@
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+ unsigned long send_length;
unsigned int i;
size_t total_len = 0, sent;
struct socket *ssocket = server->ssocket;
@@ -285,6 +306,14 @@
if (ssocket == NULL)
return -ENOTSOCK;
+ /* sanity check send length */
+ send_length = rqst_len(rqst);
+ if (send_length != smb_buf_length + 4) {
+ WARN(1, "Send length mismatch (send_length=%lu smb_buf_length=%u)\n",
+ send_length, smb_buf_length);
+ return -EIO;
+ }
+
cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
diff --git a/fs/compat.c b/fs/compat.c
index 6af20de..19252b9 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -72,8 +72,8 @@
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
*/
-asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t)
+COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
+ struct compat_utimbuf __user *, t)
{
struct timespec tv[2];
@@ -87,7 +87,7 @@
return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}
-asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, struct compat_timespec __user *t, int flags)
+COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags)
{
struct timespec tv[2];
@@ -102,7 +102,7 @@
return do_utimes(dfd, filename, t ? tv : NULL, flags);
}
-asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, struct compat_timeval __user *t)
+COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd, const char __user *, filename, struct compat_timeval __user *, t)
{
struct timespec tv[2];
@@ -121,7 +121,7 @@
return do_utimes(dfd, filename, t ? tv : NULL, 0);
}
-asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_timeval __user *t)
+COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t)
{
return compat_sys_futimesat(AT_FDCWD, filename, t);
}
@@ -159,8 +159,8 @@
return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
-asmlinkage long compat_sys_newstat(const char __user * filename,
- struct compat_stat __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
+ struct compat_stat __user *, statbuf)
{
struct kstat stat;
int error;
@@ -171,8 +171,8 @@
return cp_compat_stat(&stat, statbuf);
}
-asmlinkage long compat_sys_newlstat(const char __user * filename,
- struct compat_stat __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
+ struct compat_stat __user *, statbuf)
{
struct kstat stat;
int error;
@@ -184,9 +184,9 @@
}
#ifndef __ARCH_WANT_STAT64
-asmlinkage long compat_sys_newfstatat(unsigned int dfd,
- const char __user *filename,
- struct compat_stat __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
+ const char __user *, filename,
+ struct compat_stat __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
@@ -198,8 +198,8 @@
}
#endif
-asmlinkage long compat_sys_newfstat(unsigned int fd,
- struct compat_stat __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
+ struct compat_stat __user *, statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
@@ -247,7 +247,7 @@
* The following statfs calls are copies of code from fs/statfs.c and
* should be checked against those from time to time
*/
-asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
+COMPAT_SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct compat_statfs __user *, buf)
{
struct kstatfs tmp;
int error = user_statfs(pathname, &tmp);
@@ -256,7 +256,7 @@
return error;
}
-asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf)
+COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *, buf)
{
struct kstatfs tmp;
int error = fd_statfs(fd, &tmp);
@@ -298,7 +298,7 @@
return 0;
}
-asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t sz, struct compat_statfs64 __user *buf)
+COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
{
struct kstatfs tmp;
int error;
@@ -312,7 +312,7 @@
return error;
}
-asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf)
+COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
{
struct kstatfs tmp;
int error;
@@ -331,7 +331,7 @@
* Given how simple this syscall is, that approach is more maintainable
* than the various conversion hacks.
*/
-asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
+COMPAT_SYSCALL_DEFINE2(ustat, unsigned, dev, struct compat_ustat __user *, u)
{
struct compat_ustat tmp;
struct kstatfs sbuf;
@@ -399,8 +399,8 @@
}
#endif
-asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
- unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
+ compat_ulong_t, arg)
{
mm_segment_t old_fs;
struct flock f;
@@ -468,16 +468,15 @@
return ret;
}
-asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
- unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
+ compat_ulong_t, arg)
{
if ((cmd == F_GETLK64) || (cmd == F_SETLK64) || (cmd == F_SETLKW64))
return -EINVAL;
return compat_sys_fcntl64(fd, cmd, arg);
}
-asmlinkage long
-compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
+COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
{
long ret;
aio_context_t ctx64;
@@ -496,32 +495,24 @@
return ret;
}
-asmlinkage long
-compat_sys_io_getevents(aio_context_t ctx_id,
- unsigned long min_nr,
- unsigned long nr,
- struct io_event __user *events,
- struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
+ compat_long_t, min_nr,
+ compat_long_t, nr,
+ struct io_event __user *, events,
+ struct compat_timespec __user *, timeout)
{
- long ret;
struct timespec t;
struct timespec __user *ut = NULL;
- ret = -EFAULT;
- if (unlikely(!access_ok(VERIFY_WRITE, events,
- nr * sizeof(struct io_event))))
- goto out;
if (timeout) {
if (get_compat_timespec(&t, timeout))
- goto out;
+ return -EFAULT;
ut = compat_alloc_user_space(sizeof(*ut));
if (copy_to_user(ut, &t, sizeof(t)) )
- goto out;
+ return -EFAULT;
}
- ret = sys_io_getevents(ctx_id, min_nr, nr, events, ut);
-out:
- return ret;
+ return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
}
/* A write operation does a read from user space and vice versa */
@@ -617,8 +608,8 @@
#define MAX_AIO_SUBMITS (PAGE_SIZE/sizeof(struct iocb *))
-asmlinkage long
-compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
+COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
+ int, nr, u32 __user *, iocb)
{
struct iocb __user * __user *iocb64;
long ret;
@@ -770,10 +761,10 @@
#define NCPFS_NAME "ncpfs"
#define NFS4_NAME "nfs4"
-asmlinkage long compat_sys_mount(const char __user * dev_name,
- const char __user * dir_name,
- const char __user * type, unsigned long flags,
- const void __user * data)
+COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
+ const char __user *, dir_name,
+ const char __user *, type, compat_ulong_t, flags,
+ const void __user *, data)
{
char *kernel_type;
unsigned long data_page;
@@ -869,8 +860,8 @@
return -EFAULT;
}
-asmlinkage long compat_sys_old_readdir(unsigned int fd,
- struct compat_old_linux_dirent __user *dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+ struct compat_old_linux_dirent __user *, dirent, unsigned int, count)
{
int error;
struct fd f = fdget(fd);
@@ -948,8 +939,8 @@
return -EFAULT;
}
-asmlinkage long compat_sys_getdents(unsigned int fd,
- struct compat_linux_dirent __user *dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
+ struct compat_linux_dirent __user *, dirent, unsigned int, count)
{
struct fd f;
struct compat_linux_dirent __user * lastdirent;
@@ -981,7 +972,7 @@
return error;
}
-#ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
+#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
struct compat_getdents_callback64 {
struct dir_context ctx;
@@ -1033,8 +1024,8 @@
return -EFAULT;
}
-asmlinkage long compat_sys_getdents64(unsigned int fd,
- struct linux_dirent64 __user * dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+ struct linux_dirent64 __user *, dirent, unsigned int, count)
{
struct fd f;
struct linux_dirent64 __user * lastdirent;
@@ -1066,7 +1057,7 @@
fdput(f);
return error;
}
-#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
+#endif /* __ARCH_WANT_COMPAT_SYS_GETDENTS64 */
/*
* Exactly like fs/open.c:sys_open(), except that it doesn't set the
@@ -1287,9 +1278,9 @@
return ret;
}
-asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
- compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp)
+COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
+ compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
+ struct compat_timeval __user *, tvp)
{
struct timespec end_time, *to = NULL;
struct compat_timeval tv;
@@ -1320,7 +1311,7 @@
compat_uptr_t tvp;
};
-asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg)
+COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
struct compat_sel_arg_struct a;
@@ -1381,9 +1372,9 @@
return ret;
}
-asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
- compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp, void __user *sig)
+COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
+ compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
+ struct compat_timespec __user *, tsp, void __user *, sig)
{
compat_size_t sigsetsize = 0;
compat_uptr_t up = 0;
@@ -1400,9 +1391,9 @@
sigsetsize);
}
-asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
- unsigned int nfds, struct compat_timespec __user *tsp,
- const compat_sigset_t __user *sigmask, compat_size_t sigsetsize)
+COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
+ unsigned int, nfds, struct compat_timespec __user *, tsp,
+ const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
compat_sigset_t ss32;
sigset_t ksigmask, sigsaved;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 3881610..e822890 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1538,9 +1538,10 @@
return ioctl_pointer[i] == xcmd;
}
-asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ compat_ulong_t, arg32)
{
+ unsigned long arg = arg32;
struct fd f = fdget(fd);
int error = -EBADF;
if (!f.file)
diff --git a/fs/dcache.c b/fs/dcache.c
index 265e0ce..ca02c13 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2833,9 +2833,9 @@
u32 dlen = ACCESS_ONCE(name->len);
char *p;
- if (*buflen < dlen + 1)
- return -ENAMETOOLONG;
*buflen -= dlen + 1;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
p = *buffer -= dlen + 1;
*p++ = '/';
while (dlen--) {
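
The prepend_name() fix reorders the bookkeeping: subtract the component's space first, then test the signed remainder, so once the buffer has gone negative every later prepend keeps failing instead of comparing against a stale count. A minimal model of that ordering:

#include <stdio.h>
#include <string.h>

/* Subtract first, then test the signed remainder. */
static int prepend(char **buffer, int *buflen, const char *s, int len)
{
        *buflen -= len + 1;             /* always account the space */
        if (*buflen < 0)
                return -1;              /* -ENAMETOOLONG in the kernel */
        *buffer -= len + 1;
        (*buffer)[0] = '/';
        memcpy(*buffer + 1, s, len);
        return 0;
}

int main(void)
{
        char buf[8];
        char *end = buf + sizeof(buf);
        int left = sizeof(buf);

        if (prepend(&end, &left, "verylongname", 12) < 0)
                puts("name too long");
        /* left is now negative; any further prepend also fails */
        if (prepend(&end, &left, "a", 1) < 0)
                puts("still too long");
        return 0;
}
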
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 8dd524f..cdb2971 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -21,7 +21,7 @@
u32 attributes;
struct inode *inode = file->f_mapping->host;
unsigned long datasize = count - sizeof(attributes);
- ssize_t bytes = 0;
+ ssize_t bytes;
bool set = false;
if (count < sizeof(attributes))
@@ -33,14 +33,9 @@
if (attributes & ~(EFI_VARIABLE_MASK))
return -EINVAL;
- data = kmalloc(datasize, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
- bytes = -EFAULT;
- goto out;
- }
+ data = memdup_user(userbuf + sizeof(attributes), datasize);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
bytes = efivar_entry_set_get_size(var, attributes, &datasize,
data, &set);
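
The efivarfs write path replaces the kmalloc()/copy_from_user()/error-unwind triple with memdup_user(), which returns either the filled buffer or an ERR_PTR. A userspace approximation of the pattern (plain malloc/memcpy standing in for the kernel primitives):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for memdup_user(): allocate, copy, and report
 * failure through one result instead of hand-rolled goto unwinding. */
static void *memdup(const void *src, size_t len, int *err)
{
        void *p = malloc(len);

        if (!p) {
                *err = -ENOMEM;         /* kernel: ERR_PTR(-ENOMEM) */
                return NULL;
        }
        memcpy(p, src, len);            /* kernel: copy_from_user, -EFAULT */
        *err = 0;
        return p;
}

int main(void)
{
        int err;
        char *dup = memdup("payload", 8, &err);

        if (!dup)
                return -err;
        puts(dup);
        free(dup);
        return 0;
}
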
diff --git a/fs/exec.c b/fs/exec.c
index 3d78fcc..4f59402 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1619,9 +1619,9 @@
return do_execve(getname(filename), argv, envp);
}
#ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_execve(const char __user * filename,
- const compat_uptr_t __user * argv,
- const compat_uptr_t __user * envp)
+COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
+ const compat_uptr_t __user *, argv,
+ const compat_uptr_t __user *, envp)
{
return compat_do_execve(getname(filename), argv, envp);
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6e39895..24bfd7f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/aio.h>
+#include <linux/bitops.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -3921,18 +3922,20 @@
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
+ unsigned int new_fl = 0;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT4_SYNC_FL)
- inode->i_flags |= S_SYNC;
+ new_fl |= S_SYNC;
if (flags & EXT4_APPEND_FL)
- inode->i_flags |= S_APPEND;
+ new_fl |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
+ new_fl |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
- inode->i_flags |= S_NOATIME;
+ new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
- inode->i_flags |= S_DIRSYNC;
+ new_fl |= S_DIRSYNC;
+ set_mask_bits(&inode->i_flags,
+ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
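
ext4_set_inode_flags() now accumulates the desired bits in new_fl and applies them with set_mask_bits(), so the clear-then-set of i_flags becomes one atomic update instead of a window in which a concurrent updater's bits could be lost. A sketch of set_mask_bits() semantics as a compare-and-swap loop (the real helper lives in <linux/bitops.h>; this is a C11-atomics model):

#include <stdatomic.h>
#include <stdio.h>

/* Atomically replace the masked bits of *addr with the given bits. */
static unsigned long set_mask_bits(_Atomic unsigned long *addr,
                                   unsigned long mask, unsigned long bits)
{
        unsigned long old = atomic_load(addr), new;

        do {
                new = (old & ~mask) | bits;
        } while (!atomic_compare_exchange_weak(addr, &old, new));
        return old;
}

int main(void)
{
        _Atomic unsigned long i_flags = 0xf0;

        /* replace the low nibble with 0x5 without disturbing the rest */
        set_mask_bits(&i_flags, 0x0f, 0x05);
        printf("%#lx\n", atomic_load(&i_flags));  /* 0xf5 */
        return 0;
}
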
diff --git a/fs/file.c b/fs/file.c
index db25c2b..b61293b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -497,7 +497,7 @@
error = fd;
#if 1
/* Sanity check */
- if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
+ if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
rcu_assign_pointer(fdt->fd[fd], NULL);
}
@@ -683,35 +683,54 @@
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed)
+static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
- *fput_needed = 0;
if (atomic_read(&files->count) == 1) {
file = __fcheck_files(files, fd);
- if (file && (file->f_mode & mask))
- file = NULL;
+ if (!file || unlikely(file->f_mode & mask))
+ return 0;
+ return (unsigned long)file;
} else {
file = __fget(fd, mask);
- if (file)
- *fput_needed = 1;
+ if (!file)
+ return 0;
+ return FDPUT_FPUT | (unsigned long)file;
}
-
- return file;
}
-struct file *fget_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget(unsigned int fd)
{
- return __fget_light(fd, FMODE_PATH, fput_needed);
+ return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(__fdget);
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget_raw(unsigned int fd)
{
- return __fget_light(fd, 0, fput_needed);
+ return __fget_light(fd, 0);
}
+unsigned long __fdget_pos(unsigned int fd)
+{
+ unsigned long v = __fdget(fd);
+ struct file *file = (struct file *)(v & ~3);
+
+ if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
+ if (file_count(file) > 1) {
+ v |= FDPUT_POS_UNLOCK;
+ mutex_lock(&file->f_pos_lock);
+ }
+ }
+ return v;
+}
+
+/*
+ * We only lock f_pos if we have threads or if the file might be
+ * shared with another process. In both cases we'll have an elevated
+ * file count (done either by fdget() or by fork()).
+ */
+
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
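
__fget_light() now returns a single unsigned long that packs the struct file pointer together with the FDPUT_* flags in its low bits, which are free because file structures are at least word aligned; callers such as __fdget_pos() unpack with v & ~3. A standalone model of the tagging (flag values mirror the fdtable convention, but the surrounding types are illustrative):

#include <stdio.h>

#define FDPUT_FPUT       0x1
#define FDPUT_POS_UNLOCK 0x2

struct file { int dummy; };

/* File pointers are at least 4-byte aligned, so bits 0-1 are free
 * to carry the fput/unlock flags alongside the pointer. */
static unsigned long pack(struct file *f, unsigned long flags)
{
        return (unsigned long)f | flags;
}

int main(void)
{
        static struct file filp;
        unsigned long v = pack(&filp, FDPUT_FPUT);
        struct file *f = (struct file *)(v & ~3UL);   /* strip tags */

        printf("file ok: %d, need fput: %lu\n",
               f == &filp, v & FDPUT_FPUT);
        return 0;
}
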
diff --git a/fs/file_table.c b/fs/file_table.c
index 5fff903..5b24008 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -135,6 +135,7 @@
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
+ mutex_init(&f->f_pos_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return f;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e0259a1..d754e3c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -40,18 +40,13 @@
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
- /*
- * Write only inodes dirtied before this time. Don't forget to set
- * older_than_this_is_set when you set this.
- */
- unsigned long older_than_this;
+ unsigned long *older_than_this;
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
- unsigned int older_than_this_is_set:1;
enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
@@ -252,10 +247,10 @@
int do_sb_sort = 0;
int moved = 0;
- WARN_ON_ONCE(!work->older_than_this_is_set);
while (!list_empty(delaying_queue)) {
inode = wb_inode(delaying_queue->prev);
- if (inode_dirtied_after(inode, work->older_than_this))
+ if (work->older_than_this &&
+ inode_dirtied_after(inode, *work->older_than_this))
break;
list_move(&inode->i_wb_list, &tmp);
moved++;
@@ -742,8 +737,6 @@
.sync_mode = WB_SYNC_NONE,
.range_cyclic = 1,
.reason = reason,
- .older_than_this = jiffies,
- .older_than_this_is_set = 1,
};
spin_lock(&wb->list_lock);
@@ -802,13 +795,12 @@
{
unsigned long wb_start = jiffies;
long nr_pages = work->nr_pages;
+ unsigned long oldest_jif;
struct inode *inode;
long progress;
- if (!work->older_than_this_is_set) {
- work->older_than_this = jiffies;
- work->older_than_this_is_set = 1;
- }
+ oldest_jif = jiffies;
+ work->older_than_this = &oldest_jif;
spin_lock(&wb->list_lock);
for (;;) {
@@ -842,10 +834,10 @@
* safe.
*/
if (work->for_kupdate) {
- work->older_than_this = jiffies -
+ oldest_jif = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
} else if (work->for_background)
- work->older_than_this = jiffies;
+ oldest_jif = jiffies;
trace_writeback_start(wb->bdi, work);
if (list_empty(&wb->b_io))
@@ -1357,21 +1349,18 @@
/**
* sync_inodes_sb - sync sb inode pages
- * @sb: the superblock
- * @older_than_this: timestamp
+ * @sb: the superblock
*
* This function writes and waits on any dirty inode belonging to this
- * superblock that has been dirtied before given timestamp.
+ * super_block.
*/
-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
+void sync_inodes_sb(struct super_block *sb)
{
DECLARE_COMPLETION_ONSTACK(done);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
- .older_than_this = older_than_this,
- .older_than_this_is_set = 1,
.range_cyclic = 0,
.done = &done,
.reason = WB_REASON_SYNC,
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 968ce41..32602c6 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -103,6 +103,8 @@
folder = &entry->folder;
memset(folder, 0, sizeof(*folder));
folder->type = cpu_to_be16(HFSPLUS_FOLDER);
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
+ folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
folder->id = cpu_to_be32(inode->i_ino);
HFSPLUS_I(inode)->create_date =
folder->create_date =
@@ -203,6 +205,36 @@
return hfs_brec_find(fd, hfs_find_rec_by_key);
}
+static void hfsplus_subfolders_inc(struct inode *dir)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+ /*
+ * Increment subfolder count. Note, the value is only meaningful
+ * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+ */
+ HFSPLUS_I(dir)->subfolders++;
+ }
+}
+
+static void hfsplus_subfolders_dec(struct inode *dir)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+ /*
+ * Decrement subfolder count. Note, the value is only meaningful
+ * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+ *
+ * Check for zero. Some subfolders may have been created
+ * by an implementation ignorant of this counter.
+ */
+ if (HFSPLUS_I(dir)->subfolders)
+ HFSPLUS_I(dir)->subfolders--;
+ }
+}
+
int hfsplus_create_cat(u32 cnid, struct inode *dir,
struct qstr *str, struct inode *inode)
{
@@ -247,6 +279,8 @@
goto err1;
dir->i_size++;
+ if (S_ISDIR(inode->i_mode))
+ hfsplus_subfolders_inc(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -336,6 +370,8 @@
goto out;
dir->i_size--;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_dec(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -380,6 +416,7 @@
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
+ type = be16_to_cpu(entry.type);
/* create new dir entry with the data from the old entry */
hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
@@ -394,6 +431,8 @@
if (err)
goto out;
dst_dir->i_size++;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_inc(dst_dir);
dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
/* finally remove the old entry */
@@ -405,6 +444,8 @@
if (err)
goto out;
src_dir->i_size--;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_dec(src_dir);
src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
/* remove old thread entry */
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 08846425b..62d571e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -242,6 +242,7 @@
*/
sector_t fs_blocks;
u8 userflags; /* BSD user file flags */
+ u32 subfolders; /* Subfolder count (HFSX only) */
struct list_head open_dir_list;
loff_t phys_size;
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 8ffb3a8..5a12682 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -261,7 +261,7 @@
struct DInfo user_info;
struct DXInfo finder_info;
__be32 text_encoding;
- u32 reserved;
+ __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
} __packed;
/* HFS file info (stolen from hfs.h) */
@@ -301,11 +301,13 @@
struct hfsplus_fork_raw rsrc_fork;
} __packed;
-/* File attribute bits */
+/* File and folder flag bits */
#define HFSPLUS_FILE_LOCKED 0x0001
#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
#define HFSPLUS_XATTR_EXISTS 0x0004
#define HFSPLUS_ACL_EXISTS 0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
+ * (HFSX only) */
/* HFS+ catalog thread (part of a cat_entry) */
struct hfsplus_cat_thread {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fa929f3..a4f45bd 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -375,6 +375,7 @@
hip->extent_state = 0;
hip->flags = 0;
hip->userflags = 0;
+ hip->subfolders = 0;
memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->alloc_blocks = 0;
@@ -494,6 +495,10 @@
inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
+ if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+ HFSPLUS_I(inode)->subfolders =
+ be32_to_cpu(folder->subfolders);
+ }
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (type == HFSPLUS_FILE) {
@@ -566,6 +571,10 @@
folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
folder->valence = cpu_to_be32(inode->i_size - 2);
+ if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+ folder->subfolders =
+ cpu_to_be32(HFSPLUS_I(inode)->subfolders);
+ }
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
} else if (HFSPLUS_IS_RSRC(inode)) {
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 968eab5..68537e8 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -75,7 +75,7 @@
int token;
if (!input)
- return 0;
+ return 1;
while ((p = strsep(&input, ",")) != NULL) {
if (!*p)
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 0d6ce89..0f4152d 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -94,6 +94,7 @@
* @fs_type: file_system_type of the fs being mounted
* @flags: mount flags specified for the mount
* @root: kernfs_root of the hierarchy being mounted
+ * @new_sb_created: tell the caller if we allocated a new superblock
* @ns: optional namespace tag of the mount
*
* This is to be called from each kernfs user's file_system_type->mount()
@@ -104,7 +105,8 @@
* The return value can be passed to the vfs layer verbatim.
*/
struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns)
+ struct kernfs_root *root, bool *new_sb_created,
+ const void *ns)
{
struct super_block *sb;
struct kernfs_super_info *info;
@@ -122,6 +124,10 @@
kfree(info);
if (IS_ERR(sb))
return ERR_CAST(sb);
+
+ if (new_sb_created)
+ *new_sb_created = !sb->s_root;
+
if (!sb->s_root) {
error = kernfs_fill_super(sb);
if (error) {
diff --git a/fs/mount.h b/fs/mount.h
index a17458c..b29e42f 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -19,13 +19,13 @@
};
struct mountpoint {
- struct list_head m_hash;
+ struct hlist_node m_hash;
struct dentry *m_dentry;
int m_count;
};
struct mount {
- struct list_head mnt_hash;
+ struct hlist_node mnt_hash;
struct mount *mnt_parent;
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
diff --git a/fs/namei.c b/fs/namei.c
index 385f781..4b491b4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1109,7 +1109,7 @@
return false;
if (!d_mountpoint(path->dentry))
- break;
+ return true;
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
@@ -1125,20 +1125,7 @@
*/
*inode = path->dentry->d_inode;
}
- return true;
-}
-
-static void follow_mount_rcu(struct nameidata *nd)
-{
- while (d_mountpoint(nd->path.dentry)) {
- struct mount *mounted;
- mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
- if (!mounted)
- break;
- nd->path.mnt = &mounted->mnt;
- nd->path.dentry = mounted->mnt.mnt_root;
- nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
- }
+ return read_seqretry(&mount_lock, nd->m_seq);
}
static int follow_dotdot_rcu(struct nameidata *nd)
@@ -1166,7 +1153,17 @@
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
- follow_mount_rcu(nd);
+ while (d_mountpoint(nd->path.dentry)) {
+ struct mount *mounted;
+ mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
+ if (!mounted)
+ break;
+ nd->path.mnt = &mounted->mnt;
+ nd->path.dentry = mounted->mnt.mnt_root;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ if (!read_seqretry(&mount_lock, nd->m_seq))
+ goto failed;
+ }
nd->inode = nd->path.dentry->d_inode;
return 0;
@@ -1884,7 +1881,7 @@
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
- if (f.need_put)
+ if (f.flags & FDPUT_FPUT)
*fp = f.file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
rcu_read_lock();
diff --git a/fs/namespace.c b/fs/namespace.c
index 22e5367..2ffc5a2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,11 +23,34 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
+#include <linux/bootmem.h>
#include "pnode.h"
#include "internal.h"
-#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
-#define HASH_SIZE (1UL << HASH_SHIFT)
+static unsigned int m_hash_mask __read_mostly;
+static unsigned int m_hash_shift __read_mostly;
+static unsigned int mp_hash_mask __read_mostly;
+static unsigned int mp_hash_shift __read_mostly;
+
+static __initdata unsigned long mhash_entries;
+static int __init set_mhash_entries(char *str)
+{
+ if (!str)
+ return 0;
+ mhash_entries = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("mhash_entries=", set_mhash_entries);
+
+static __initdata unsigned long mphash_entries;
+static int __init set_mphash_entries(char *str)
+{
+ if (!str)
+ return 0;
+ mphash_entries = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("mphash_entries=", set_mphash_entries);
static int event;
static DEFINE_IDA(mnt_id_ida);
@@ -36,8 +59,8 @@
static int mnt_id_start = 0;
static int mnt_group_start = 1;
-static struct list_head *mount_hashtable __read_mostly;
-static struct list_head *mountpoint_hashtable __read_mostly;
+static struct hlist_head *mount_hashtable __read_mostly;
+static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
@@ -55,12 +78,19 @@
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
-static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
- tmp = tmp + (tmp >> HASH_SHIFT);
- return tmp & (HASH_SIZE - 1);
+ tmp = tmp + (tmp >> m_hash_shift);
+ return &mount_hashtable[tmp & m_hash_mask];
+}
+
+static inline struct hlist_head *mp_hash(struct dentry *dentry)
+{
+ unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
+ tmp = tmp + (tmp >> mp_hash_shift);
+ return &mountpoint_hashtable[tmp & mp_hash_mask];
}
/*
@@ -187,7 +217,7 @@
mnt->mnt_writers = 0;
#endif
- INIT_LIST_HEAD(&mnt->mnt_hash);
+ INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
@@ -575,10 +605,10 @@
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
- struct list_head *head = mount_hashtable + hash(mnt, dentry);
+ struct hlist_head *head = m_hash(mnt, dentry);
struct mount *p;
- list_for_each_entry_rcu(p, head, mnt_hash)
+ hlist_for_each_entry_rcu(p, head, mnt_hash)
if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
return p;
return NULL;
@@ -590,13 +620,17 @@
*/
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
- struct list_head *head = mount_hashtable + hash(mnt, dentry);
- struct mount *p;
-
- list_for_each_entry_reverse(p, head, mnt_hash)
- if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
- return p;
- return NULL;
+ struct mount *p, *res;
+ res = p = __lookup_mnt(mnt, dentry);
+ if (!p)
+ goto out;
+ hlist_for_each_entry_continue(p, mnt_hash) {
+ if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+ break;
+ res = p;
+ }
+out:
+ return res;
}
/*
@@ -633,11 +667,11 @@
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
- struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
+ struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
int ret;
- list_for_each_entry(mp, chain, m_hash) {
+ hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
@@ -659,7 +693,7 @@
mp->m_dentry = dentry;
mp->m_count = 1;
- list_add(&mp->m_hash, chain);
+ hlist_add_head(&mp->m_hash, chain);
return mp;
}
@@ -670,7 +704,7 @@
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
- list_del(&mp->m_hash);
+ hlist_del(&mp->m_hash);
kfree(mp);
}
}
@@ -712,7 +746,7 @@
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
- list_del_init(&mnt->mnt_hash);
+ hlist_del_init_rcu(&mnt->mnt_hash);
put_mountpoint(mnt->mnt_mp);
mnt->mnt_mp = NULL;
}
@@ -739,15 +773,14 @@
struct mountpoint *mp)
{
mnt_set_mountpoint(parent, mp, mnt);
- list_add_tail(&mnt->mnt_hash, mount_hashtable +
- hash(&parent->mnt, mp->m_dentry));
+ hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/*
* vfsmount lock must be held for write
*/
-static void commit_tree(struct mount *mnt)
+static void commit_tree(struct mount *mnt, struct mount *shadows)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
@@ -762,8 +795,11 @@
list_splice(&head, n->list.prev);
- list_add_tail(&mnt->mnt_hash, mount_hashtable +
- hash(&parent->mnt, mnt->mnt_mountpoint));
+ if (shadows)
+ hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+ else
+ hlist_add_head_rcu(&mnt->mnt_hash,
+ m_hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
touch_mnt_namespace(n);
}
@@ -1153,26 +1189,28 @@
EXPORT_SYMBOL(may_umount);
-static LIST_HEAD(unmounted); /* protected by namespace_sem */
+static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static void namespace_unlock(void)
{
struct mount *mnt;
- LIST_HEAD(head);
+ struct hlist_head head = unmounted;
- if (likely(list_empty(&unmounted))) {
+ if (likely(hlist_empty(&head))) {
up_write(&namespace_sem);
return;
}
- list_splice_init(&unmounted, &head);
+ head.first->pprev = &head.first;
+ INIT_HLIST_HEAD(&unmounted);
+
up_write(&namespace_sem);
synchronize_rcu();
- while (!list_empty(&head)) {
- mnt = list_first_entry(&head, struct mount, mnt_hash);
- list_del_init(&mnt->mnt_hash);
+ while (!hlist_empty(&head)) {
+ mnt = hlist_entry(head.first, struct mount, mnt_hash);
+ hlist_del_init(&mnt->mnt_hash);
if (mnt->mnt_ex_mountpoint.mnt)
path_put(&mnt->mnt_ex_mountpoint);
mntput(&mnt->mnt);
@@ -1193,16 +1231,19 @@
*/
void umount_tree(struct mount *mnt, int how)
{
- LIST_HEAD(tmp_list);
+ HLIST_HEAD(tmp_list);
struct mount *p;
+ struct mount *last = NULL;
- for (p = mnt; p; p = next_mnt(p, mnt))
- list_move(&p->mnt_hash, &tmp_list);
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ hlist_del_init_rcu(&p->mnt_hash);
+ hlist_add_head(&p->mnt_hash, &tmp_list);
+ }
if (how)
propagate_umount(&tmp_list);
- list_for_each_entry(p, &tmp_list, mnt_hash) {
+ hlist_for_each_entry(p, &tmp_list, mnt_hash) {
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
@@ -1220,8 +1261,13 @@
p->mnt_mp = NULL;
}
change_mnt_propagation(p, MS_PRIVATE);
+ last = p;
}
- list_splice(&tmp_list, &unmounted);
+ if (last) {
+ last->mnt_hash.next = unmounted.first;
+ unmounted.first = tmp_list.first;
+ unmounted.first->pprev = &unmounted.first;
+ }
}
static void shrink_submounts(struct mount *mnt);
@@ -1605,24 +1651,23 @@
struct mountpoint *dest_mp,
struct path *parent_path)
{
- LIST_HEAD(tree_list);
+ HLIST_HEAD(tree_list);
struct mount *child, *p;
+ struct hlist_node *n;
int err;
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
goto out;
- }
- err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
- if (err)
- goto out_cleanup_ids;
-
- lock_mount_hash();
-
- if (IS_MNT_SHARED(dest_mnt)) {
+ err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+ if (err)
+ goto out_cleanup_ids;
+ lock_mount_hash();
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
+ } else {
+ lock_mount_hash();
}
if (parent_path) {
detach_mnt(source_mnt, parent_path);
@@ -1630,20 +1675,22 @@
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
- commit_tree(source_mnt);
+ commit_tree(source_mnt, NULL);
}
- list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
- list_del_init(&child->mnt_hash);
- commit_tree(child);
+ hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+ struct mount *q;
+ hlist_del_init(&child->mnt_hash);
+ q = __lookup_mnt_last(&child->mnt_parent->mnt,
+ child->mnt_mountpoint);
+ commit_tree(child, q);
}
unlock_mount_hash();
return 0;
out_cleanup_ids:
- if (IS_MNT_SHARED(dest_mnt))
- cleanup_group_ids(source_mnt, NULL);
+ cleanup_group_ids(source_mnt, NULL);
out:
return err;
}
@@ -2777,18 +2824,24 @@
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
- mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
- mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
+ mount_hashtable = alloc_large_system_hash("Mount-cache",
+ sizeof(struct hlist_head),
+ mhash_entries, 19,
+ 0,
+ &m_hash_shift, &m_hash_mask, 0, 0);
+ mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
+ sizeof(struct hlist_head),
+ mphash_entries, 19,
+ 0,
+ &mp_hash_shift, &mp_hash_mask, 0, 0);
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
- printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
-
- for (u = 0; u < HASH_SIZE; u++)
- INIT_LIST_HEAD(&mount_hashtable[u]);
- for (u = 0; u < HASH_SIZE; u++)
- INIT_LIST_HEAD(&mountpoint_hashtable[u]);
+ for (u = 0; u <= m_hash_mask; u++)
+ INIT_HLIST_HEAD(&mount_hashtable[u]);
+ for (u = 0; u <= mp_hash_mask; u++)
+ INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
kernfs_init();
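
m_hash() and mp_hash() above turn pointers into bucket indices by discarding the low (cache-line) bits, folding the remaining high bits down by the table shift, and masking; the shift and mask come out of alloc_large_system_hash() at boot. A standalone sketch of the fold with illustrative constants:

    #include <stdio.h>

    #define L1_CACHE_BYTES 64UL

    /* illustrative 4096-bucket table; the kernel sizes this at boot */
    static const unsigned long hash_shift = 12;
    static const unsigned long hash_mask = (1UL << 12) - 1;

    static unsigned long bucket_of(const void *mnt, const void *dentry)
    {
        unsigned long tmp = (unsigned long)mnt / L1_CACHE_BYTES;

        tmp += (unsigned long)dentry / L1_CACHE_BYTES;
        tmp = tmp + (tmp >> hash_shift);   /* fold high bits down */
        return tmp & hash_mask;
    }

    int main(void)
    {
        int m, d;                          /* stand-ins for real objects */

        printf("bucket = %lu\n", bucket_of(&m, &d));
        return 0;
    }
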
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ef792f2..5d8ccec 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -659,16 +659,19 @@
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation == NULL)
+ goto out_enoent;
- if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
- rcu_read_unlock();
- return -ENOENT;
- }
+ if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
+ goto out_enoent;
nfs_mark_return_delegation(server, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
return 0;
+out_enoent:
+ rcu_read_unlock();
+ return -ENOENT;
}
static struct inode *
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 28a0a3c..360114a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -164,17 +164,16 @@
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
nfs_fscache_invalidate(inode);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_LABEL
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE;
} else
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_LABEL
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE;
+ nfs_zap_label_cache_locked(nfsi);
}
void nfs_zap_caches(struct inode *inode)
@@ -266,6 +265,13 @@
}
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+static void nfs_clear_label_invalid(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
+ spin_unlock(&inode->i_lock);
+}
+
void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -283,6 +289,7 @@
__func__,
(char *)label->label,
label->len, error);
+ nfs_clear_label_invalid(inode);
}
}
@@ -1648,7 +1655,7 @@
inode->i_blocks = fattr->du.nfs2.blocks;
/* Update attrtimeo value if we're out of the unstable period */
- if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) {
+ if (invalid & NFS_INO_INVALID_ATTR) {
nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;
@@ -1661,7 +1668,6 @@
}
}
invalid &= ~NFS_INO_INVALID_ATTR;
- invalid &= ~NFS_INO_INVALID_LABEL;
/* Don't invalidate the data if we were to blame */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8b5cc04..b46cf5a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -176,7 +176,8 @@
extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
struct nfs_fh *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen);
+ struct sockaddr *sap, size_t salen,
+ struct net *net);
extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
@@ -279,9 +280,18 @@
}
return;
}
+
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+ if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
+ nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
+}
#else
static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
static inline void nfs4_label_free(void *label) {}
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
/* proc.c */
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index aa9bc973..a462ef0 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -18,6 +18,7 @@
#include <linux/lockd/bind.h>
#include <linux/nfs_mount.h>
#include <linux/freezer.h>
+#include <linux/xattr.h>
#include "iostat.h"
#include "internal.h"
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 860ad26..0e46d3d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1135,6 +1135,7 @@
* @hostname: new end-point's hostname
* @sap: new end-point's socket address
* @salen: size of "sap"
+ * @net: net namespace
*
* The nfs_server must be quiescent before this function is invoked.
* Either its session is drained (NFSv4.1+), or its transport is
@@ -1143,13 +1144,13 @@
* Returns zero on success, or a negative errno value.
*/
int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen)
+ struct sockaddr *sap, size_t salen, struct net *net)
{
struct nfs_client *clp = server->nfs_client;
struct rpc_clnt *clnt = server->client;
struct xprt_create xargs = {
.ident = clp->cl_proto,
- .net = &init_net,
+ .net = net,
.dstaddr = sap,
.addrlen = salen,
.servername = hostname,
@@ -1189,7 +1190,7 @@
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_rpcclient->cl_auth->au_flavor,
clp->cl_proto, clnt->cl_timeout,
- clp->cl_minorversion, clp->cl_net);
+ clp->cl_minorversion, net);
nfs_put_client(clp);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 12c8132..b9a35c0 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -324,8 +324,9 @@
&rdata->res.seq_res,
task))
return;
- nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
- rdata->args.lock_context, FMODE_READ);
+ if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
+ rdata->args.lock_context, FMODE_READ) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -435,8 +436,9 @@
&wdata->res.seq_res,
task))
return;
- nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
- wdata->args.lock_context, FMODE_WRITE);
+ if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
+ wdata->args.lock_context, FMODE_WRITE) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void filelayout_write_call_done(struct rpc_task *task, void *data)
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 4e7f05d..3d5dbf8 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -121,9 +121,8 @@
}
static size_t nfs_parse_server_name(char *string, size_t len,
- struct sockaddr *sa, size_t salen, struct nfs_server *server)
+ struct sockaddr *sa, size_t salen, struct net *net)
{
- struct net *net = rpc_net_ns(server->client);
ssize_t ret;
ret = rpc_pton(net, string, len, sa, salen);
@@ -223,6 +222,7 @@
const struct nfs4_fs_location *location)
{
const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
struct vfsmount *mnt = ERR_PTR(-ENOENT);
char *mnt_path;
unsigned int maxbuflen;
@@ -248,8 +248,7 @@
continue;
mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
- mountdata->addr, addr_bufsize,
- NFS_SB(mountdata->sb));
+ mountdata->addr, addr_bufsize, net);
if (mountdata->addrlen == 0)
continue;
@@ -419,6 +418,7 @@
const struct nfs4_fs_location *location)
{
const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct net *net = rpc_net_ns(server->client);
struct sockaddr *sap;
unsigned int s;
size_t salen;
@@ -440,7 +440,7 @@
continue;
salen = nfs_parse_server_name(buf->data, buf->len,
- sap, addr_bufsize, server);
+ sap, addr_bufsize, net);
if (salen == 0)
continue;
rpc_set_port(sap, NFS_PORT);
@@ -450,7 +450,7 @@
if (hostname == NULL)
break;
- error = nfs4_update_server(server, hostname, sap, salen);
+ error = nfs4_update_server(server, hostname, sap, salen, net);
kfree(hostname);
if (error == 0)
break;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2da6a69..450bfed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2398,13 +2398,16 @@
if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
/* Use that stateid */
- } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) {
+ } else if (truncate && state != NULL) {
struct nfs_lockowner lockowner = {
.l_owner = current->files,
.l_pid = current->tgid,
};
- nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
- &lockowner);
+ if (!nfs4_valid_open_stateid(state))
+ return -EBADF;
+ if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
+ &lockowner) == -EIO)
+ return -EBADF;
} else
nfs4_stateid_copy(&arg.stateid, &zero_stateid);
@@ -4011,8 +4014,9 @@
{
nfs4_stateid current_stateid;
- if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
- return false;
+ /* If the current stateid represents a lost lock, then exit */
+ if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
+ return true;
return nfs4_stateid_match(stateid, &current_stateid);
}
@@ -5828,8 +5832,7 @@
struct nfs4_lock_state *lsp;
struct nfs_server *server;
struct nfs_release_lockowner_args args;
- struct nfs4_sequence_args seq_args;
- struct nfs4_sequence_res seq_res;
+ struct nfs_release_lockowner_res res;
unsigned long timestamp;
};
@@ -5837,7 +5840,7 @@
{
struct nfs_release_lockowner_data *data = calldata;
nfs40_setup_sequence(data->server,
- &data->seq_args, &data->seq_res, task);
+ &data->args.seq_args, &data->res.seq_res, task);
data->timestamp = jiffies;
}
@@ -5846,7 +5849,7 @@
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
- nfs40_sequence_done(task, &data->seq_res);
+ nfs40_sequence_done(task, &data->res.seq_res);
switch (task->tk_status) {
case 0:
@@ -5887,7 +5890,6 @@
data = kmalloc(sizeof(*data), GFP_NOFS);
if (!data)
return -ENOMEM;
- nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
data->lsp = lsp;
data->server = server;
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
@@ -5895,6 +5897,8 @@
data->args.lock_owner.s_dev = server->s_dev;
msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
return 0;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e5be725..0deb321 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -974,9 +974,6 @@
else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
nfs4_stateid_copy(dst, &lsp->ls_stateid);
ret = 0;
- smp_rmb();
- if (!list_empty(&lsp->ls_seqid.list))
- ret = -EWOULDBLOCK;
}
spin_unlock(&state->state_lock);
nfs4_put_lock_state(lsp);
@@ -984,10 +981,9 @@
return ret;
}
-static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
const nfs4_stateid *src;
- int ret;
int seq;
do {
@@ -996,12 +992,7 @@
if (test_bit(NFS_OPEN_STATE, &state->flags))
src = &state->open_stateid;
nfs4_stateid_copy(dst, src);
- ret = 0;
- smp_rmb();
- if (!list_empty(&state->owner->so_seqid.list))
- ret = -EWOULDBLOCK;
} while (read_seqretry(&state->seqlock, seq));
- return ret;
}
/*
@@ -1015,15 +1006,19 @@
if (ret == -EIO)
/* A lost lock - don't even consider delegations */
goto out;
- if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
+ /* returns true if delegation stateid found and copied */
+ if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
+ ret = 0;
goto out;
+ }
if (ret != -ENOENT)
/* nfs4_copy_delegation_stateid() didn't over-write
* dst, so it still has the lock stateid which we now
* choose to use.
*/
goto out;
- ret = nfs4_copy_open_stateid(dst, state);
+ nfs4_copy_open_stateid(dst, state);
+ ret = 0;
out:
if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
dst->seqid = 0;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 017d3cb5..6d7be3f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -449,6 +449,7 @@
fh_lock(fhp);
host_err = notify_change(dentry, iap, NULL);
fh_unlock(fhp);
+ err = nfserrno(host_err);
out_put_write_access:
if (size_change)
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 0b9ff43..abc8cbc 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -86,7 +86,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 0e792f5..dc638f7 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -147,7 +147,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *fanotify_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
int ret = 0;
struct fanotify_event_info *event;
@@ -192,10 +192,12 @@
ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
if (ret) {
- BUG_ON(mask & FAN_ALL_PERM_EVENTS);
+ /* Permission events shouldn't be merged */
+ BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
/* Our event wasn't used in the end. Free it. */
fsnotify_destroy_event(group, fsn_event);
- ret = 0;
+
+ return 0;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index b6175fa..287a22c 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -698,6 +698,7 @@
struct fsnotify_group *group;
int f_flags, fd;
struct user_struct *user;
+ struct fanotify_event_info *oevent;
pr_debug("%s: flags=%d event_f_flags=%d\n",
__func__, flags, event_f_flags);
@@ -730,8 +731,20 @@
group->fanotify_data.user = user;
atomic_inc(&user->fanotify_listeners);
+ oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+ if (unlikely(!oevent)) {
+ fd = -ENOMEM;
+ goto out_destroy_group;
+ }
+ group->overflow_event = &oevent->fse;
+ fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+ oevent->tgid = get_pid(task_tgid(current));
+ oevent->path.mnt = NULL;
+ oevent->path.dentry = NULL;
+
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ oevent->response = 0;
mutex_init(&group->fanotify_data.access_mutex);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 1d4e1ea2..9d3e9c5 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -179,7 +179,7 @@
return group->ops->handle_event(group, to_tell, inode_mark,
vfsmount_mark, mask, data, data_is,
- file_name);
+ file_name, cookie);
}
/*
diff --git a/fs/notify/group.c b/fs/notify/group.c
index ee674fe..ad19959 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -55,6 +55,13 @@
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
+ /*
+ * Destroy overflow event (we cannot use fsnotify_destroy_event() as
+ * that deliberately ignores overflow events).
+ */
+ if (group->overflow_event)
+ group->ops->free_event(group->overflow_event);
+
fsnotify_put_group(group);
}
@@ -99,7 +106,6 @@
INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
- fsnotify_init_event(&group->overflow_event, NULL, FS_Q_OVERFLOW);
return group;
}
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index 485eef3..ed855ef 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -27,6 +27,6 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name);
+ const unsigned char *file_name, u32 cookie);
extern const struct fsnotify_ops inotify_fsnotify_ops;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index d5ee563..43ab1e1 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -67,7 +67,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
struct inotify_inode_mark *i_mark;
struct inotify_event_info *event;
@@ -103,6 +103,7 @@
fsn_event = &event->fse;
fsnotify_init_event(fsn_event, inode, mask);
event->wd = i_mark->wd;
+ event->sync_cookie = cookie;
event->name_len = len;
if (len)
strcpy(event->name, file_name);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 497395c..78a2ca3 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -495,7 +495,7 @@
/* Queue ignore event for the watch */
inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
- NULL, FSNOTIFY_EVENT_NONE, NULL);
+ NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
/* remove this mark from the idr */
@@ -633,11 +633,23 @@
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
struct fsnotify_group *group;
+ struct inotify_event_info *oevent;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
+ oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
+ if (unlikely(!oevent)) {
+ fsnotify_destroy_group(group);
+ return ERR_PTR(-ENOMEM);
+ }
+ group->overflow_event = &oevent->fse;
+ fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+ oevent->wd = -1;
+ oevent->sync_cookie = 0;
+ oevent->name_len = 0;
+
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 18b3c44..1e58402 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -80,7 +80,8 @@
/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. The function returns 0 if the event was
- * added to the queue, 1 if the event was merged with some other queued event.
+ * added to the queue, 1 if the event was merged with some other queued event,
+ * 2 if the queue of events has overflowed.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
@@ -95,10 +96,14 @@
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
+ ret = 2;
/* Queue overflow event only if it isn't already queued */
- if (list_empty(&group->overflow_event.list))
- event = &group->overflow_event;
- ret = 1;
+ if (!list_empty(&group->overflow_event->list)) {
+ mutex_unlock(&group->notification_mutex);
+ return ret;
+ }
+ event = group->overflow_event;
+ goto queue;
}
if (!list_empty(list) && merge) {
@@ -109,6 +114,7 @@
}
}
+queue:
group->q_len++;
list_add_tail(&event->list, list);
mutex_unlock(&group->notification_mutex);
@@ -132,7 +138,11 @@
event = list_first_entry(&group->notification_list,
struct fsnotify_event, list);
- list_del(&event->list);
+ /*
+ * We need to init list head for the case of overflow event so that
+ * check in fsnotify_add_notify_event() works
+ */
+ list_del_init(&event->list);
group->q_len--;
return event;
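
The thread running through these fsnotify changes is that every group now preallocates its single overflow event at creation time, so the overflow path never allocates and fsnotify_add_notify_event() can report 0 (queued), 1 (merged) or 2 (overflowed). A userspace sketch of a bounded queue with a preallocated overflow sentinel (all names hypothetical):

    #include <stdio.h>

    #define MAX_EVENTS 2

    struct event { int id; int queued; };

    static struct event overflow = { .id = -1 };   /* allocated up front */
    static struct event *queue[MAX_EVENTS + 1];    /* +1 for the sentinel */
    static int q_len;

    /* 0 = queued, 2 = overflowed (mirrors the convention above) */
    static int add_event(struct event *ev)
    {
        if (q_len >= MAX_EVENTS) {
            if (overflow.queued)
                return 2;          /* overflow already reported once */
            ev = &overflow;        /* no allocation on this path */
        }
        ev->queued = 1;
        queue[q_len++] = ev;
        return ev == &overflow ? 2 : 0;
    }

    int main(void)
    {
        struct event e1 = { 1 }, e2 = { 2 }, e3 = { 3 };

        printf("%d %d %d\n", add_event(&e1), add_event(&e2),
               add_event(&e3));   /* prints: 0 0 2 */
        return 0;
    }
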
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8450262bc..51632c4 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2393,8 +2393,8 @@
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
((file->f_flags & O_DIRECT) && !direct_io)) {
- ret = filemap_fdatawrite_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
if (ret < 0)
written = ret;
@@ -2407,8 +2407,8 @@
}
if (!ret)
- ret = filemap_fdatawait_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
}
/*
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index aaa5061..d7b5108 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -717,6 +717,12 @@
*/
if (status < 0)
mlog_errno(status);
+ /*
+ * Clear dq_off so that we search for the structure in quota file next
+ * time we acquire it. The structure might be deleted and reallocated
+ * elsewhere by another node while our dquot structure is on freelist.
+ */
+ dquot->dq_off = 0;
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
ocfs2_commit_trans(osb, handle);
@@ -756,16 +762,17 @@
status = ocfs2_lock_global_qf(info, 1);
if (status < 0)
goto out;
- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
- status = ocfs2_qinfo_lock(info, 0);
- if (status < 0)
- goto out_dq;
- status = qtree_read_dquot(&info->dqi_gi, dquot);
- ocfs2_qinfo_unlock(info, 0);
- if (status < 0)
- goto out_dq;
- }
- set_bit(DQ_READ_B, &dquot->dq_flags);
+ status = ocfs2_qinfo_lock(info, 0);
+ if (status < 0)
+ goto out_dq;
+ /*
+ * We always want to read dquot structure from disk because we don't
+ * know what happened with it while it was on freelist.
+ */
+ status = qtree_read_dquot(&info->dqi_gi, dquot);
+ ocfs2_qinfo_unlock(info, 0);
+ if (status < 0)
+ goto out_dq;
OCFS2_DQUOT(dquot)->dq_use_count++;
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2e4344b..2001862 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1303,10 +1303,6 @@
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
out:
- /* Clear the read bit so that next time someone uses this
- * dquot he reads fresh info from disk and allocates local
- * dquot structure */
- clear_bit(DQ_READ_B, &dquot->dq_flags);
return status;
}
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 1324e66..ca5ce14 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -346,7 +346,9 @@
strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
new_conn->cc_namelen = grouplen;
- strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1);
+ if (cluster_name_len)
+ strlcpy(new_conn->cc_cluster_name, cluster_name,
+ CLUSTER_NAME_MAX + 1);
new_conn->cc_cluster_name_len = cluster_name_len;
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
diff --git a/fs/open.c b/fs/open.c
index 4b3e1ed..b9ed8b2 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -705,6 +705,10 @@
return 0;
}
+ /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
+ if (S_ISREG(inode->i_mode))
+ f->f_mode |= FMODE_ATOMIC_POS;
+
f->f_op = fops_get(inode->i_fop);
if (unlikely(WARN_ON(!f->f_op))) {
error = -ENODEV;
diff --git a/fs/pnode.c b/fs/pnode.c
index c7221bb..88396df 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -220,14 +220,14 @@
* @tree_list : list of heads of trees to be attached.
*/
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
- struct mount *source_mnt, struct list_head *tree_list)
+ struct mount *source_mnt, struct hlist_head *tree_list)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
struct mount *m, *child;
int ret = 0;
struct mount *prev_dest_mnt = dest_mnt;
struct mount *prev_src_mnt = source_mnt;
- LIST_HEAD(tmp_list);
+ HLIST_HEAD(tmp_list);
for (m = propagation_next(dest_mnt, dest_mnt); m;
m = propagation_next(m, dest_mnt)) {
@@ -246,27 +246,29 @@
child = copy_tree(source, source->mnt.mnt_root, type);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
- list_splice(tree_list, tmp_list.prev);
+ tmp_list = *tree_list;
+ tmp_list.first->pprev = &tmp_list.first;
+ INIT_HLIST_HEAD(tree_list);
goto out;
}
if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
mnt_set_mountpoint(m, dest_mp, child);
- list_add_tail(&child->mnt_hash, tree_list);
+ hlist_add_head(&child->mnt_hash, tree_list);
} else {
/*
* This can happen if the parent mount was bind mounted
* on some subdirectory of a shared/slave mount.
*/
- list_add_tail(&child->mnt_hash, &tmp_list);
+ hlist_add_head(&child->mnt_hash, &tmp_list);
}
prev_dest_mnt = m;
prev_src_mnt = child;
}
out:
lock_mount_hash();
- while (!list_empty(&tmp_list)) {
- child = list_first_entry(&tmp_list, struct mount, mnt_hash);
+ while (!hlist_empty(&tmp_list)) {
+ child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
umount_tree(child, 0);
}
unlock_mount_hash();
@@ -338,8 +340,10 @@
* umount the child only if the child has no
* other children
*/
- if (child && list_empty(&child->mnt_mounts))
- list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
+ if (child && list_empty(&child->mnt_mounts)) {
+ hlist_del_init_rcu(&child->mnt_hash);
+ hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+ }
}
}
@@ -350,11 +354,11 @@
*
* vfsmount lock must be held for write
*/
-int propagate_umount(struct list_head *list)
+int propagate_umount(struct hlist_head *list)
{
struct mount *mnt;
- list_for_each_entry(mnt, list, mnt_hash)
+ hlist_for_each_entry(mnt, list, mnt_hash)
__propagate_umount(mnt);
return 0;
}
diff --git a/fs/pnode.h b/fs/pnode.h
index 59e7eda..fc28a27 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -36,8 +36,8 @@
void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
- struct list_head *);
-int propagate_umount(struct list_head *);
+ struct hlist_head *);
+int propagate_umount(struct hlist_head *);
int propagate_mount_busy(struct mount *, int);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5150706..b976062 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1824,6 +1824,7 @@
if (rc)
goto out_mmput;
+ rc = -ENOENT;
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 02174a6..e647c55 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -121,9 +121,8 @@
* just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page) &&
- (PageLRU(compound_trans_head(page)) ||
- PageAnon(compound_trans_head(page))))
+ else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+ PageAnon(compound_head(page))))
u |= 1 << KPF_THP;
/*
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 831d49a..cfc8dcc 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -581,9 +581,17 @@
dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
- ret = fn(dquot, priv);
- if (ret < 0)
- goto out;
+ /*
+ * ->release_dquot() can be racing with us. Our reference
+ * protects us from new calls to it so just wait for any
+ * outstanding call and recheck the DQ_ACTIVE_B after that.
+ */
+ wait_on_dquot(dquot);
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ ret = fn(dquot, priv);
+ if (ret < 0)
+ goto out;
+ }
spin_lock(&dq_list_lock);
/* We are safe to continue now because our dquot could not
* be moved out of the inuse list while we hold the reference */
diff --git a/fs/read_write.c b/fs/read_write.c
index edc5746..31c6efa 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -264,10 +264,22 @@
}
EXPORT_SYMBOL(vfs_llseek);
+static inline struct fd fdget_pos(int fd)
+{
+ return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+ if (f.flags & FDPUT_POS_UNLOCK)
+ mutex_unlock(&f.file->f_pos_lock);
+ fdput(f);
+}
+
SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
off_t retval;
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
if (!f.file)
return -EBADF;
@@ -278,7 +290,7 @@
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
}
- fdput(f);
+ fdput_pos(f);
return retval;
}
@@ -295,7 +307,7 @@
unsigned int, whence)
{
int retval;
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
loff_t offset;
if (!f.file)
@@ -315,7 +327,7 @@
retval = 0;
}
out_putf:
- fdput(f);
+ fdput_pos(f);
return retval;
}
#endif
@@ -498,7 +510,7 @@
SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -506,7 +518,7 @@
ret = vfs_read(f.file, buf, count, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
return ret;
}
@@ -514,7 +526,7 @@
SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
size_t, count)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -522,7 +534,7 @@
ret = vfs_write(f.file, buf, count, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
return ret;
@@ -797,7 +809,7 @@
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -805,7 +817,7 @@
ret = vfs_readv(f.file, vec, vlen, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
if (ret > 0)
@@ -817,7 +829,7 @@
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -825,7 +837,7 @@
ret = vfs_writev(f.file, vec, vlen, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
if (ret > 0)
@@ -968,7 +980,7 @@
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret;
loff_t pos;
@@ -978,13 +990,13 @@
ret = compat_readv(f.file, vec, vlen, &pos);
if (ret >= 0)
f.file->f_pos = pos;
- fdput(f);
+ fdput_pos(f);
return ret;
}
-COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
- const struct compat_iovec __user *,vec,
- unsigned long, vlen, loff_t, pos)
+static long __compat_sys_preadv64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos)
{
struct fd f;
ssize_t ret;
@@ -1001,12 +1013,22 @@
return ret;
}
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
+ const struct compat_iovec __user *,vec,
+ unsigned long, vlen, loff_t, pos)
+{
+ return __compat_sys_preadv64(fd, vec, vlen, pos);
+}
+#endif
+
COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
- return compat_sys_preadv64(fd, vec, vlen, pos);
+
+ return __compat_sys_preadv64(fd, vec, vlen, pos);
}
static size_t compat_writev(struct file *file,
@@ -1035,7 +1057,7 @@
const struct compat_iovec __user *, vec,
compat_ulong_t, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret;
loff_t pos;
@@ -1045,13 +1067,13 @@
ret = compat_writev(f.file, vec, vlen, &pos);
if (ret >= 0)
f.file->f_pos = pos;
- fdput(f);
+ fdput_pos(f);
return ret;
}
-COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
- const struct compat_iovec __user *,vec,
- unsigned long, vlen, loff_t, pos)
+static long __compat_sys_pwritev64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos)
{
struct fd f;
ssize_t ret;
@@ -1068,12 +1090,22 @@
return ret;
}
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
+ const struct compat_iovec __user *,vec,
+ unsigned long, vlen, loff_t, pos)
+{
+ return __compat_sys_pwritev64(fd, vec, vlen, pos);
+}
+#endif
+
COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
- return compat_sys_pwritev64(fd, vec, vlen, pos);
+
+ return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif
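
The fdget_pos()/fdput_pos() pairs above make read(), write(), lseek() and friends update f_pos under a per-file mutex whenever the file was opened with FMODE_ATOMIC_POS, giving POSIX's atomic file offset for shared descriptors. A pthread sketch of the idea (hypothetical struct, not the kernel's struct fd; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct file_pos {
        pthread_mutex_t f_pos_lock;
        long long f_pos;
    };

    static long long read_chunk(struct file_pos *f, long long count)
    {
        long long pos;

        pthread_mutex_lock(&f->f_pos_lock);    /* fdget_pos() analogue */
        pos = f->f_pos;
        /* ... the actual I/O at 'pos' would happen here ... */
        f->f_pos = pos + count;
        pthread_mutex_unlock(&f->f_pos_lock);  /* fdput_pos() analogue */
        return pos;
    }

    int main(void)
    {
        struct file_pos f = { PTHREAD_MUTEX_INITIALIZER, 0 };
        long long first = read_chunk(&f, 10);
        long long second = read_chunk(&f, 10);

        printf("%lld %lld\n", first, second);  /* 0 10: no lost update */
        return 0;
    }
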
diff --git a/fs/sync.c b/fs/sync.c
index e8ba024..b28d1dd 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,11 +27,10 @@
* wait == 1 case since in that case write_inode() functions do
* sync_dirty_buffer() and thus effectively write one block at a time.
*/
-static int __sync_filesystem(struct super_block *sb, int wait,
- unsigned long start)
+static int __sync_filesystem(struct super_block *sb, int wait)
{
if (wait)
- sync_inodes_sb(sb, start);
+ sync_inodes_sb(sb);
else
writeback_inodes_sb(sb, WB_REASON_SYNC);
@@ -48,7 +47,6 @@
int sync_filesystem(struct super_block *sb)
{
int ret;
- unsigned long start = jiffies;
/*
* We need to be protected against the filesystem going from
@@ -62,17 +60,17 @@
if (sb->s_flags & MS_RDONLY)
return 0;
- ret = __sync_filesystem(sb, 0, start);
+ ret = __sync_filesystem(sb, 0);
if (ret < 0)
return ret;
- return __sync_filesystem(sb, 1, start);
+ return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
if (!(sb->s_flags & MS_RDONLY))
- sync_inodes_sb(sb, *((unsigned long *)arg));
+ sync_inodes_sb(sb);
}
static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -104,10 +102,9 @@
SYSCALL_DEFINE0(sync)
{
int nowait = 0, wait = 1;
- unsigned long start = jiffies;
wakeup_flusher_threads(0, WB_REASON_SYNC);
- iterate_supers(sync_inodes_one_sb, &start);
+ iterate_supers(sync_inodes_one_sb, NULL);
iterate_supers(sync_fs_one_sb, &nowait);
iterate_supers(sync_fs_one_sb, &wait);
iterate_bdevs(fdatawrite_one_bdev, NULL);
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 6211230..3eaf5c6 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -27,6 +27,7 @@
{
struct dentry *root;
void *ns;
+ bool new_sb;
if (!(flags & MS_KERNMOUNT)) {
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
@@ -37,8 +38,8 @@
}
ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
- root = kernfs_mount_ns(fs_type, flags, sysfs_root, ns);
- if (IS_ERR(root))
+ root = kernfs_mount_ns(fs_type, flags, sysfs_root, &new_sb, ns);
+ if (IS_ERR(root) || !new_sb)
kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
return root;
}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index c02a27a..1037637 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -144,6 +144,7 @@
size_t count = iocb->ki_nbytes;
struct udf_inode_info *iinfo = UDF_I(inode);
+ mutex_lock(&inode->i_mutex);
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (file->f_flags & O_APPEND)
@@ -156,6 +157,7 @@
pos + count)) {
err = udf_expand_file_adinicb(inode);
if (err) {
+ mutex_unlock(&inode->i_mutex);
udf_debug("udf_expand_adinicb: err=%d\n", err);
return err;
}
@@ -169,9 +171,17 @@
} else
up_write(&iinfo->i_data_sem);
- retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
- if (retval > 0)
+ retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ mutex_unlock(&inode->i_mutex);
+
+ if (retval > 0) {
+ ssize_t err;
+
mark_inode_dirty(inode);
+ err = generic_write_sync(file, iocb->ki_pos - retval, retval);
+ if (err < 0)
+ retval = err;
+ }
return retval;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 062b792..982ce05 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -265,6 +265,7 @@
.nr_to_write = 1,
};
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f35d5c95..9ddfb81 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -705,7 +705,6 @@
{
struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
- int mask = iattr->ia_valid;
xfs_off_t oldsize, newsize;
struct xfs_trans *tp;
int error;
@@ -726,8 +725,8 @@
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(S_ISREG(ip->i_d.di_mode));
- ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -736,7 +735,7 @@
* Short circuit the truncate case for zero length files.
*/
if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
- if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
+ if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
return 0;
/*
@@ -824,10 +823,11 @@
* these flags set. For all other operations the VFS set these flags
* explicitly if it wants a timestamp update.
*/
- if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ if (newsize != oldsize &&
+ !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
iattr->ia_ctime = iattr->ia_mtime =
current_fs_time(inode->i_sb);
- mask |= ATTR_CTIME | ATTR_MTIME;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
}
/*
@@ -863,9 +863,9 @@
xfs_inode_clear_eofblocks_tag(ip);
}
- if (mask & ATTR_MODE)
+ if (iattr->ia_valid & ATTR_MODE)
xfs_setattr_mode(ip, iattr);
- if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+ if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
xfs_setattr_time(ip, iattr);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cdebd83..4ef6fdb 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -205,16 +205,25 @@
/*
* We 64-bit align the length of each iovec so that the start
* of the next one is naturally aligned. We'll need to
- * account for that slack space here.
+ * account for that slack space here. Then round nbytes up
+ * to 64-bit alignment so that the initial buffer alignment is
+ * easy to calculate and verify.
*/
nbytes += niovecs * sizeof(uint64_t);
+ nbytes = round_up(nbytes, sizeof(uint64_t));
/* grab the old item if it exists for reservation accounting */
old_lv = lip->li_lv;
- /* calc buffer size */
- buf_size = sizeof(struct xfs_log_vec) + nbytes +
- niovecs * sizeof(struct xfs_log_iovec);
+ /*
+ * The data buffer needs to start 64-bit aligned, so round up
+ * that space to ensure we can align it appropriately and not
+ * overrun the buffer.
+ */
+ buf_size = nbytes +
+ round_up((sizeof(struct xfs_log_vec) +
+ niovecs * sizeof(struct xfs_log_iovec)),
+ sizeof(uint64_t));
/* compare to existing item size */
if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
@@ -251,6 +260,8 @@
/* The allocated data region lies beyond the iovec region */
lv->lv_buf_len = 0;
lv->lv_buf = (char *)lv + buf_size - nbytes;
+ ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
+
lip->li_ops->iop_format(lip, lv);
insert:
ASSERT(lv->lv_buf_len <= nbytes);
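
The new buf_size arithmetic rounds both the header region (struct xfs_log_vec plus the iovec array) and nbytes up to 8 bytes, so the data region at buf_size - nbytes always starts 64-bit aligned. A worked example with illustrative sizes in place of the real xfs structures:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        /* illustrative sizes, not the real xfs structures */
        size_t vec_hdr = 24, iovec_sz = 12, niovecs = 3, nbytes = 50;

        nbytes += niovecs * sizeof(uint64_t);     /* per-iovec slack: 74 */
        nbytes = round_up(nbytes, sizeof(uint64_t));          /* -> 80 */

        size_t buf_size = nbytes +
            round_up(vec_hdr + niovecs * iovec_sz,
                     sizeof(uint64_t));           /* 80 + 64 = 144 */

        printf("data region offset = %zu (multiple of 8)\n",
               buf_size - nbytes);                /* 64 */
        return 0;
    }
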
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 02df7b4..f96c056 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -282,22 +282,29 @@
struct xfs_sb *sbp = &mp->m_sb;
int error;
int loud = !(flags & XFS_MFSI_QUIET);
+ const struct xfs_buf_ops *buf_ops;
ASSERT(mp->m_sb_bp == NULL);
ASSERT(mp->m_ddev_targp != NULL);
/*
+ * For the initial read, we must guess at the sector
+ * size based on the block device. It's enough to
+ * get the sb_sectsize out of the superblock and
+ * then reread with the proper length.
+ * We don't verify it yet, because it may not be complete.
+ */
+ sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+ buf_ops = NULL;
+
+ /*
* Allocate a (locked) buffer to hold the superblock.
* This will be kept around at all times to optimize
* access to the superblock.
*/
- sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-
reread:
bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), 0,
- loud ? &xfs_sb_buf_ops
- : &xfs_sb_quiet_buf_ops);
+ BTOBB(sector_size), 0, buf_ops);
if (!bp) {
if (loud)
xfs_warn(mp, "SB buffer read failed");
@@ -328,12 +335,13 @@
}
/*
- * If device sector size is smaller than the superblock size,
- * re-read the superblock so the buffer is correctly sized.
+ * Re-read the superblock so the buffer is correctly sized,
+ * and properly verified.
*/
- if (sector_size < sbp->sb_sectsize) {
+ if (buf_ops == NULL) {
xfs_buf_relse(bp);
sector_size = sbp->sb_sectsize;
+ buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
goto reread;
}
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index b7c9aea..1e11679 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -295,8 +295,7 @@
sbp->sb_dblocks == 0 ||
sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
- XFS_CORRUPTION_ERROR("SB sanity check failed",
- XFS_ERRLEVEL_LOW, mp, sbp);
+ xfs_notice(mp, "SB sanity check failed");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -611,10 +610,10 @@
XFS_SB_VERSION_5) ||
dsb->sb_crc != 0)) {
- if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
+ if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
offsetof(struct xfs_sb, sb_crc))) {
/* Only fail bad secondaries on a known V5 filesystem */
- if (bp->b_bn != XFS_SB_DADDR &&
+ if (bp->b_bn == XFS_SB_DADDR ||
xfs_sb_version_hascrc(&mp->m_sb)) {
error = EFSCORRUPTED;
goto out_error;
@@ -625,7 +624,7 @@
out_error:
if (error) {
- if (error != EWRONGFS)
+ if (error == EFSCORRUPTED)
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
mp, bp->b_addr);
xfs_buf_ioerror(bp, error);
@@ -644,7 +643,6 @@
{
struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
-
if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
/* XFS filesystem, verify noisily! */
xfs_sb_read_verify(bp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f317488..d971f49 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -913,7 +913,7 @@
struct super_block *sb = mp->m_super;
if (down_read_trylock(&sb->s_umount)) {
- sync_inodes_sb(sb, jiffies);
+ sync_inodes_sb(sb);
up_read(&sb->s_umount);
}
}
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
index fa2a50b..0a7e066 100644
--- a/include/asm-generic/bitops/const_hweight.h
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -5,14 +5,15 @@
* Compile time versions of __arch_hweightN()
*/
#define __const_hweight8(w) \
- ( (!!((w) & (1ULL << 0))) + \
- (!!((w) & (1ULL << 1))) + \
- (!!((w) & (1ULL << 2))) + \
- (!!((w) & (1ULL << 3))) + \
- (!!((w) & (1ULL << 4))) + \
- (!!((w) & (1ULL << 5))) + \
- (!!((w) & (1ULL << 6))) + \
- (!!((w) & (1ULL << 7))) )
+ ((unsigned int) \
+ ((!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7)))))
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
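
The added (unsigned int) cast fixes the promoted type of the sum without costing the constant folding these macros exist for; the folded form still evaluates at compile time, e.g. in a C11 _Static_assert:

    #include <stdio.h>

    /* userspace copy of the folded form above */
    #define const_hweight8(w)                                       \
        ((unsigned int)                                             \
         ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) +       \
          (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) +       \
          (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) +       \
          (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7)))))

    _Static_assert(const_hweight8(0xA5) == 4, "0xA5 has four set bits");

    int main(void)
    {
        printf("%u\n", const_hweight8(0xA5));   /* prints 4 */
        return 0;
    }
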
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 0000000..10cd4ff
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ * arch_mcs_spin_lock_contended(l)
+ * arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
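
The hooks this header reserves, arch_mcs_spin_lock_contended() and arch_mcs_spin_unlock_contended(), cover the contended hand-off of an MCS queue lock, where each waiter spins on its own node instead of a shared word. A generic sketch with C11 atomics — an illustration of the algorithm, not kernel/locking/mcs_spinlock.c:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
        struct mcs_node *_Atomic next;
        _Atomic bool locked;
    };

    static struct mcs_node *_Atomic tail;

    static void mcs_lock(struct mcs_node *me)
    {
        struct mcs_node *prev;

        me->next = NULL;
        me->locked = false;
        prev = atomic_exchange(&tail, me);     /* join the queue */
        if (prev) {
            atomic_store(&prev->next, me);
            while (!atomic_load(&me->locked))
                ;                              /* spin on our own node */
        }
    }

    static void mcs_unlock(struct mcs_node *me)
    {
        struct mcs_node *next = atomic_load(&me->next);

        if (!next) {
            struct mcs_node *expected = me;
            if (atomic_compare_exchange_strong(&tail, &expected, NULL))
                return;                        /* no successor */
            while (!(next = atomic_load(&me->next)))
                ;                              /* successor mid-enqueue */
        }
        atomic_store(&next->locked, true);     /* hand off the lock */
    }

    int main(void)
    {
        struct mcs_node me;

        mcs_lock(&me);
        mcs_unlock(&me);
        return 0;
    }
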
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index a1116a3..8c1603b 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -36,10 +36,10 @@
#define TEGRA124_CLK_PWM 17
#define TEGRA124_CLK_I2S2 18
/* 20 (register bit affects vi and vi_sensor) */
-#define TEGRA124_CLK_GR_2D 21
+/* 21 */
#define TEGRA124_CLK_USBD 22
#define TEGRA124_CLK_ISP 23
-#define TEGRA124_CLK_GR_3D 24
+/* 24 */
/* 25 */
#define TEGRA124_CLK_DISP2 26
#define TEGRA124_CLK_DISP1 27
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be85127..f27000f 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -171,6 +171,11 @@
return 0;
}
+static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+ return -ENXIO;
+}
+
static inline int kvm_vgic_init(struct kvm *kvm)
{
return 0;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aa865a9..ec1464d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -43,6 +43,7 @@
struct mqstat;
struct audit_watch;
struct audit_tree;
+struct sk_buff;
struct audit_krule {
int vers_ops;
@@ -463,7 +464,7 @@
extern int audit_filter_type(int type);
extern int audit_rule_change(int type, __u32 portid, int seq,
void *data, size_t datasz);
-extern int audit_list_rules_send(__u32 portid, int seq);
+extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
extern u32 audit_enabled;
#else /* CONFIG_AUDIT */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index abc9ca7..be5fd38 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -196,6 +196,21 @@
#ifdef __KERNEL__
+#ifndef set_mask_bits
+#define set_mask_bits(ptr, _mask, _bits) \
+({ \
+ const typeof(*ptr) mask = (_mask), bits = (_bits); \
+ typeof(*ptr) old, new; \
+ \
+ do { \
+ old = ACCESS_ONCE(*ptr); \
+ new = (old & ~mask) | bits; \
+ } while (cmpxchg(ptr, old, new) != old); \
+ \
+ new; \
+})
+#endif
+
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
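
set_mask_bits() above is a classic cmpxchg() loop: rebuild the word from the last observed value and retry until no other CPU raced the update. A userspace analogue with C11 atomics (the kernel macro itself relies on cmpxchg() and ACCESS_ONCE()):

    #include <stdatomic.h>
    #include <stdio.h>

    /* atomically replace the bits selected by 'mask' with 'bits' */
    static unsigned my_set_mask_bits(_Atomic unsigned *ptr,
                                     unsigned mask, unsigned bits)
    {
        unsigned old, new;

        do {
            old = atomic_load(ptr);
            new = (old & ~mask) | bits;
        } while (!atomic_compare_exchange_weak(ptr, &old, new));
        return new;
    }

    int main(void)
    {
        _Atomic unsigned flags = 0xf0;

        /* rewrite the low nibble to 0x3, leave the rest alone */
        printf("0x%x\n", my_set_mask_bits(&flags, 0x0f, 0x03)); /* 0xf3 */
        return 0;
    }
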
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 18ba8a6..2ff2e8d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -121,8 +121,7 @@
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *,
- bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+ unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+ bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+ BUG_ON(!done);
+}
void blk_mq_complete_request(struct request *rq);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5c09759..9450f02 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -166,6 +166,8 @@
*
* The ID of the root cgroup is always 0, and a new cgroup
* will be assigned with a smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
*/
int id;
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 092b641..4a21a87 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -245,6 +245,10 @@
void omap2_init_clk_clkdm(struct clk_hw *clk);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate);
int omap2_clkops_enable_clkdm(struct clk_hw *hw);
void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3f448c6..01c0aa5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -14,6 +14,7 @@
#include <linux/if.h>
#include <linux/fs.h>
#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/unistd.h>
#include <asm/compat.h>
#include <asm/siginfo.h>
@@ -27,6 +28,9 @@
#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
#endif
+#define COMPAT_SYSCALL_DEFINE0(name) \
+ asmlinkage long compat_sys_##name(void)
+
#define COMPAT_SYSCALL_DEFINE1(name, ...) \
COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
#define COMPAT_SYSCALL_DEFINE2(name, ...) \
@@ -68,6 +72,8 @@
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
+typedef compat_ulong_t compat_aio_context_t;
+
struct compat_sel_arg_struct;
struct rusage;
@@ -318,7 +324,7 @@
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, long msgtyp, int msgflg);
+ compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmctl(int first, int second, void __user *uptr);
long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
@@ -337,6 +343,19 @@
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
@@ -451,7 +470,7 @@
asmlinkage long compat_sys_timerfd_gettime(int ufd,
struct compat_itimerspec __user *otmr);
-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
__u32 __user *pages,
const int __user *nodes,
int __user *status,
@@ -481,20 +500,20 @@
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
- unsigned long arg);
+ compat_ulong_t arg);
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
- unsigned long arg);
+ compat_ulong_t arg);
asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
-asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id,
- unsigned long min_nr,
- unsigned long nr,
+asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr,
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
u32 __user *iocb);
asmlinkage long compat_sys_mount(const char __user *dev_name,
const char __user *dir_name,
- const char __user *type, unsigned long flags,
+ const char __user *type, compat_ulong_t flags,
const void __user *data);
asmlinkage long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent __user *,
@@ -502,9 +521,11 @@
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
+#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
asmlinkage long compat_sys_getdents64(unsigned int fd,
struct linux_dirent64 __user *dirent,
unsigned int count);
+#endif
asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
unsigned int nr_segs, unsigned int flags);
asmlinkage long compat_sys_open(const char __user *filename, int flags,
@@ -549,9 +570,9 @@
unsigned vlen, unsigned int flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
@@ -615,16 +636,16 @@
struct compat_siginfo __user *uinfo);
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- unsigned long arg);
+ compat_ulong_t arg);
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
struct compat_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
char __user *optval, int __user *optlen);
-asmlinkage long compat_sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+ compat_ulong_t nr_segments,
struct compat_kexec_segment __user *,
- unsigned long flags);
+ compat_ulong_t flags);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr __user *u_mqstat,
struct compat_mq_attr __user *u_omqstat);
@@ -635,11 +656,11 @@
struct compat_mq_attr __user *u_attr);
asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
const char __user *u_msg_ptr,
- size_t msg_len, unsigned int msg_prio,
+ compat_size_t msg_len, unsigned int msg_prio,
const struct compat_timespec __user *u_abs_timeout);
asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
- size_t msg_len, unsigned int __user *u_msg_prio,
+ compat_size_t msg_len, unsigned int __user *u_msg_prio,
const struct compat_timespec __user *u_abs_timeout);
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
@@ -654,12 +675,12 @@
asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
const struct compat_iovec __user *lvec,
- unsigned long liovcnt, const struct compat_iovec __user *rvec,
- unsigned long riovcnt, unsigned long flags);
+ compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+ compat_ulong_t riovcnt, compat_ulong_t flags);
asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
const struct compat_iovec __user *lvec,
- unsigned long liovcnt, const struct compat_iovec __user *rvec,
- unsigned long riovcnt, unsigned long flags);
+ compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+ compat_ulong_t riovcnt, compat_ulong_t flags);
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
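
Note: the new COMPAT_SYSCALL_DEFINE0() macro rounds out the existing
COMPAT_SYSCALL_DEFINEn family for syscalls that take no arguments. A minimal
sketch of how it would be used (the syscall name here is hypothetical):

    COMPAT_SYSCALL_DEFINE0(example_nop)
    {
        /* expands to: asmlinkage long compat_sys_example_nop(void) */
        return 0;
    }
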
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0a819e7..6c100ff 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -153,6 +153,102 @@
u8 sets_to_zero;
} efi_time_cap_t;
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+} __packed efi_boot_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 raise_tpl;
+ u64 restore_tpl;
+ u64 allocate_pages;
+ u64 free_pages;
+ u64 get_memory_map;
+ u64 allocate_pool;
+ u64 free_pool;
+ u64 create_event;
+ u64 set_timer;
+ u64 wait_for_event;
+ u64 signal_event;
+ u64 close_event;
+ u64 check_event;
+ u64 install_protocol_interface;
+ u64 reinstall_protocol_interface;
+ u64 uninstall_protocol_interface;
+ u64 handle_protocol;
+ u64 __reserved;
+ u64 register_protocol_notify;
+ u64 locate_handle;
+ u64 locate_device_path;
+ u64 install_configuration_table;
+ u64 load_image;
+ u64 start_image;
+ u64 exit;
+ u64 unload_image;
+ u64 exit_boot_services;
+ u64 get_next_monotonic_count;
+ u64 stall;
+ u64 set_watchdog_timer;
+ u64 connect_controller;
+ u64 disconnect_controller;
+ u64 open_protocol;
+ u64 close_protocol;
+ u64 open_protocol_information;
+ u64 protocols_per_handle;
+ u64 locate_handle_buffer;
+ u64 locate_protocol;
+ u64 install_multiple_protocol_interfaces;
+ u64 uninstall_multiple_protocol_interfaces;
+ u64 calculate_crc32;
+ u64 copy_mem;
+ u64 set_mem;
+ u64 create_event_ex;
+} __packed efi_boot_services_64_t;
+
/*
* EFI Boot Services table
*/
@@ -231,6 +327,15 @@
EfiPciIoAttributeOperationMaximum
} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef struct {
+ u64 read;
+ u64 write;
+} efi_pci_io_protocol_access_64_t;
typedef struct {
void *read;
@@ -238,6 +343,46 @@
} efi_pci_io_protocol_access_t;
typedef struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_32;
+
+typedef struct {
+ u64 poll_mem;
+ u64 poll_io;
+ efi_pci_io_protocol_access_64_t mem;
+ efi_pci_io_protocol_access_64_t io;
+ efi_pci_io_protocol_access_64_t pci;
+ u64 copy_mem;
+ u64 map;
+ u64 unmap;
+ u64 allocate_buffer;
+ u64 free_buffer;
+ u64 flush;
+ u64 get_location;
+ u64 attributes;
+ u64 get_bar_attributes;
+ u64 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_64;
+
+typedef struct {
void *poll_mem;
void *poll_io;
efi_pci_io_protocol_access_t mem;
@@ -292,6 +437,42 @@
typedef struct {
efi_table_hdr_t hdr;
+ u32 get_time;
+ u32 set_time;
+ u32 get_wakeup_time;
+ u32 set_wakeup_time;
+ u32 set_virtual_address_map;
+ u32 convert_pointer;
+ u32 get_variable;
+ u32 get_next_variable;
+ u32 set_variable;
+ u32 get_next_high_mono_count;
+ u32 reset_system;
+ u32 update_capsule;
+ u32 query_capsule_caps;
+ u32 query_variable_info;
+} efi_runtime_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 get_time;
+ u64 set_time;
+ u64 get_wakeup_time;
+ u64 set_wakeup_time;
+ u64 set_virtual_address_map;
+ u64 convert_pointer;
+ u64 get_variable;
+ u64 get_next_variable;
+ u64 set_variable;
+ u64 get_next_high_mono_count;
+ u64 reset_system;
+ u64 update_capsule;
+ u64 query_capsule_caps;
+ u64 query_variable_info;
+} efi_runtime_services_64_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
void *get_time;
void *set_time;
void *get_wakeup_time;
@@ -485,6 +666,38 @@
typedef struct {
u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_32_t;
+
+typedef struct {
+ u32 revision;
+ u64 parent_handle;
+ u64 system_table;
+ u64 device_handle;
+ u64 file_path;
+ u64 reserved;
+ u32 load_options_size;
+ u64 load_options;
+ u64 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_64_t;
+
+typedef struct {
+ u32 revision;
void *parent_handle;
efi_system_table_t *system_table;
void *device_handle;
@@ -511,6 +724,34 @@
efi_char16_t filename[1];
} efi_file_info_t;
+typedef struct {
+ u64 revision;
+ u32 open;
+ u32 close;
+ u32 delete;
+ u32 read;
+ u32 write;
+ u32 get_position;
+ u32 set_position;
+ u32 get_info;
+ u32 set_info;
+ u32 flush;
+} efi_file_handle_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open;
+ u64 close;
+ u64 delete;
+ u64 read;
+ u64 write;
+ u64 get_position;
+ u64 set_position;
+ u64 get_info;
+ u64 set_info;
+ u64 flush;
+} efi_file_handle_64_t;
+
typedef struct _efi_file_handle {
u64 revision;
efi_status_t (*open)(struct _efi_file_handle *,
@@ -573,6 +814,7 @@
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
struct efi_memory_map *memmap;
+ unsigned long flags;
} efi;
static inline int
@@ -659,18 +901,17 @@
#define EFI_ARCH_1 6 /* First arch-specific bit */
#ifdef CONFIG_EFI
-# ifdef CONFIG_X86
-extern int efi_enabled(int facility);
-# else
-static inline int efi_enabled(int facility)
+/*
+ * Test whether the above EFI_* bits are enabled.
+ */
+static inline bool efi_enabled(int feature)
{
- return 1;
+ return test_bit(feature, &efi.flags) != 0;
}
-# endif
#else
-static inline int efi_enabled(int facility)
+static inline bool efi_enabled(int feature)
{
- return 0;
+ return false;
}
#endif
@@ -809,6 +1050,17 @@
bool deleting;
};
+struct efi_simple_text_output_protocol_32 {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+};
+
+struct efi_simple_text_output_protocol_64 {
+ u64 reset;
+ u64 output_string;
+ u64 test_string;
+};
struct efi_simple_text_output_protocol {
void *reset;
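
Note: efi_enabled() is now a plain test_bit() on the new efi.flags word for
all architectures, instead of an x86-only out-of-line function. A usage
sketch, assuming the EFI_* feature bits defined above:

    if (efi_enabled(EFI_RUNTIME_SERVICES)) {
        /* EFI runtime services are available on this boot */
    }
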
diff --git a/include/linux/file.h b/include/linux/file.h
index cbacf4f..4d69123 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -28,33 +28,36 @@
struct fd {
struct file *file;
- int need_put;
+ unsigned int flags;
};
+#define FDPUT_FPUT 1
+#define FDPUT_POS_UNLOCK 2
static inline void fdput(struct fd fd)
{
- if (fd.need_put)
+ if (fd.flags & FDPUT_FPUT)
fput(fd.file);
}
extern struct file *fget(unsigned int fd);
-extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern struct file *fget_raw(unsigned int fd);
+extern unsigned long __fdget(unsigned int fd);
+extern unsigned long __fdget_raw(unsigned int fd);
+extern unsigned long __fdget_pos(unsigned int fd);
+
+static inline struct fd __to_fd(unsigned long v)
+{
+ return (struct fd){(struct file *)(v & ~3),v & 3};
+}
static inline struct fd fdget(unsigned int fd)
{
- int b;
- struct file *f = fget_light(fd, &b);
- return (struct fd){f,b};
+ return __to_fd(__fdget(fd));
}
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
-
static inline struct fd fdget_raw(unsigned int fd)
{
- int b;
- struct file *f = fget_raw_light(fd, &b);
- return (struct fd){f,b};
+ return __to_fd(__fdget_raw(fd));
}
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
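
Note: struct fd is now unpacked from a single unsigned long: __fdget() and
friends return the struct file pointer with the FDPUT_* flags stored in the
low two bits, which __to_fd() separates again (and which is why struct file
gains the aligned(4) attribute in the fs.h hunk below). The calling pattern
is unchanged; a sketch, assuming a valid descriptor number fd:

    struct fd f = fdget(fd);    /* __to_fd(__fdget(fd)) */

    if (!f.file)
        return -EBADF;
    /* ... operate on f.file ... */
    fdput(f);                   /* fput() only if FDPUT_FPUT is set */
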
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 5d7782e..c3683bd 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -200,6 +200,7 @@
unsigned irmc:1;
unsigned bc_implemented:2;
+ work_func_t workfn;
struct delayed_work work;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6082956..23b2a35 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -123,6 +123,9 @@
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)0x4000)
+/* File needs atomic accesses to f_pos */
+#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@@ -780,13 +783,14 @@
const struct file_operations *f_op;
/*
- * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR.
+ * Protects f_ep_links, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
+ struct mutex f_pos_lock;
loff_t f_pos;
struct fown_struct f_owner;
const struct cred *f_cred;
@@ -808,7 +812,7 @@
#ifdef CONFIG_DEBUG_WRITECOUNT
unsigned long f_mnt_write_state;
#endif
-};
+} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
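
Note: with FMODE_ATOMIC_POS set, f_pos updates are serialized by the new
f_pos_lock mutex; callers reach it through __fdget_pos(), which reports via
FDPUT_POS_UNLOCK in the fd flags whether the mutex was taken. A rough sketch
of the release side (example_fdput_pos() is illustrative; only __fdget_pos()
is declared in this patch):

    static void example_fdput_pos(struct fd f)
    {
        if (f.flags & FDPUT_POS_UNLOCK)
            mutex_unlock(&f.file->f_pos_lock);
        fdput(f);
    }
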
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 3d286ff..64cf3ef 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -99,7 +99,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name);
+ const unsigned char *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_event *event);
@@ -160,7 +160,7 @@
struct fasync_struct *fsn_fa; /* async notification */
- struct fsnotify_event overflow_event; /* Event we queue when the
+ struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4e4cc28..4cdb3a1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -495,10 +495,6 @@
FILTER_TRACE_FN,
};
-#define EVENT_STORAGE_SIZE 128
-extern struct mutex event_storage_mutex;
-extern char event_storage[EVENT_STORAGE_SIZE];
-
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b0d95ca..6435f46 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -55,7 +55,11 @@
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
extern int futex_cmpxchg_enabled;
+#endif
#else
static inline void exit_robust_list(struct task_struct *curr)
{
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0437439..39b81dc 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -123,6 +123,10 @@
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
+/*
+ * GFP_THISNODE does not perform any reclaim; you most likely want to
+ * use __GFP_THISNODE to allocate from a given node without fallback!
+ */
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
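
Note: the comment above captures the intent of this series: GFP_THISNODE is
the no-reclaim slab-internal variant, while ordinary callers that just want
to pin an allocation to one node should use __GFP_THISNODE. A hedged sketch:

    /* allocate a page on node 'nid' only, with no fallback to other nodes */
    struct page *page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);

    if (!page)
        return -ENOMEM;
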
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index db51201..b826239 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,46 +157,6 @@
return HPAGE_PMD_NR;
return 1;
}
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
- if (PageTail(page)) {
- struct page *head;
- head = page->first_page;
- smp_rmb();
- /*
- * head may be a dangling pointer.
- * __split_huge_page_refcount clears PageTail before
- * overwriting first_page, so if PageTail is still
- * there it means the head pointer isn't dangling.
- */
- if (PageTail(page))
- return head;
- }
- return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@
do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
do { } while (0)
-#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
diff --git a/include/linux/init.h b/include/linux/init.h
index e168880..a3ba270 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -163,6 +163,23 @@
#ifndef __ASSEMBLY__
+#ifdef CONFIG_LTO
+/* Work around an LTO gcc problem: when there is no reference to a variable
+ * in a module it will be moved to the end of the program. This causes
+ * reordering of initcalls which the kernel does not like.
+ * Add a dummy reference function to avoid this. The function is
+ * deleted by the linker.
+ */
+#define LTO_REFERENCE_INITCALL(x) \
+ ; /* yes this is needed */ \
+ static __used __exit void *reference_##x(void) \
+ { \
+ return &x; \
+ }
+#else
+#define LTO_REFERENCE_INITCALL(x)
+#endif
+
/* initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined
* by link order.
@@ -175,7 +192,8 @@
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn
+ __attribute__((__section__(".initcall" #id ".init"))) = fn; \
+ LTO_REFERENCE_INITCALL(__initcall_##fn##id)
/*
* Early initcalls run before initializing SMP.
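
Note: every __define_initcall() now also emits an LTO_REFERENCE_INITCALL()
so that gcc LTO cannot move the otherwise-unreferenced initcall variable and
break initcall ordering. For example, core_initcall(foo_init) would now
roughly expand to (sketch, foo_init hypothetical):

    static initcall_t __initcall_foo_init1 __used
        __attribute__((__section__(".initcall1.init"))) = foo_init;
    static __used __exit void *reference___initcall_foo_init1(void)
    {
        return &__initcall_foo_init1;    /* dropped again at link time */
    }
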
diff --git a/include/linux/io.h b/include/linux/io.h
index f4f42fa..8a18e75 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -24,7 +24,7 @@
struct device;
-void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e7831d2..35e7eca 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -118,9 +118,7 @@
* the new maximum will handle anyone else. I may have to revisit this
* in the future.
*/
-#define MIN_QUEUESMAX 1
#define DFLT_QUEUESMAX 256
-#define HARD_QUEUESMAX 1024
#define MIN_MSGMAX 1
#define DFLT_MSG 10U
#define DFLT_MSGMAX 10
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6601702..19ae05d 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -30,7 +30,9 @@
work->func = func;
}
-void irq_work_queue(struct irq_work *work);
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+
+bool irq_work_queue(struct irq_work *work);
void irq_work_run(void);
void irq_work_sync(struct irq_work *work);
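
Note: irq_work_queue() now returns bool (false when the work was already
pending) and DEFINE_IRQ_WORK() allows static initialization. A minimal
sketch with hypothetical names:

    static void example_irq_work_func(struct irq_work *work)
    {
        /* runs from hard/soft interrupt context */
    }
    static DEFINE_IRQ_WORK(example_work, example_irq_work_func);

    ...
    if (!irq_work_queue(&example_work))
        ; /* already queued, nothing more to do */
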
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 196d1ea..08fb024 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -458,7 +458,7 @@
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
-#define TAINT_UNSAFE_SMP 2
+#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5be9f02..d267623 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -249,7 +249,8 @@
const void *kernfs_super_ns(struct super_block *sb);
struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns);
+ struct kernfs_root *root, bool *new_sb_created,
+ const void *ns);
void kernfs_kill_sb(struct super_block *sb);
void kernfs_init(void);
@@ -317,7 +318,7 @@
static inline struct dentry *
kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns)
+ struct kernfs_root *root, bool *new_sb_created, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -368,9 +369,9 @@
static inline struct dentry *
kernfs_mount(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root)
+ struct kernfs_root *root, bool *new_sb_created)
{
- return kernfs_mount_ns(fs_type, flags, root, NULL);
+ return kernfs_mount_ns(fs_type, flags, root, new_sb_created, NULL);
}
#endif /* __LINUX_KERNFS_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 6d4066c..a756419 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -127,12 +127,6 @@
struct kexec_segment __user *segments,
unsigned long flags);
extern int kernel_kexec(void);
-#ifdef CONFIG_COMPAT
-extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct compat_kexec_segment __user *segments,
- unsigned long flags);
-#endif
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
extern void crash_kexec(struct pt_regs *);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index a6a42dd..34a513a 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -12,9 +12,9 @@
#endif
#ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C"
+#define CPP_ASMLINKAGE extern "C" __visible
#else
-#define CPP_ASMLINKAGE
+#define CPP_ASMLINKAGE __visible
#endif
#ifndef asmlinkage
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 92b1bfc..008388f9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -252,9 +252,9 @@
unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
- unsigned int references:11; /* 32 bits */
+ unsigned int references:12; /* 32 bits */
};
/*
@@ -265,7 +265,7 @@
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
-extern void lockdep_sys_exit(void);
+extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -303,7 +303,7 @@
(lock)->dep_map.key, sub)
#define lockdep_set_novalidate_class(lock) \
- lockdep_set_class(lock, &__lockdep_no_validate__)
+ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
* Compare locking classes
*/
@@ -326,9 +326,8 @@
*
* Values for check:
*
- * 0: disabled
- * 1: simple checks (freeing, held-at-exit-time, etc.)
- * 2: full validation
+ * 0: simple checks (freeing, held-at-exit-time, etc.)
+ * 1: full validation
*/
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
@@ -479,15 +478,9 @@
* on the per lock-class debug mode:
*/
-#ifdef CONFIG_PROVE_LOCKING
- #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
- #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i)
- #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
-#else
- #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
- #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
- #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
-#endif
+#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
@@ -518,13 +511,13 @@
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
- lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
- lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index ad1ae7f..78c76cd 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -387,7 +387,7 @@
struct i2c_client *muic; /* slave addr 0x4a */
struct mutex iolock;
- int type;
+ unsigned long type;
struct platform_device *battery; /* battery control (not fuel gauge) */
int irq;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 4ecb24b..d68ada5 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -163,7 +163,7 @@
int ono;
u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
- int type;
+ unsigned long type;
bool wakeup;
};
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index a5a7f01..54b5458 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -252,7 +252,7 @@
struct tps65217 {
struct device *dev;
struct tps65217_board *pdata;
- unsigned int id;
+ unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
@@ -263,7 +263,7 @@
return dev_get_drvdata(dev);
}
-static inline int tps65217_chip_id(struct tps65217 *tps65217)
+static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
{
return tps65217->id;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d354a72..a0df429 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -175,7 +175,7 @@
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
/*
* mapping from the currently active vm_flags protection bits (the
@@ -399,8 +399,18 @@
static inline struct page *compound_head(struct page *page)
{
- if (unlikely(PageTail(page)))
- return page->first_page;
+ if (unlikely(PageTail(page))) {
+ struct page *head = page->first_page;
+
+ /*
+ * page->first_page may be a dangling pointer to an old
+ * compound page, so recheck that it is still a tail
+ * page before returning.
+ */
+ smp_rmb();
+ if (likely(PageTail(page)))
+ return head;
+ }
return page;
}
@@ -757,7 +767,7 @@
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid);
+ return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
static inline int page_cpupid_last(struct page *page)
@@ -766,7 +776,7 @@
}
static inline void page_cpupid_reset_last(struct page *page)
{
- page->_last_cpupid = -1;
+ page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f2052c..9b61b9b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -590,10 +590,10 @@
/*
* The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for GFP_THISNODE.
+ * allocations to a single node for __GFP_THISNODE.
*
* [0] : Zonelist with fallback
- * [1] : No fallback (GFP_THISNODE)
+ * [1] : No fallback (__GFP_THISNODE)
*/
#define MAX_ZONELISTS 2
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index d318193..11692de 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -46,6 +46,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
+struct optimistic_spin_queue;
struct mutex {
/* 1: unlocked, 0: locked, negative: locked, possible waiters */
atomic_t count;
@@ -55,7 +56,7 @@
struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- void *spin_mlock; /* Spinner MCS lock */
+ struct optimistic_spin_queue *osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
@@ -179,4 +180,4 @@
# define arch_mutex_cpu_relax() cpu_relax()
#endif
-#endif
+#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1005ebf..5a09a48 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -163,4 +163,11 @@
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8eeebd..daafd95 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3014,7 +3014,7 @@
{
return __skb_gso_segment(skb, features, true);
}
-__be16 skb_network_protocol(struct sk_buff *skb);
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b2fb167..5624e4e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -467,9 +467,14 @@
};
struct nfs_release_lockowner_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_lowner lock_owner;
};
+struct nfs_release_lockowner_res {
+ struct nfs4_sequence_res seq_res;
+};
+
struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb57c89..33aa2ca 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1169,8 +1169,23 @@
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{
+ int rc = pci_enable_msi_range(dev, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
@@ -1189,9 +1204,14 @@
static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
int maxvec)
{ return -ENOSYS; }
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{ return -ENOSYS; }
#endif
#ifdef CONFIG_PCIEPORTBUS
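
Note: the new pci_enable_msi_exact()/pci_enable_msix_exact() wrappers return
0 or a negative errno, never a positive vector count, so a driver that needs
exactly nvec vectors does not have to compare the return value of the
*_range() calls against nvec. A hedged driver sketch:

    static int example_setup_msix(struct pci_dev *pdev,
                                  struct msix_entry *entries, int nvec)
    {
        int rc = pci_enable_msix_exact(pdev, entries, nvec);

        if (rc < 0)
            return rc;    /* exactly nvec vectors were not available */
        return 0;
    }
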
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 97fbecd..7399e6a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2531,6 +2531,9 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
+#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index dbaf990..8183b46f 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -247,9 +247,10 @@
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
- container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
- })
+({ \
+ typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
+ container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+})
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -285,11 +286,11 @@
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_first_or_null_rcu(ptr, type, member) \
- ({struct list_head *__ptr = (ptr); \
- struct list_head *__next = ACCESS_ONCE(__ptr->next); \
- likely(__ptr != __next) ? \
- list_entry_rcu(__next, type, member) : NULL; \
- })
+({ \
+ struct list_head *__ptr = (ptr); \
+ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
+})
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
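
Note: the two macros above are only re-indented; their semantics are
unchanged. Typical use of list_first_or_null_rcu(), with a hypothetical
example_list:

    struct example_entry {
        struct list_head list;
        int value;
    };
    struct example_entry *first;

    rcu_read_lock();
    first = list_first_or_null_rcu(&example_list, struct example_entry, list);
    if (first)
        pr_info("first value: %d\n", first->value);
    rcu_read_unlock();
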
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 72bf3a0..00a7fd6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2001
*
@@ -44,7 +44,9 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <asm/barrier.h>
+extern int rcu_expedited; /* for sysctl */
#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
@@ -314,7 +316,7 @@
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
- lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+ lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
static inline void rcu_lock_release(struct lockdep_map *map)
@@ -479,11 +481,9 @@
do { \
rcu_preempt_sleep_check(); \
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh" \
- " read-side critical section"); \
+ "Illegal context switch in RCU-bh read-side critical section"); \
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched"\
- " read-side critical section"); \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
@@ -510,43 +510,40 @@
#endif /* #else #ifdef __CHECKER__ */
#define __rcu_access_pointer(p, space) \
- ({ \
- typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
- })
+({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+})
#define __rcu_dereference_check(p, c, space) \
- ({ \
- typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
- " usage"); \
- rcu_dereference_sparse(p, space); \
- smp_read_barrier_depends(); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
- })
+({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ rcu_dereference_sparse(p, space); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+})
#define __rcu_dereference_protected(p, c, space) \
- ({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
- " usage"); \
- rcu_dereference_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(p)); \
- })
+({ \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(p)); \
+})
#define __rcu_access_index(p, space) \
- ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
- })
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ rcu_dereference_sparse(p, space); \
+ (_________p1); \
+})
#define __rcu_dereference_index_check(p, c) \
- ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check()" \
- " usage"); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c, \
+ "suspicious rcu_dereference_index_check() usage"); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -585,12 +582,7 @@
* please be careful when making changes to rcu_assign_pointer() and the
* other macros that it invokes.
*/
-#define rcu_assign_pointer(p, v) \
- do { \
- smp_wmb(); \
- ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
- } while (0)
-
+#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
@@ -1015,11 +1007,21 @@
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
-#ifdef CONFIG_RCU_NOCB_CPU
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+ *delta_jiffies = ULONG_MAX;
+ return 0;
+}
+#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+
+#if defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
+#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+#endif
/* Only for use by adaptive-ticks code. */
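
Note: the most visible change above is rcu_assign_pointer() collapsing to
smp_store_release(); the publish/subscribe pattern is unchanged. A sketch
with a hypothetical RCU-protected global gp:

    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

    if (!p)
        return -ENOMEM;
    p->a = 1;                      /* fully initialize before publishing */
    rcu_assign_pointer(gp, p);     /* now smp_store_release(&gp, ...) */

    /* reader side: */
    rcu_read_lock();
    q = rcu_dereference(gp);
    if (q)
        use(q->a);                 /* use() is illustrative */
    rcu_read_unlock();
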
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 6f01771..425c659 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
@@ -27,6 +27,16 @@
#include <linux/cache.h>
+static inline unsigned long get_state_synchronize_rcu(void)
+{
+ return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+ might_sleep();
+}
+
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
@@ -68,12 +78,6 @@
call_rcu(head, func);
}
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
-{
- *delta_jiffies = ULONG_MAX;
- return 0;
-}
-
static inline void rcu_note_context_switch(int cpu)
{
rcu_sched_qs(cpu);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 72137ee..a59ca05 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
@@ -31,7 +31,9 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(int cpu);
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
void rcu_cpu_stall_reset(void);
/*
@@ -74,6 +76,8 @@
void rcu_barrier(void);
void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
+unsigned long get_state_synchronize_rcu(void);
+void cond_synchronize_rcu(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
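
Note: get_state_synchronize_rcu()/cond_synchronize_rcu() form a grace-period
cookie API: snapshot the current state, do other work, and block later only
if a full grace period has not already elapsed in the meantime (TINY_RCU
degenerates to might_sleep(), as in the rcutiny.h hunk above). Sketch:

    unsigned long oldstate = get_state_synchronize_rcu();

    /* ... other work that may itself span a grace period ... */

    cond_synchronize_rcu(oldstate);    /* no-op if a GP already completed */
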
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1da693d..b66c211 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -250,8 +250,7 @@
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
- int (*file_nonlinear)(struct page *, struct address_space *,
- struct vm_area_struct *vma);
+ int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a781dec..825ed83 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3,6 +3,8 @@
#include <uapi/linux/sched.h>
+#include <linux/sched/prio.h>
+
struct sched_param {
int sched_priority;
@@ -1077,6 +1079,7 @@
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
+ int depth;
struct sched_entity *parent;
/* rq on which this entity is (to be) queued: */
struct cfs_rq *cfs_rq;
@@ -1460,6 +1463,9 @@
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+ unsigned long preempt_disable_ip;
+#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
@@ -1470,9 +1476,10 @@
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
- int numa_migrate_deferred;
unsigned long numa_migrate_retry;
u64 node_stamp; /* migration stamp */
+ u64 last_task_numa_placement;
+ u64 last_sum_exec_runtime;
struct callback_head numa_work;
struct list_head numa_entry;
@@ -1483,15 +1490,22 @@
* Scheduling placement decisions are made based on the these counts.
* The values remain static for the duration of a PTE scan
*/
- unsigned long *numa_faults;
+ unsigned long *numa_faults_memory;
unsigned long total_numa_faults;
/*
* numa_faults_buffer records faults per node during the current
- * scan window. When the scan completes, the counts in numa_faults
- * decay and these values are copied.
+ * scan window. When the scan completes, the counts in
+ * numa_faults_memory decay and these values are copied.
*/
- unsigned long *numa_faults_buffer;
+ unsigned long *numa_faults_buffer_memory;
+
+ /*
+ * Track the nodes the process was running on when a NUMA hinting
+ * fault was incurred.
+ */
+ unsigned long *numa_faults_cpu;
+ unsigned long *numa_faults_buffer_cpu;
/*
* numa_faults_locality tracks if faults recorded during the last
@@ -1596,8 +1610,8 @@
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
-
-extern unsigned int sysctl_numa_balancing_migrate_deferred;
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+ int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
@@ -1613,6 +1627,11 @@
static inline void task_numa_free(struct task_struct *p)
{
}
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+ struct page *page, int src_nid, int dst_cpu)
+{
+ return true;
+}
#endif
static inline struct pid *task_pid(struct task_struct *task)
@@ -2080,7 +2099,16 @@
extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+ return PRIO_TO_NICE((p)->static_prio);
+}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
new file mode 100644
index 0000000..ac32258
--- /dev/null
+++ b/include/linux/sched/prio.h
@@ -0,0 +1,44 @@
+#ifndef _SCHED_PRIO_H
+#define _SCHED_PRIO_H
+
+#define MAX_NICE 19
+#define MIN_NICE -20
+#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
+#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+#endif /* _SCHED_PRIO_H */
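
Note: with MAX_RT_PRIO == 100 and NICE_WIDTH == 40, DEFAULT_PRIO is 120 and
the conversions work out as:

    NICE_TO_PRIO(-20) == 100    /* MAX_RT_PRIO, strongest nice level */
    NICE_TO_PRIO(0)   == 120    /* DEFAULT_PRIO */
    NICE_TO_PRIO(19)  == 139    /* MAX_PRIO - 1, weakest nice level */
    PRIO_TO_NICE(125) == 5
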
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 34e4ebe..6341f5b 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -1,24 +1,7 @@
#ifndef _SCHED_RT_H
#define _SCHED_RT_H
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+#include <linux/sched/prio.h>
static inline int rt_prio(int prio)
{
@@ -35,6 +18,7 @@
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -46,6 +30,12 @@
{
return p->normal_prio;
}
+
+static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+ return 0;
+}
+
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
diff --git a/include/linux/security.h b/include/linux/security.h
index 5623a7f..2fc42d1 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1040,6 +1040,7 @@
* Allocate a security structure to the xp->security field; the security
* field is initialized to NULL when the xfrm_policy is allocated.
* Return 0 if operation was successful (memory to allocate, legal context)
+ * @gfp specifies the allocation context for the security structure
* @xfrm_policy_clone_security:
* @old_ctx contains an existing xfrm_sec_ctx.
* @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@@ -1683,7 +1684,7 @@
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
@@ -2859,7 +2860,8 @@
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx);
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
@@ -2877,7 +2879,9 @@
#else /* CONFIG_SECURITY_NETWORK_XFRM */
-static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3ebbbe7..15ede6a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2451,8 +2451,8 @@
unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
-void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
- int len, int hlen);
+int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+ int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
@@ -2725,7 +2725,7 @@
static inline void nf_reset_trace(struct sk_buff *skb)
{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
@@ -2742,6 +2742,9 @@
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+ dst->nf_trace = src->nf_trace;
+#endif
}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abd..b5b2df6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -410,7 +410,7 @@
*
* %GFP_NOWAIT - Allocation will not sleep.
*
- * %GFP_THISNODE - Allocate node-local memory only.
+ * %__GFP_THISNODE - Allocate node-local memory only.
*
* %GFP_DMA - Allocation suitable for DMA.
* Should only be used for kmalloc() caches. Otherwise, use a
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 9b058ee..a2783cb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (C) IBM Corporation, 2006
* Copyright (C) Fujitsu, 2012
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 40ed9e9..1e67b7a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -98,6 +98,8 @@
#define __MAP(n,...) __MAP##n(__VA_ARGS__)
#define __SC_DECL(t, a) t a
+#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
#define __SC_CAST(t, a) (t) a
@@ -281,13 +283,15 @@
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_setattr(pid_t pid,
- struct sched_attr __user *attr);
+ struct sched_attr __user *attr,
+ unsigned int flags);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_getattr(pid_t pid,
struct sched_attr __user *attr,
- unsigned int size);
+ unsigned int size,
+ unsigned int flags);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/torture.h b/include/linux/torture.h
new file mode 100644
index 0000000..b2e2b46
--- /dev/null
+++ b/include/linux/torture.h
@@ -0,0 +1,100 @@
+/*
+ * Common functions for in-kernel torture tests.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_TORTURE_H
+#define __LINUX_TORTURE_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+#include <linux/lockdep.h>
+#include <linux/completion.h>
+#include <linux/debugobjects.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+/* Definitions for a non-string torture-test module parameter. */
+#define torture_param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+#define TORTURE_FLAG "-torture:"
+#define TOROUT_STRING(s) \
+ pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+#define VERBOSE_TOROUT_STRING(s) \
+ do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
+#define VERBOSE_TOROUT_ERRSTRING(s) \
+ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+
+/* Definitions for online/offline exerciser. */
+int torture_onoff_init(long ooholdoff, long oointerval);
+char *torture_onoff_stats(char *page);
+bool torture_onoff_failures(void);
+
+/* Low-rider random number generator. */
+struct torture_random_state {
+ unsigned long trs_state;
+ long trs_count;
+};
+#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+unsigned long torture_random(struct torture_random_state *trsp);
+
+/* Task shuffler, which causes CPUs to occasionally go idle. */
+void torture_shuffle_task_register(struct task_struct *tp);
+int torture_shuffle_init(long shuffint);
+
+/* Test auto-shutdown handling. */
+void torture_shutdown_absorb(const char *title);
+int torture_shutdown_init(int ssecs, void (*cleanup)(void));
+
+/* Task stuttering, which forces load/no-load transitions. */
+void stutter_wait(const char *title);
+int torture_stutter_init(int s);
+
+/* Initialization and cleanup. */
+void torture_init_begin(char *ttype, bool v, int *runnable);
+void torture_init_end(void);
+bool torture_cleanup(void);
+bool torture_must_stop(void);
+bool torture_must_stop_irq(void);
+void torture_kthread_stopping(char *title);
+int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+ char *f, struct task_struct **tp);
+void _torture_stop_kthread(char *m, struct task_struct **tp);
+
+#define torture_create_kthread(n, arg, tp) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+ "Failed to create " #n, &(tp))
+#define torture_stop_kthread(n, tp) \
+ _torture_stop_kthread("Stopping " #n " task", &(tp))
+
+#endif /* __LINUX_TORTURE_H */
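
Taken together, a torture-test module declares its tunables with torture_param(), spawns worker threads with torture_create_kthread(), and polls torture_must_stop() from each loop. A minimal sketch of a hypothetical client (all names below are illustrative, not part of this patch):

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/torture.h>

    torture_param(int, demo_holdoff, 0, "Seconds to wait before stressing");

    static struct task_struct *demo_task;

    static int demo_torture_runner(void *arg)
    {
            if (demo_holdoff > 0)
                    msleep(demo_holdoff * 1000);
            do {
                    /* ... exercise the subsystem under test here ... */
                    stutter_wait("demo_torture_runner");
            } while (!torture_must_stop());
            torture_kthread_stopping("demo_torture_runner");
            return 0;
    }

    static int __init demo_torture_init(void)
    {
            /* torture_init_begin()/torture_init_end() bracketing omitted */
            return torture_create_kthread(demo_torture_runner, NULL, demo_task);
    }
    module_init(demo_torture_init);
    MODULE_LICENSE("GPL");
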
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index accc497..7159a0a 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -60,6 +60,12 @@
unsigned int num_tracepoints;
struct tracepoint * const *tracepoints_ptrs;
};
+bool trace_module_has_bad_taint(struct module *mod);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+ return false;
+}
#endif /* CONFIG_MODULES */
struct tracepoint_iter {
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index c3fa807..2c14d9c 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -88,6 +88,7 @@
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
struct cdc_ncm_ctx {
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
struct hrtimer tx_timer;
struct tasklet_struct bh;
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e303eef..0662e98 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -30,7 +30,7 @@
struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
- wait_queue_head_t *wait;
+ wait_queue_head_t wait;
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521b..704f4f6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,10 +419,7 @@
static struct lock_class_key __key; \
const char *__lock_name; \
\
- if (__builtin_constant_p(fmt)) \
- __lock_name = (fmt); \
- else \
- __lock_name = #fmt; \
+ __lock_name = #fmt#args; \
\
__alloc_workqueue_key((fmt), (flags), (max_active), \
&__key, __lock_name, ##args); \
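
The replacement leans on plain preprocessor stringification: #fmt#args pastes the macro arguments into adjacent string literals, so the lockdep key name is always a compile-time constant and the __builtin_constant_p() special case becomes unnecessary. The idiom in isolation:

    #include <stdio.h>

    #define LOCK_NAME(fmt, args...) #fmt #args

    int main(void)
    {
            /* Adjacent literals concatenate: prints "events-%d"1 */
            puts(LOCK_NAME("events-%d", 1));
            return 0;
    }
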
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fc0e432..021b8a3 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,7 +97,7 @@
int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
+void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 9650a3f..b4956a5 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -31,8 +31,10 @@
#define IF_PREFIX_AUTOCONF 0x02
enum {
+ INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
+ INET6_IFADDR_STATE_ERRDAD,
INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
@@ -58,7 +60,7 @@
unsigned long cstamp; /* created timestamp */
unsigned long tstamp; /* updated timestamp */
- struct timer_list dad_timer;
+ struct delayed_work dad_work;
struct inet6_dev *idev;
struct rt6_info *rt;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 48ed75c..e77c104 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -129,6 +129,7 @@
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
diff --git a/include/net/sock.h b/include/net/sock.h
index 5c3f7c3..b9586a1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1488,6 +1488,11 @@
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+static inline void sock_release_ownership(struct sock *sk)
+{
+ sk->sk_lock.owned = 0;
+}
+
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -2186,7 +2191,6 @@
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
(1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
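
sock_release_ownership() is the inverse of the sock_owned_by_user() test above: it lets a path that holds the socket spinlock hand lock ownership back before dropping the lock. A simplified sketch of the intended pairing (not the exact release path):

    static void release_owned_sock(struct sock *sk)
    {
            spin_lock_bh(&sk->sk_lock.slock);
            /* ... drain work queued while the socket was owned ... */
            sock_release_ownership(sk);     /* sock_owned_by_user() is now false */
            spin_unlock_bh(&sk->sk_lock.slock);
    }
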
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 56fc366..743acce 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -480,20 +480,21 @@
#ifdef CONFIG_SYN_COOKIES
#include <linux/ktime.h>
-/* Syncookies use a monotonic timer which increments every 64 seconds.
+/* Syncookies use a monotonic timer which increments every 60 seconds.
* This counter is used both as a hash input and partially encoded into
* the cookie value. A cookie is only validated further if the delta
* between the current counter value and the encoded one is less than this,
- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
#define MAX_SYNCOOKIE_AGE 2
static inline u32 tcp_cookie_time(void)
{
- struct timespec now;
- getnstimeofday(&now);
- return now.tv_sec >> 6; /* 64 seconds granularity */
+ u64 val = get_jiffies_64();
+
+ do_div(val, 60 * HZ);
+ return val;
}
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
@@ -1303,7 +1304,8 @@
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
struct msghdr *data; /* data in MSG_FASTOPEN */
- u16 copied; /* queued in tcp_connect() */
+ size_t size;
+ int copied; /* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
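
With the counter derived from jiffies, validation reduces to bounding the distance between the current counter and the value recovered from the cookie. A worked sketch of the age check that MAX_SYNCOOKIE_AGE implies (helper name is illustrative):

    static bool cookie_still_valid(u32 cookie_time)
    {
            u32 diff = tcp_cookie_time() - cookie_time;

            /* Accepted for fewer than two 60-second periods, i.e. at
             * most 2 * 60 seconds after the cookie was minted.
             */
            return diff < MAX_SYNCOOKIE_AGE;
    }
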
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index afa5730..fb5654a 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1648,6 +1648,11 @@
}
#endif
+static inline int aead_len(struct xfrm_algo_aead *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
static inline int xfrm_alg_len(const struct xfrm_algo *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
@@ -1686,6 +1691,12 @@
return 0;
}
+static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
+{
+ return kmemdup(orig, aead_len(orig), GFP_KERNEL);
+}
+
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
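
Both length helpers compute the struct header plus the key material, with (alg_key_len + 7) / 8 rounding a key length in bits up to whole bytes; a separate aead_len() is needed because struct xfrm_algo_aead carries an extra alg_icv_len field, so its sizeof differs from struct xfrm_algo. A sketch restating the clone above as a worked example:

    static struct xfrm_algo_aead *clone_aead(struct xfrm_algo_aead *alg)
    {
            /* For a 192-bit key, aead_len() is sizeof(*alg) + (192 + 7) / 8
             * = sizeof(*alg) + 24, so the ICV length and the key bytes are
             * both covered by the single kmemdup().
             */
            return kmemdup(alg, aead_len(alg), GFP_KERNEL);
    }
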
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 68d92e3..6e89ef6 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -449,14 +449,22 @@
/* dapm audio pin control and status */
int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
const char *pin);
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
void snd_soc_dapm_auto_nc_codec_pins(struct snd_soc_codec *codec);
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index ae5a171..4483fad 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -12,6 +12,7 @@
int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
+ void (*iscsit_wait_conn)(struct iscsi_conn *);
void (*iscsit_free_conn)(struct iscsi_conn *);
int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ddc179b..1fef3e6 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -83,7 +83,7 @@
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->action = action;
__entry->runstate = task->tk_runstate;
@@ -91,7 +91,7 @@
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c7bbbe7..464ea82 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,11 +287,11 @@
__field(int, reason)
),
TP_fast_assign(
- unsigned long older_than_this = work->older_than_this;
+ unsigned long *older_than_this = work->older_than_this;
strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
- __entry->older = older_than_this;
+ __entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
- (jiffies - older_than_this) * 1000 / HZ : -1;
+ (jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1a8b28d..1ee19a2 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -310,15 +310,12 @@
#undef __array
#define __array(type, item, len) \
do { \
- mutex_lock(&event_storage_mutex); \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
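
Building the "type[len]" string from literal concatenation removes the shared event_storage buffer and its mutex entirely. The expansion in isolation:

    #include <stdio.h>

    #define __stringify_1(x)        #x
    #define __stringify(x)          __stringify_1(x)
    #define ARRAY_TYPE_STR(type, len) #type "[" __stringify(len) "]"

    int main(void)
    {
            /* For __array(char, comm, 16) the field type is "char[16]". */
            puts(ARRAY_TYPE_STR(char, 16));
            return 0;
    }
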
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a20a9b4..6db6678 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -191,6 +191,7 @@
/* fs/readdir.c */
#define __NR_getdents64 61
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
/* fs/read_write.c */
@@ -692,9 +693,13 @@
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 273
__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
#undef __NR_syscalls
-#define __NR_syscalls 274
+#define __NR_syscalls 276
/*
* All syscalls below here should go away really,
diff --git a/init/Kconfig b/init/Kconfig
index 009a797..d56cb03 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1387,6 +1387,13 @@
support for "fast userspace mutexes". The resulting kernel may not
run glibc-based applications correctly.
+config HAVE_FUTEX_CMPXCHG
+ bool
+ help
+ Architectures should select this if futex_atomic_cmpxchg_inatomic()
+ is implemented and always working. This removes a couple of runtime
+ checks.
+
config EPOLL
bool "Enable eventpoll support" if EXPERT
default y
diff --git a/init/main.c b/init/main.c
index eb03090..9c7fd4c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -561,7 +561,6 @@
init_timers();
hrtimers_init();
softirq_init();
- acpi_early_init();
timekeeping_init();
time_init();
sched_clock_postinit();
@@ -613,6 +612,7 @@
calibrate_delay();
pidmap_init();
anon_vma_init();
+ acpi_early_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
diff --git a/ipc/compat.c b/ipc/compat.c
index f486b00..98b9016 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -430,9 +430,9 @@
}
COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp,
- compat_ssize_t, msgsz, long, msgtyp, int, msgflg)
+ compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg)
{
- return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, msgtyp,
+ return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp,
msgflg, compat_do_msg_fill);
}
@@ -498,7 +498,7 @@
return err;
}
-long compat_sys_msgctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(msgctl, int, first, int, second, void __user *, uptr)
{
int err, err2;
struct msqid64_ds m64;
@@ -668,7 +668,7 @@
return err;
}
-long compat_sys_shmctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
{
void __user *p;
struct shmid64_ds s64;
@@ -749,8 +749,9 @@
return err;
}
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsops, const struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+ unsigned, nsops,
+ const struct compat_timespec __user *, timeout)
{
struct timespec __user *ts64 = NULL;
if (timeout) {
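
COMPAT_SYSCALL_DEFINEn (from <linux/compat.h>) replaces the open-coded asmlinkage prototypes so that every 32-bit argument is explicitly narrowed to its declared compat type before the body runs; on 64-bit architectures such as s390 the upper halves of argument registers are otherwise not guaranteed to be clean. The generated wrapper has roughly this shape (simplified; the real macros differ in detail):

    static long C_SYSC_foo(int a, compat_long_t b);    /* the visible body */

    asmlinkage long compat_SyS_foo(long a, long b)
    {
            /* stale upper register bits are stripped by these casts */
            return C_SYSC_foo((int)a, (compat_long_t)b);
    }
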
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index 63d7c6de..d5874729 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -46,9 +46,9 @@
| __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs);
}
-asmlinkage long compat_sys_mq_open(const char __user *u_name,
- int oflag, compat_mode_t mode,
- struct compat_mq_attr __user *u_attr)
+COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
+ int, oflag, compat_mode_t, mode,
+ struct compat_mq_attr __user *, u_attr)
{
void __user *p = NULL;
if (u_attr && oflag & O_CREAT) {
@@ -78,10 +78,10 @@
return 0;
}
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
- const char __user *u_msg_ptr,
- size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
+ const char __user *, u_msg_ptr,
+ compat_size_t, msg_len, unsigned int, msg_prio,
+ const struct compat_timespec __user *, u_abs_timeout)
{
struct timespec __user *u_ts;
@@ -92,10 +92,10 @@
msg_prio, u_ts);
}
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
- char __user *u_msg_ptr,
- size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
+ char __user *, u_msg_ptr,
+ compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
+ const struct compat_timespec __user *, u_abs_timeout)
{
struct timespec __user *u_ts;
if (compat_prepare_timeout(&u_ts, u_abs_timeout))
@@ -105,8 +105,8 @@
u_msg_prio, u_ts);
}
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
- const struct compat_sigevent __user *u_notification)
+COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+ const struct compat_sigevent __user *, u_notification)
{
struct sigevent __user *p = NULL;
if (u_notification) {
@@ -122,9 +122,9 @@
return sys_mq_notify(mqdes, p);
}
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
- const struct compat_mq_attr __user *u_mqstat,
- struct compat_mq_attr __user *u_omqstat)
+COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+ const struct compat_mq_attr __user *, u_mqstat,
+ struct compat_mq_attr __user *, u_omqstat)
{
struct mq_attr mqstat;
struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 383d638..5bb8bfe6 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -22,6 +22,16 @@
return which;
}
+static int proc_mq_dointvec(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table mq_table;
+ memcpy(&mq_table, table, sizeof(mq_table));
+ mq_table.data = get_mq(table);
+
+ return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
+}
+
static int proc_mq_dointvec_minmax(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -33,12 +43,10 @@
lenp, ppos);
}
#else
+#define proc_mq_dointvec NULL
#define proc_mq_dointvec_minmax NULL
#endif
-static int msg_queues_limit_min = MIN_QUEUESMAX;
-static int msg_queues_limit_max = HARD_QUEUESMAX;
-
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;
@@ -51,9 +59,7 @@
.data = &init_ipc_ns.mq_queues_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
- .extra1 = &msg_queues_limit_min,
- .extra2 = &msg_queues_limit_max,
+ .proc_handler = proc_mq_dointvec,
},
{
.procname = "msg_max",
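
The handler shallow-copies the shared ctl_table entry and re-points .data at the value owned by the caller's ipc namespace before delegating to proc_dointvec(), so one static table serves every namespace without a min/max clamp. The pattern in isolation (names are illustrative):

    static int proc_dointvec_for(void *per_ns_value, struct ctl_table *table,
                                 int write, void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
    {
            struct ctl_table tmp = *table;  /* never mutate the shared entry */

            tmp.data = per_ns_value;        /* e.g. &ipc_ns->mq_queues_max */
            return proc_dointvec(&tmp, write, buffer, lenp, ppos);
    }
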
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ccf1f9f..c3b3117 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -433,9 +433,9 @@
error = -EACCES;
goto out_unlock;
}
- if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
- (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
- !capable(CAP_SYS_RESOURCE))) {
+
+ if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+ !capable(CAP_SYS_RESOURCE)) {
error = -ENOSPC;
goto out_unlock;
}
diff --git a/ipc/msg.c b/ipc/msg.c
index 245db11..6498531 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -901,6 +901,8 @@
return -EINVAL;
if (msgflg & MSG_COPY) {
+ if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
+ return -EINVAL;
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);
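
From user space this means a MSG_COPY peek must also pass IPC_NOWAIT and must not be combined with MSG_EXCEPT. A usage sketch (MSG_COPY is defined in <linux/msg.h> and requires a kernel built with CONFIG_CHECKPOINT_RESTORE):

    #include <sys/types.h>
    #include <sys/msg.h>

    #ifndef MSG_COPY
    #define MSG_COPY 040000                 /* copy, do not remove */
    #endif

    struct msg { long mtype; char mtext[128]; };

    /* Peek at message number 'n' without dequeueing it; omitting
     * IPC_NOWAIT (or adding MSG_EXCEPT) now fails with EINVAL.
     */
    static ssize_t peek_msg(int msqid, long n, struct msg *m)
    {
            return msgrcv(msqid, m, sizeof(m->mtext), n, MSG_COPY | IPC_NOWAIT);
    }
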
diff --git a/kernel/Makefile b/kernel/Makefile
index bc010ee..f2a8b62 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -18,11 +18,13 @@
CFLAGS_REMOVE_irq_work.o = -pg
endif
+# cond_syscall is currently not LTO compatible
+CFLAGS_sys_ni.o = $(DISABLE_LTO)
+
obj-y += sched/
obj-y += locking/
obj-y += power/
obj-y += printk/
-obj-y += cpu/
obj-y += irq/
obj-y += rcu/
@@ -93,6 +95,7 @@
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
+obj-$(CONFIG_TORTURE_TEST) += torture.o
$(obj)/configs.o: $(obj)/config_data.h
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a23..95a20f3 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@
struct audit_reply {
__u32 portid;
- pid_t pid;
+ struct net *net;
struct sk_buff *skb;
};
@@ -500,7 +500,7 @@
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
- struct net *net = get_net_ns_by_pid(dest->pid);
+ struct net *net = dest->net;
struct audit_net *aunet = net_generic(net, audit_net_id);
/* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@
while ((skb = __skb_dequeue(&dest->q)) != NULL)
netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
+ put_net(net);
kfree(dest);
return 0;
@@ -543,7 +544,7 @@
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
- struct net *net = get_net_ns_by_pid(reply->pid);
+ struct net *net = reply->net;
struct audit_net *aunet = net_generic(net, audit_net_id);
mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+ put_net(net);
kfree(reply);
return 0;
}
/**
* audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
* @seq: sequence number
* @type: audit message type
* @done: done (last) flag
@@ -568,9 +570,11 @@
* Allocates an skb, builds the netlink message, and sends it to the port id.
* No failure notifications.
*/
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
int multi, const void *payload, int size)
{
+ u32 portid = NETLINK_CB(request_skb).portid;
+ struct net *net = sock_net(NETLINK_CB(request_skb).sk);
struct sk_buff *skb;
struct task_struct *tsk;
struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@
if (!skb)
goto out;
+ reply->net = get_net(net);
reply->portid = portid;
- reply->pid = task_pid_vnr(current);
reply->skb = skb;
tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -604,9 +608,19 @@
int err = 0;
/* Only support the initial namespaces for now. */
+ /*
+ * We return ECONNREFUSED because it tricks userspace into thinking
+ * that audit was not configured into the kernel. Lots of users
+ * configure their PAM stack (because that's what the distro does)
+ * to reject login if unable to send messages to audit. If we return
+ * ECONNREFUSED the PAM stack thinks the kernel does not have audit
+ * configured in and will let login proceed. If we return EPERM
+ * userspace will reject all logins. This should be removed when we
+ * support non-init namespaces!!
+ */
if ((current_user_ns() != &init_user_ns) ||
(task_active_pid_ns(current) != &init_pid_ns))
- return -EPERM;
+ return -ECONNREFUSED;
switch (msg_type) {
case AUDIT_LIST:
@@ -673,8 +687,7 @@
seq = nlmsg_hdr(skb)->nlmsg_seq;
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
- &af, sizeof(af));
+ audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
return 0;
}
@@ -794,8 +807,7 @@
s.backlog = skb_queue_len(&audit_skb_queue);
s.version = AUDIT_VERSION_LATEST;
s.backlog_wait_time = audit_backlog_wait_time;
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
- &s, sizeof(s));
+ audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_SET: {
@@ -905,7 +917,7 @@
seq, data, nlmsg_len(nlh));
break;
case AUDIT_LIST_RULES:
- err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+ err = audit_list_rules_send(skb, seq);
break;
case AUDIT_TRIM:
audit_trim_trees();
@@ -970,8 +982,8 @@
memcpy(sig_data->ctx, ctx, len);
security_release_secctx(ctx, len);
}
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
- 0, 0, sig_data, sizeof(*sig_data) + len);
+ audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+ sig_data, sizeof(*sig_data) + len);
kfree(sig_data);
break;
case AUDIT_TTY_GET: {
@@ -983,8 +995,7 @@
s.log_passwd = tsk->signal->audit_tty_log_passwd;
spin_unlock(&tsk->sighand->siglock);
- audit_send_reply(NETLINK_CB(skb).portid, seq,
- AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+ audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_TTY_SET: {
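
get_net_ns_by_pid() resolved the requester's namespace from a pid inside the reply kthread, by which time the requesting task may already be gone; pinning the namespace with get_net() at request time gives the reply a counted reference with a clear owner. The pattern in outline (a sketch, not the full audit code):

    static int reply_worker(void *arg)
    {
            struct audit_reply *reply = arg;

            /* ... netlink_unicast() the skb into reply->net ... */
            put_net(reply->net);    /* balances get_net() in the requester */
            kfree(reply);
            return 0;
    }

    static void queue_reply(struct sk_buff *request_skb, struct audit_reply *reply)
    {
            reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
            kthread_run(reply_worker, reply, "reply_worker");
    }
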
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d..8df13221 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@
struct audit_netlink_list {
__u32 portid;
- pid_t pid;
+ struct net *net;
struct sk_buff_head q;
};
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67ccf0e..135944a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -916,7 +916,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
return 0;
}
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 2596fac..70b4554 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,7 +471,7 @@
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *dname)
+ const unsigned char *dname, u32 cookie)
{
struct inode *inode;
struct audit_parent *parent;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cc..92062fd 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
#include "audit.h"
/*
@@ -1065,11 +1067,13 @@
/**
* audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
* @seq: netlink audit message sequence (serial) number
*/
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
{
+ u32 portid = NETLINK_CB(request_skb).portid;
+ struct net *net = sock_net(NETLINK_CB(request_skb).sk);
struct task_struct *tsk;
struct audit_netlink_list *dest;
int err = 0;
@@ -1083,8 +1087,8 @@
dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
if (!dest)
return -ENOMEM;
+ dest->net = get_net(net);
dest->portid = portid;
- dest->pid = task_pid_vnr(current);
skb_queue_head_init(&dest->q);
mutex_lock(&audit_filter_mutex);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba..0c753dd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@
* per-subsystem and moved to css->id so that lookups are
* successful until the target css is released.
*/
+ mutex_lock(&cgroup_mutex);
idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ mutex_unlock(&cgroup_mutex);
cgrp->id = -1;
call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
- root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
- 0, 1, GFP_KERNEL);
- if (root_cgrp->id < 0)
+ ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+ if (ret < 0)
goto unlock_drop;
+ root_cgrp->id = ret;
/* Check for name clashes with existing mounts */
ret = -EBUSY;
@@ -2763,10 +2765,7 @@
*/
update_before = cgroup_serial_nr_next;
- mutex_unlock(&cgroup_mutex);
-
/* add/rm files for all cgroups created before */
- rcu_read_lock();
css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
struct cgroup *cgrp = css->cgroup;
@@ -2775,23 +2774,19 @@
inode = cgrp->dentry->d_inode;
dget(cgrp->dentry);
- rcu_read_unlock();
-
dput(prev);
prev = cgrp->dentry;
+ mutex_unlock(&cgroup_mutex);
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
ret = cgroup_addrm_files(cgrp, cfts, is_add);
- mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
-
- rcu_read_lock();
if (ret)
break;
}
- rcu_read_unlock();
+ mutex_unlock(&cgroup_mutex);
dput(prev);
deactivate_super(sb);
return ret;
@@ -2910,9 +2905,14 @@
* We should check if the process is exiting, otherwise
* it will race with cgroup_exit() in that the list
* entry won't be deleted though the process has exited.
+ * Do it while holding siglock so that we don't end up
+ * racing against cgroup_exit().
*/
+ spin_lock_irq(&p->sighand->siglock);
if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
list_add(&p->cg_list, &task_css_set(p)->tasks);
+ spin_unlock_irq(&p->sighand->siglock);
+
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
@@ -4112,17 +4112,17 @@
err = percpu_ref_init(&css->refcnt, css_release);
if (err)
- goto err_free;
+ goto err_free_css;
init_css(css, ss, cgrp);
err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
if (err)
- goto err_free;
+ goto err_free_percpu_ref;
err = online_css(css);
if (err)
- goto err_free;
+ goto err_clear_dir;
dget(cgrp->dentry);
css_get(css->parent);
@@ -4138,8 +4138,11 @@
return 0;
-err_free:
+err_clear_dir:
+ cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
ss->css_free(css);
return err;
}
@@ -4158,7 +4161,7 @@
struct cgroup *cgrp;
struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
- int ssid, err = 0;
+ int ssid, err;
struct cgroup_subsys *ss;
struct super_block *sb = root->sb;
@@ -4168,19 +4171,13 @@
return -ENOMEM;
name = cgroup_alloc_name(dentry);
- if (!name)
+ if (!name) {
+ err = -ENOMEM;
goto err_free_cgrp;
+ }
rcu_assign_pointer(cgrp->name, name);
/*
- * Temporarily set the pointer to NULL, so idr_find() won't return
- * a half-baked cgroup.
- */
- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
- if (cgrp->id < 0)
- goto err_free_name;
-
- /*
* Only live parents can have children. Note that the liveliness
* check isn't strictly necessary because cgroup_mkdir() and
* cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4186,17 @@
*/
if (!cgroup_lock_live_group(parent)) {
err = -ENODEV;
- goto err_free_id;
+ goto err_free_name;
+ }
+
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+ if (cgrp->id < 0) {
+ err = -ENOMEM;
+ goto err_unlock;
}
/* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4228,7 @@
*/
err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
if (err < 0)
- goto err_unlock;
+ goto err_free_id;
lockdep_assert_held(&dentry->d_inode->i_mutex);
cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4264,12 @@
return 0;
-err_unlock:
- mutex_unlock(&cgroup_mutex);
- /* Release the reference count that we took on the superblock */
- deactivate_super(sb);
err_free_id:
idr_remove(&root->cgroup_idr, cgrp->id);
+ /* Release the reference count that we took on the superblock */
+ deactivate_super(sb);
+err_unlock:
+ mutex_unlock(&cgroup_mutex);
err_free_name:
kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
diff --git a/kernel/compat.c b/kernel/compat.c
index 0a09e48..488ff8c 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -110,8 +110,8 @@
return 0;
}
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz)
+COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
+ struct timezone __user *, tz)
{
if (tv) {
struct timeval ktv;
@@ -127,8 +127,8 @@
return 0;
}
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz)
+COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
+ struct timezone __user *, tz)
{
struct timespec kts;
struct timezone ktz;
@@ -236,8 +236,8 @@
return ret;
}
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp)
+COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
+ struct compat_timespec __user *, rmtp)
{
struct timespec tu, rmt;
mm_segment_t oldfs;
@@ -328,7 +328,7 @@
return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}
-asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
+COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
if (tbuf) {
struct tms tms;
@@ -354,7 +354,7 @@
* types that can be passed to put_user()/get_user().
*/
-asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
+COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
{
old_sigset_t s;
long ret;
@@ -424,8 +424,8 @@
#endif
-asmlinkage long compat_sys_setrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
{
struct rlimit r;
@@ -443,8 +443,8 @@
#ifdef COMPAT_RLIM_OLD_INFINITY
-asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
{
struct rlimit r;
int ret;
@@ -470,8 +470,8 @@
#endif
-asmlinkage long compat_sys_getrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
{
struct rlimit r;
int ret;
@@ -596,9 +596,9 @@
return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
-asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
- unsigned int len,
- compat_ulong_t __user *user_mask_ptr)
+COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid,
+ unsigned int, len,
+ compat_ulong_t __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
@@ -616,8 +616,8 @@
return retval;
}
-asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
- compat_ulong_t __user *user_mask_ptr)
+COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
+ compat_ulong_t __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
@@ -662,9 +662,9 @@
return 0;
}
-long compat_sys_timer_create(clockid_t which_clock,
- struct compat_sigevent __user *timer_event_spec,
- timer_t __user *created_timer_id)
+COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
+ struct compat_sigevent __user *, timer_event_spec,
+ timer_t __user *, created_timer_id)
{
struct sigevent __user *event = NULL;
@@ -680,9 +680,9 @@
return sys_timer_create(which_clock, event, created_timer_id);
}
-long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old)
+COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+ struct compat_itimerspec __user *, new,
+ struct compat_itimerspec __user *, old)
{
long err;
mm_segment_t oldfs;
@@ -703,8 +703,8 @@
return err;
}
-long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting)
+COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+ struct compat_itimerspec __user *, setting)
{
long err;
mm_segment_t oldfs;
@@ -720,8 +720,8 @@
return err;
}
-long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
+ struct compat_timespec __user *, tp)
{
long err;
mm_segment_t oldfs;
@@ -737,8 +737,8 @@
return err;
}
-long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
+ struct compat_timespec __user *, tp)
{
long err;
mm_segment_t oldfs;
@@ -754,8 +754,8 @@
return err;
}
-long compat_sys_clock_adjtime(clockid_t which_clock,
- struct compat_timex __user *utp)
+COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
+ struct compat_timex __user *, utp)
{
struct timex txc;
mm_segment_t oldfs;
@@ -777,8 +777,8 @@
return ret;
}
-long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
+ struct compat_timespec __user *, tp)
{
long err;
mm_segment_t oldfs;
@@ -818,9 +818,9 @@
return err;
}
-long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp)
+COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
+ struct compat_timespec __user *, rqtp,
+ struct compat_timespec __user *, rmtp)
{
long err;
mm_segment_t oldfs;
@@ -1010,7 +1010,7 @@
/* compat_time_t is a 32 bit "long" and needs to get converted. */
-asmlinkage long compat_sys_time(compat_time_t __user * tloc)
+COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
compat_time_t i;
struct timeval tv;
@@ -1026,7 +1026,7 @@
return i;
}
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
+COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
struct timespec tv;
int err;
@@ -1046,7 +1046,7 @@
#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
+COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
struct timex txc;
int err, ret;
@@ -1065,11 +1065,11 @@
}
#ifdef CONFIG_NUMA
-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
- compat_uptr_t __user *pages32,
- const int __user *nodes,
- int __user *status,
- int flags)
+COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
+ compat_uptr_t __user *, pages32,
+ const int __user *, nodes,
+ int __user *, status,
+ int, flags)
{
const void __user * __user *pages;
int i;
@@ -1085,10 +1085,10 @@
return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode,
- const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes)
+COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
+ compat_ulong_t, maxnode,
+ const compat_ulong_t __user *, old_nodes,
+ const compat_ulong_t __user *, new_nodes)
{
unsigned long __user *old = NULL;
unsigned long __user *new = NULL;
diff --git a/kernel/cpu/Makefile b/kernel/cpu/Makefile
deleted file mode 100644
index 59ab052..0000000
--- a/kernel/cpu/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y = idle.o
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6..e6b1b66 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@
 * Temporarily set the task's mems_allowed to target nodes of migration,
* so that the migration code can allocate pages on these nodes.
*
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
* While the mm_struct we are migrating is typically from some
* other task, the task_struct mems_allowed that we are hacking
* is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@
do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+ rcu_read_lock();
mems_cs = effective_nodemask_cpuset(task_cs(tsk));
guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+ rcu_read_unlock();
}
/*
@@ -2486,9 +2482,9 @@
task_lock(current);
cs = nearest_hardwall_ancestor(task_cs(current));
+ allowed = node_isset(node, cs->mems_allowed);
task_unlock(current);
- allowed = node_isset(node, cs->mems_allowed);
mutex_unlock(&callback_mutex);
return allowed;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6..661951a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -231,11 +231,29 @@
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);
-void perf_sample_event_took(u64 sample_len_ns)
+static void perf_duration_warn(struct irq_work *w)
{
+ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
u64 avg_local_sample_len;
u64 local_samples_len;
+
+ local_samples_len = __get_cpu_var(running_sample_length);
+ avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+
+ printk_ratelimited(KERN_WARNING
+ "perf interrupt took too long (%lld > %lld), lowering "
+ "kernel.perf_event_max_sample_rate to %d\n",
+ avg_local_sample_len, allowed_ns >> 1,
+ sysctl_perf_event_sample_rate);
+}
+
+static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
+
+void perf_sample_event_took(u64 sample_len_ns)
+{
u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+ u64 avg_local_sample_len;
+ u64 local_samples_len;
if (allowed_ns == 0)
return;
@@ -263,13 +281,14 @@
sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
- printk_ratelimited(KERN_WARNING
- "perf samples too long (%lld > %lld), lowering "
- "kernel.perf_event_max_sample_rate to %d\n",
- avg_local_sample_len, allowed_ns,
- sysctl_perf_event_sample_rate);
-
update_perf_cpu_limits();
+
+ if (!irq_work_queue(&perf_duration_work)) {
+ early_printk("perf interrupt took too long (%lld > %lld), lowering "
+ "kernel.perf_event_max_sample_rate to %d\n",
+ avg_local_sample_len, allowed_ns >> 1,
+ sysctl_perf_event_sample_rate);
+ }
}
static atomic64_t perf_event_id;
@@ -1714,7 +1733,7 @@
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
- struct pmu *pmu = group_event->pmu;
+ struct pmu *pmu = ctx->pmu;
u64 now = ctx->time;
bool simulate = false;
@@ -2563,8 +2582,6 @@
if (cpuctx->ctx.nr_branch_stack > 0
&& pmu->flush_branch_stack) {
- pmu = cpuctx->ctx.pmu;
-
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
@@ -6294,7 +6311,7 @@
* Ensures all contexts with the same task_ctx_nr have the same
* pmu_cpu_context too.
*/
-static void *find_pmu_context(int ctxn)
+static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
{
struct pmu *pmu;
@@ -7856,14 +7873,14 @@
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
- struct perf_event *event, *tmp;
+ struct perf_event *event;
perf_pmu_rotate_stop(ctx->pmu);
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
- __perf_remove_from_context(event);
+ rcu_read_unlock();
}
static void perf_event_exit_cpu_context(int cpu)
@@ -7887,11 +7904,11 @@
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+ perf_event_exit_cpu_context(cpu);
+
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
-
- perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/extable.c b/kernel/extable.c
index 763faf0..d8a6446 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -36,7 +36,7 @@
extern struct exception_table_entry __stop___ex_table[];
/* Cleared by build time tools if the table is already sorted. */
-u32 __initdata main_extable_sort_needed = 1;
+u32 __initdata __visible main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
diff --git a/kernel/fork.c b/kernel/fork.c
index a17621c..332688e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -237,6 +237,7 @@
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
+ task_numa_free(tsk);
security_task_free(tsk);
exit_creds(tsk);
delayacct_tsk_free(tsk);
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261..67dacaf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -157,7 +157,9 @@
* enqueue.
*/
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
+#endif
/*
* Futex flags used to encode options to functions and preserve them across
@@ -234,6 +236,7 @@
* waiting on a futex.
*/
struct futex_hash_bucket {
+ atomic_t waiters;
spinlock_t lock;
struct plist_head chain;
} ____cacheline_aligned_in_smp;
@@ -253,22 +256,37 @@
smp_mb__after_atomic_inc();
}
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
+ atomic_inc(&hb->waiters);
/*
- * Tasks trying to enter the critical region are most likely
- * potential waiters that will be added to the plist. Ensure
- * that wakers won't miss to-be-slept tasks in the window between
- * the wait call and the actual plist_add.
+ * Full barrier (A), see the ordering comment above.
*/
- if (spin_is_locked(&hb->lock))
- return true;
- smp_rmb(); /* Make sure we check the lock state first */
+ smp_mb__after_atomic_inc();
+#endif
+}
- return !plist_head_empty(&hb->chain);
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+ atomic_dec(&hb->waiters);
+#endif
+}
+
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+ return atomic_read(&hb->waiters);
#else
- return true;
+ return 1;
#endif
}
@@ -954,6 +972,7 @@
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
+ hb_waiters_dec(hb);
}
/*
@@ -1257,7 +1276,9 @@
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
+ hb_waiters_dec(hb1);
plist_add(&q->list, &hb2->chain);
+ hb_waiters_inc(hb2);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -1600,6 +1621,17 @@
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
+
+ /*
+ * Increment the counter before taking the lock so that
+ * a potential waker won't miss a to-be-slept task that is
+ * waiting for the spinlock. This is safe as all queue_lock()
+ * users end up calling queue_me(). Similarly, for housekeeping,
+ * decrement the counter at queue_unlock() when some error has
+ * occurred and we don't end up adding the task to the list.
+ */
+ hb_waiters_inc(hb);
+
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1643,7 @@
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
+ hb_waiters_dec(hb);
}
/**
@@ -2342,6 +2375,7 @@
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
+ hb_waiters_dec(hb);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
@@ -2843,9 +2877,28 @@
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
+static void __init futex_detect_cmpxchg(void)
+{
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+ u32 curval;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+ * runtime detection of the futex_atomic_cmpxchg_inatomic()
+ * functionality. We want to know that before we call in any
+ * of the complex code paths. Also we want to prevent
+ * registration of robust lists in that case. NULL is
+ * guaranteed to fault and we get -EFAULT on functional
+ * implementation, the non-functional ones will return
+ * -ENOSYS.
+ */
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
+#endif
+}
+
static int __init futex_init(void)
{
- u32 curval;
unsigned int futex_shift;
unsigned long i;
@@ -2861,20 +2914,11 @@
&futex_shift, NULL,
futex_hashsize, futex_hashsize);
futex_hashsize = 1UL << futex_shift;
- /*
- * This will fail and we want it. Some arch implementations do
- * runtime detection of the futex_atomic_cmpxchg_inatomic()
- * functionality. We want to know that before we call in any
- * of the complex code paths. Also we want to prevent
- * registration of robust lists in that case. NULL is
- * guaranteed to fault and we get -EFAULT on functional
- * implementation, the non-functional ones will return
- * -ENOSYS.
- */
- if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
- futex_cmpxchg_enabled = 1;
+
+ futex_detect_cmpxchg();
for (i = 0; i < futex_hashsize; i++) {
+ atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
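
The per-bucket atomic exists so the wake side can skip taking hb->lock when nothing is queued, which is why queue_lock() increments before acquiring the lock. A simplified sketch of the wake-side fast path this enables:

    static int wake_fast_path(struct futex_hash_bucket *hb)
    {
            if (!hb_waiters_pending(hb))
                    return 0;               /* no waiters: skip the bucket lock */

            spin_lock(&hb->lock);
            /* ... walk hb->chain and wake matching futex_q entries ... */
            spin_unlock(&hb->lock);
            return 1;
    }
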
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb3..f140337 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c..d3bf660 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@
static void wake_threads_waitq(struct irq_desc *desc)
{
- if (atomic_dec_and_test(&desc->threads_active) &&
- waitqueue_active(&desc->wait_for_threads))
+ if (atomic_dec_and_test(&desc->threads_active))
wake_up(&desc->wait_for_threads);
}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 55fcce6..a82170e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -61,11 +61,11 @@
*
* Can be re-enqueued while the callback is still in progress.
*/
-void irq_work_queue(struct irq_work *work)
+bool irq_work_queue(struct irq_work *work)
{
/* Only queue if not already pending */
if (!irq_work_claim(work))
- return;
+ return false;
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
@@ -83,6 +83,8 @@
}
preempt_enable();
+
+ return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 60bafbe..45601cf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1039,10 +1039,10 @@
{}
#ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct compat_kexec_segment __user *segments,
- unsigned long flags)
+COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
+ compat_ulong_t, nr_segments,
+ struct compat_kexec_segment __user *, segments,
+ compat_ulong_t, flags)
{
struct compat_kexec_segment in;
struct kexec_segment out, __user *ksegments;
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index d945a94..e660964 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -19,6 +19,8 @@
#include <linux/sched.h>
#include <linux/capability.h>
+#include <linux/rcupdate.h> /* rcu_expedited */
+
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index baab8e5..306a76b 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
-obj-y += mutex.o semaphore.o rwsem.o lglock.o
+obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = -pg
@@ -23,3 +23,4 @@
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eb8a547..b0e9467 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1936,12 +1936,12 @@
for (;;) {
int distance = curr->lockdep_depth - depth + 1;
- hlock = curr->held_locks + depth-1;
+ hlock = curr->held_locks + depth - 1;
/*
* Only non-recursive-read entries get new dependencies
* added:
*/
- if (hlock->read != 2) {
+ if (hlock->read != 2 && hlock->check) {
if (!check_prev_add(curr, hlock, next,
distance, trylock_loop))
return 0;
@@ -2098,7 +2098,7 @@
* (If lookup_chain_cache() returns with 1 it acquires
* graph_lock for us)
*/
- if (!hlock->trylock && (hlock->check == 2) &&
+ if (!hlock->trylock && hlock->check &&
lookup_chain_cache(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
@@ -2517,7 +2517,7 @@
BUG_ON(usage_bit >= LOCK_USAGE_STATES);
- if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
+ if (!hlock->check)
continue;
if (!mark_lock(curr, hlock, usage_bit))
@@ -2557,7 +2557,7 @@
debug_atomic_inc(hardirqs_on_events);
}
-void trace_hardirqs_on_caller(unsigned long ip)
+__visible void trace_hardirqs_on_caller(unsigned long ip)
{
time_hardirqs_on(CALLER_ADDR0, ip);
@@ -2610,7 +2610,7 @@
/*
* Hardirqs were disabled:
*/
-void trace_hardirqs_off_caller(unsigned long ip)
+__visible void trace_hardirqs_off_caller(unsigned long ip)
{
struct task_struct *curr = current;
@@ -3055,9 +3055,6 @@
int class_idx;
u64 chain_key;
- if (!prove_locking)
- check = 1;
-
if (unlikely(!debug_locks))
return 0;
@@ -3069,8 +3066,8 @@
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
- if (lock->key == &__lockdep_no_validate__)
- check = 1;
+ if (!prove_locking || lock->key == &__lockdep_no_validate__)
+ check = 0;
if (subclass < NR_LOCKDEP_CACHING_CLASSES)
class = lock->class_cache[subclass];
@@ -3138,7 +3135,7 @@
hlock->holdtime_stamp = lockstat_clock();
#endif
- if (check == 2 && !mark_irqflags(curr, hlock))
+ if (check && !mark_irqflags(curr, hlock))
return 0;
/* mark it as used: */
@@ -4191,7 +4188,7 @@
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
-void lockdep_sys_exit(void)
+asmlinkage void lockdep_sys_exit(void)
{
struct task_struct *curr = current;
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
new file mode 100644
index 0000000..f26b1a1
--- /dev/null
+++ b/kernel/locking/locktorture.c
@@ -0,0 +1,452 @@
+/*
+ * Module-based torture test facility for locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Based on kernel/rcu/torture.c.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+
+torture_param(int, nwriters_stress, -1,
+ "Number of write-locking stress-test threads");
+torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
+torture_param(int, onoff_interval, 0,
+ "Time between CPU hotplugs (s), 0=disable");
+torture_param(int, shuffle_interval, 3,
+ "Number of jiffies between shuffles, 0=disable");
+torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
+torture_param(int, stat_interval, 60,
+ "Number of seconds between stats printk()s");
+torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
+torture_param(bool, verbose, true,
+ "Enable verbose debugging printk()s");
+
+static char *torture_type = "spin_lock";
+module_param(torture_type, charp, 0444);
+MODULE_PARM_DESC(torture_type,
+ "Type of lock to torture (spin_lock, spin_lock_irq, ...)");
+
+static atomic_t n_lock_torture_errors;
+
+static struct task_struct *stats_task;
+static struct task_struct **writer_tasks;
+
+static int nrealwriters_stress;
+static bool lock_is_write_held;
+
+struct lock_writer_stress_stats {
+ long n_write_lock_fail;
+ long n_write_lock_acquired;
+};
+static struct lock_writer_stress_stats *lwsa;
+
+#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
+#define LOCKTORTURE_RUNNABLE_INIT 1
+#else
+#define LOCKTORTURE_RUNNABLE_INIT 0
+#endif
+int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+module_param(locktorture_runnable, int, 0444);
+MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");
+
+/* Forward reference. */
+static void lock_torture_cleanup(void);
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+struct lock_torture_ops {
+ void (*init)(void);
+ int (*writelock)(void);
+ void (*write_delay)(struct torture_random_state *trsp);
+ void (*writeunlock)(void);
+ unsigned long flags;
+ const char *name;
+};
+
+static struct lock_torture_ops *cur_ops;
+
+/*
+ * Definitions for lock torture testing.
+ */
+
+static int torture_lock_busted_write_lock(void)
+{
+ return 0; /* BUGGY, do not use in real life!!! */
+}
+
+static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_us = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
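+	/*
+	 * Scaling the modulus by nrealwriters_stress keeps the system-wide
+	 * rate of long delays roughly constant as writers are added.
+	 */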
+ if (!(torture_random(trsp) %
+ (nrealwriters_stress * 2000 * longdelay_us)))
+ mdelay(longdelay_us);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_lock_busted_write_unlock(void)
+{
+ /* BUGGY, do not use in real life!!! */
+}
+
+static struct lock_torture_ops lock_busted_ops = {
+ .writelock = torture_lock_busted_write_lock,
+ .write_delay = torture_lock_busted_write_delay,
+ .writeunlock = torture_lock_busted_write_unlock,
+ .name = "lock_busted"
+};
+
+static DEFINE_SPINLOCK(torture_spinlock);
+
+static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
+{
+ spin_lock(&torture_spinlock);
+ return 0;
+}
+
+static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long shortdelay_us = 2;
+ const unsigned long longdelay_us = 100;
+
+ /* We want a short delay mostly to emulate likely code, and
+ * we want a long delay occasionally to force massive contention.
+ */
+ if (!(torture_random(trsp) %
+ (nrealwriters_stress * 2000 * longdelay_us)))
+ mdelay(longdelay_us);
+ if (!(torture_random(trsp) %
+ (nrealwriters_stress * 2 * shortdelay_us)))
+ udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
+{
+ spin_unlock(&torture_spinlock);
+}
+
+static struct lock_torture_ops spin_lock_ops = {
+ .writelock = torture_spin_lock_write_lock,
+ .write_delay = torture_spin_lock_write_delay,
+ .writeunlock = torture_spin_lock_write_unlock,
+ .name = "spin_lock"
+};
+
+static int torture_spin_lock_write_lock_irq(void)
+__acquires(torture_spinlock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&torture_spinlock, flags);
+ cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_lock_spin_write_unlock_irq(void)
+__releases(torture_spinlock)
+{
+ spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
+}
+
+static struct lock_torture_ops spin_lock_irq_ops = {
+ .writelock = torture_spin_lock_write_lock_irq,
+ .write_delay = torture_spin_lock_write_delay,
+ .writeunlock = torture_lock_spin_write_unlock_irq,
+ .name = "spin_lock_irq"
+};
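+
+/*
+ * A sketch of how another lock flavor could plug into the ops vector
+ * (hypothetical here, not part of this file): define the lock, wrap its
+ * acquire/release in the writelock/writeunlock hooks, and list the ops
+ * instance in torture_ops[] in lock_torture_init():
+ *
+ *	static DEFINE_MUTEX(torture_mutex);
+ *
+ *	static int torture_mutex_lock(void) __acquires(torture_mutex)
+ *	{
+ *		mutex_lock(&torture_mutex);
+ *		return 0;
+ *	}
+ *
+ *	static void torture_mutex_unlock(void) __releases(torture_mutex)
+ *	{
+ *		mutex_unlock(&torture_mutex);
+ *	}
+ *
+ *	static struct lock_torture_ops mutex_lock_ops = {
+ *		.writelock	= torture_mutex_lock,
+ *		.write_delay	= torture_spin_lock_write_delay,
+ *		.writeunlock	= torture_mutex_unlock,
+ *		.name		= "mutex_lock"
+ *	};
+ */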
+
+/*
+ * Lock torture writer kthread. Repeatedly acquires and releases
+ * the lock, checking for duplicate acquisitions.
+ */
+static int lock_torture_writer(void *arg)
+{
+ struct lock_writer_stress_stats *lwsp = arg;
+ static DEFINE_TORTURE_RANDOM(rand);
+
+ VERBOSE_TOROUT_STRING("lock_torture_writer task started");
+ set_user_nice(current, 19);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ cur_ops->writelock();
+ if (WARN_ON_ONCE(lock_is_write_held))
+ lwsp->n_write_lock_fail++;
+ lock_is_write_held = 1;
+ lwsp->n_write_lock_acquired++;
+ cur_ops->write_delay(&rand);
+ lock_is_write_held = 0;
+ cur_ops->writeunlock();
+ stutter_wait("lock_torture_writer");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("lock_torture_writer");
+ return 0;
+}
+
+/*
+ * Create a lock-torture-statistics message in the specified buffer.
+ */
+static void lock_torture_printk(char *page)
+{
+	bool fail = false;
+ int i;
+ long max = 0;
+ long min = lwsa[0].n_write_lock_acquired;
+ long long sum = 0;
+
+ for (i = 0; i < nrealwriters_stress; i++) {
+ if (lwsa[i].n_write_lock_fail)
+ fail = true;
+ sum += lwsa[i].n_write_lock_acquired;
+		if (max < lwsa[i].n_write_lock_acquired)
+			max = lwsa[i].n_write_lock_acquired;
+		if (min > lwsa[i].n_write_lock_acquired)
+			min = lwsa[i].n_write_lock_acquired;
+ }
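+	/*
+	 * The "???" flag in the line below marks a run where the busiest
+	 * writer acquired the lock more than twice as often as the least
+	 * busy one, which can indicate writer starvation.
+	 */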
+ page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page,
+ "Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
+ sum, max, min, max / 2 > min ? "???" : "",
+ fail, fail ? "!!!" : "");
+ if (fail)
+ atomic_inc(&n_lock_torture_errors);
+}
+
+/*
+ * Print torture statistics. Caller must ensure that there is only one
+ * call to this function at a given time!!! This is normally accomplished
+ * by relying on the module system to only have one copy of the module
+ * loaded, and then by giving the lock_torture_stats kthread full control
+ * (or the init/cleanup functions when lock_torture_stats thread is not
+ * running).
+ */
+static void lock_torture_stats_print(void)
+{
+ int size = nrealwriters_stress * 200 + 8192;
+ char *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("lock_torture_stats_print: Out of memory, need: %d",
+ size);
+ return;
+ }
+ lock_torture_printk(buf);
+ pr_alert("%s", buf);
+ kfree(buf);
+}
+
+/*
+ * Periodically prints torture statistics, if periodic statistics printing
+ * was specified via the stat_interval module parameter.
+ *
+ * No need to worry about fullstop here, since this one doesn't reference
+ * volatile state or register callbacks.
+ */
+static int lock_torture_stats(void *arg)
+{
+ VERBOSE_TOROUT_STRING("lock_torture_stats task started");
+ do {
+ schedule_timeout_interruptible(stat_interval * HZ);
+ lock_torture_stats_print();
+ torture_shutdown_absorb("lock_torture_stats");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("lock_torture_stats");
+ return 0;
+}
+
+static inline void
+lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
+ const char *tag)
+{
+ pr_alert("%s" TORTURE_FLAG
+ "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
+ torture_type, tag, nrealwriters_stress, stat_interval, verbose,
+ shuffle_interval, stutter, shutdown_secs,
+ onoff_interval, onoff_holdoff);
+}
+
+static void lock_torture_cleanup(void)
+{
+ int i;
+
+ if (torture_cleanup())
+ return;
+
+ if (writer_tasks) {
+ for (i = 0; i < nrealwriters_stress; i++)
+ torture_stop_kthread(lock_torture_writer,
+ writer_tasks[i]);
+ kfree(writer_tasks);
+ writer_tasks = NULL;
+ }
+
+ torture_stop_kthread(lock_torture_stats, stats_task);
+ lock_torture_stats_print(); /* -After- the stats thread is stopped! */
+
+ if (atomic_read(&n_lock_torture_errors))
+ lock_torture_print_module_parms(cur_ops,
+ "End of test: FAILURE");
+ else if (torture_onoff_failures())
+ lock_torture_print_module_parms(cur_ops,
+ "End of test: LOCK_HOTPLUG");
+ else
+ lock_torture_print_module_parms(cur_ops,
+ "End of test: SUCCESS");
+}
+
+static int __init lock_torture_init(void)
+{
+ int i;
+ int firsterr = 0;
+ static struct lock_torture_ops *torture_ops[] = {
+ &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+ };
+
+ torture_init_begin(torture_type, verbose, &locktorture_runnable);
+
+ /* Process args and tell the world that the torturer is on the job. */
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
+ cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cur_ops->name) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(torture_ops)) {
+ pr_alert("lock-torture: invalid torture type: \"%s\"\n",
+ torture_type);
+ pr_alert("lock-torture types:");
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
+ pr_alert(" %s", torture_ops[i]->name);
+ pr_alert("\n");
+ torture_init_end();
+ return -EINVAL;
+ }
+ if (cur_ops->init)
+ cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
+ if (nwriters_stress >= 0)
+ nrealwriters_stress = nwriters_stress;
+ else
+ nrealwriters_stress = 2 * num_online_cpus();
+ lock_torture_print_module_parms(cur_ops, "Start of test");
+
+ /* Initialize the statistics so that each run gets its own numbers. */
+
+ lock_is_write_held = 0;
+ lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
+ if (lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("lwsa: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nrealwriters_stress; i++) {
+ lwsa[i].n_write_lock_fail = 0;
+ lwsa[i].n_write_lock_acquired = 0;
+ }
+
+ /* Start up the kthreads. */
+
+ if (onoff_interval > 0) {
+ firsterr = torture_onoff_init(onoff_holdoff * HZ,
+ onoff_interval * HZ);
+ if (firsterr)
+ goto unwind;
+ }
+ if (shuffle_interval > 0) {
+ firsterr = torture_shuffle_init(shuffle_interval);
+ if (firsterr)
+ goto unwind;
+ }
+ if (shutdown_secs > 0) {
+ firsterr = torture_shutdown_init(shutdown_secs,
+ lock_torture_cleanup);
+ if (firsterr)
+ goto unwind;
+ }
+ if (stutter > 0) {
+ firsterr = torture_stutter_init(stutter);
+ if (firsterr)
+ goto unwind;
+ }
+
+ writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
+ GFP_KERNEL);
+ if (writer_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nrealwriters_stress; i++) {
+ firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
+ writer_tasks[i]);
+ if (firsterr)
+ goto unwind;
+ }
+ if (stat_interval > 0) {
+ firsterr = torture_create_kthread(lock_torture_stats, NULL,
+ stats_task);
+ if (firsterr)
+ goto unwind;
+ }
+ torture_init_end();
+ return 0;
+
+unwind:
+ torture_init_end();
+ lock_torture_cleanup();
+ return firsterr;
+}
+
+module_init(lock_torture_init);
+module_exit(lock_torture_cleanup);
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
new file mode 100644
index 0000000..838dc9e
--- /dev/null
+++ b/kernel/locking/mcs_spinlock.c
@@ -0,0 +1,178 @@
+
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include "mcs_spinlock.h"
+
+#ifdef CONFIG_SMP
+
+/*
+ * An MCS-like lock especially tailored for optimistic spinning by sleeping
+ * lock implementations (mutex, rwsem, etc).
+ *
+ * Using a single mcs node per CPU is safe because sleeping locks should not be
+ * called from interrupt context and we have preemption disabled while
+ * spinning.
+ */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+
+/*
+ * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
+ * Can return NULL in case we were the last queued and we updated @lock instead.
+ */
+static inline struct optimistic_spin_queue *
+osq_wait_next(struct optimistic_spin_queue **lock,
+ struct optimistic_spin_queue *node,
+ struct optimistic_spin_queue *prev)
+{
+ struct optimistic_spin_queue *next = NULL;
+
+ for (;;) {
+ if (*lock == node && cmpxchg(lock, node, prev) == node) {
+ /*
+ * We were the last queued, we moved @lock back. @prev
+ * will now observe @lock and will complete its
+ * unlock()/unqueue().
+ */
+ break;
+ }
+
+ /*
+ * We must xchg() the @node->next value, because if we were to
+ * leave it in, a concurrent unlock()/unqueue() from
+ * @node->next might complete Step-A and think its @prev is
+ * still valid.
+ *
+ * If the concurrent unlock()/unqueue() wins the race, we'll
+ * wait for either @lock to point to us, through its Step-B, or
+ * wait for a new @node->next from its Step-C.
+ */
+ if (node->next) {
+ next = xchg(&node->next, NULL);
+ if (next)
+ break;
+ }
+
+ arch_mutex_cpu_relax();
+ }
+
+ return next;
+}
+
+bool osq_lock(struct optimistic_spin_queue **lock)
+{
+ struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+ struct optimistic_spin_queue *prev, *next;
+
+ node->locked = 0;
+ node->next = NULL;
+
+ node->prev = prev = xchg(lock, node);
+ if (likely(prev == NULL))
+ return true;
+
+ ACCESS_ONCE(prev->next) = node;
+
+ /*
+	 * Normally @prev would be untouchable after the above store, because
+	 * at that moment unlock() can proceed and wipe the node element from
+	 * the stack.
+ *
+ * However, since our nodes are static per-cpu storage, we're
+ * guaranteed their existence -- this allows us to apply
+ * cmpxchg in an attempt to undo our queueing.
+ */
+
+ while (!smp_load_acquire(&node->locked)) {
+ /*
+		 * If we need to reschedule, bail... so we can block.
+ */
+ if (need_resched())
+ goto unqueue;
+
+ arch_mutex_cpu_relax();
+ }
+ return true;
+
+unqueue:
+ /*
+ * Step - A -- stabilize @prev
+ *
+ * Undo our @prev->next assignment; this will make @prev's
+ * unlock()/unqueue() wait for a next pointer since @lock points to us
+ * (or later).
+ */
+
+ for (;;) {
+ if (prev->next == node &&
+ cmpxchg(&prev->next, node, NULL) == node)
+ break;
+
+ /*
+ * We can only fail the cmpxchg() racing against an unlock(),
+		 * in which case we should observe @node->locked becoming
+ * true.
+ */
+ if (smp_load_acquire(&node->locked))
+ return true;
+
+ arch_mutex_cpu_relax();
+
+ /*
+ * Or we race against a concurrent unqueue()'s step-B, in which
+ * case its step-C will write us a new @node->prev pointer.
+ */
+ prev = ACCESS_ONCE(node->prev);
+ }
+
+ /*
+ * Step - B -- stabilize @next
+ *
+ * Similar to unlock(), wait for @node->next or move @lock from @node
+ * back to @prev.
+ */
+
+ next = osq_wait_next(lock, node, prev);
+ if (!next)
+ return false;
+
+ /*
+ * Step - C -- unlink
+ *
+	 * @prev is stable because it's still waiting for a new @prev->next
+ * pointer, @next is stable because our @node->next pointer is NULL and
+ * it will wait in Step-A.
+ */
+
+ ACCESS_ONCE(next->prev) = prev;
+ ACCESS_ONCE(prev->next) = next;
+
+ return false;
+}
+
+void osq_unlock(struct optimistic_spin_queue **lock)
+{
+ struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+ struct optimistic_spin_queue *next;
+
+ /*
+ * Fast path for the uncontended case.
+ */
+ if (likely(cmpxchg(lock, node, NULL) == node))
+ return;
+
+ /*
+ * Second most likely case.
+ */
+ next = xchg(&node->next, NULL);
+ if (next) {
+ ACCESS_ONCE(next->locked) = 1;
+ return;
+ }
+
+ next = osq_wait_next(lock, node, NULL);
+ if (next)
+ ACCESS_ONCE(next->locked) = 1;
+}
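+
+/*
+ * Typical caller pattern (a sketch; the mutex optimistic-spin path is the
+ * in-tree user):
+ *
+ *	if (!osq_lock(&lock->osq))
+ *		goto slowpath;
+ *	... spin while the lock owner runs ...
+ *	osq_unlock(&lock->osq);
+ *
+ * A false return from osq_lock() means this CPU unqueued itself (typically
+ * because need_resched() was set) and should fall back to blocking.
+ */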
+
+#endif
+
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
new file mode 100644
index 0000000..a2dbac4
--- /dev/null
+++ b/kernel/locking/mcs_spinlock.h
@@ -0,0 +1,129 @@
+/*
+ * MCS lock defines
+ *
+ * This file contains the main data structure and API definitions of MCS lock.
+ *
+ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
+ * with the desirable properties of being fair and of having each cpu
+ * trying to acquire the lock spin on a local variable.
+ * It avoids the expensive cache bouncing that common test-and-set spin-lock
+ * implementations incur.
+ */
+#ifndef __LINUX_MCS_SPINLOCK_H
+#define __LINUX_MCS_SPINLOCK_H
+
+#include <asm/mcs_spinlock.h>
+
+struct mcs_spinlock {
+ struct mcs_spinlock *next;
+ int locked; /* 1 if lock acquired */
+};
+
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l) \
+do { \
+ while (!(smp_load_acquire(l))) \
+ arch_mutex_cpu_relax(); \
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure all
+ * operations in the critical section have been completed before
+ * unlocking.
+ */
+#define arch_mcs_spin_unlock_contended(l) \
+ smp_store_release((l), 1)
+#endif
+
+/*
+ * Note: on many architectures (x86 being an exception), the
+ * smp_load_acquire()/smp_store_release() pair is not sufficient to form a
+ * full memory barrier across cpus for mcs_unlock and mcs_lock.
+ * For applications that need a full barrier across multiple cpus with an
+ * mcs_unlock/mcs_lock pair, smp_mb__after_unlock_lock() should be used
+ * after mcs_lock.
+ */
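+
+/*
+ * For example (a sketch of the pattern described above):
+ *
+ *	mcs_spin_unlock(&lock, &node);		(on CPU 0)
+ *	...
+ *	mcs_spin_lock(&lock, &other_node);	(on CPU 1)
+ *	smp_mb__after_unlock_lock();		(upgrade to a full barrier)
+ */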
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference of the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
+ */
+static inline
+void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+ struct mcs_spinlock *prev;
+
+ /* Init node */
+ node->locked = 0;
+ node->next = NULL;
+
+ prev = xchg(lock, node);
+ if (likely(prev == NULL)) {
+ /*
+		 * Lock acquired; there is no need to set node->locked to 1. A
+		 * thread only spins on its own node->locked value during lock
+		 * acquisition.
+ * However, since this thread can immediately acquire the lock
+ * and does not proceed to spin on its own node->locked, this
+ * value won't be used. If a debug mode is needed to
+ * audit lock status, then set node->locked value here.
+ */
+ return;
+ }
+ ACCESS_ONCE(prev->next) = node;
+
+ /* Wait until the lock holder passes the lock down. */
+ arch_mcs_spin_lock_contended(&node->locked);
+}
+
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
+static inline
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+ struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+ if (likely(!next)) {
+ /*
+ * Release the lock by setting it to NULL
+ */
+ if (likely(cmpxchg(lock, node, NULL) == node))
+ return;
+ /* Wait until the next pointer is set */
+ while (!(next = ACCESS_ONCE(node->next)))
+ arch_mutex_cpu_relax();
+ }
+
+ /* Pass lock to next waiter. */
+ arch_mcs_spin_unlock_contended(&next->locked);
+}
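+
+/*
+ * Usage sketch (assuming a shared struct mcs_spinlock *lock, initially NULL):
+ *
+ *	struct mcs_spinlock node;	(one node per lock attempt, e.g. on-stack)
+ *
+ *	mcs_spin_lock(&lock, &node);
+ *	... critical section ...
+ *	mcs_spin_unlock(&lock, &node);
+ */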
+
+/*
+ * Cancellable version of the MCS lock above.
+ *
+ * Intended for adaptive spinning of sleeping locks:
+ * mutex_lock()/rwsem_down_{read,write}() etc.
+ */
+
+struct optimistic_spin_queue {
+ struct optimistic_spin_queue *next, *prev;
+ int locked; /* 1 if lock acquired */
+};
+
+extern bool osq_lock(struct optimistic_spin_queue **lock);
+extern void osq_unlock(struct optimistic_spin_queue **lock);
+
+#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index faf6f5b..e1191c9 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -83,6 +83,12 @@
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
mutex_clear_owner(lock);
+
+ /*
+ * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
+ * mutexes so that we can do it here after we've verified state.
+ */
+ atomic_set(&lock->count, 1);
}
void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c..bc73d33 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
+#include "mcs_spinlock.h"
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -33,6 +34,13 @@
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
+/*
+ * Must be 0 for the debug case so we do not do the unlock outside of the
+ * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
+ * case.
+ */
+# undef __mutex_slowpath_needs_to_unlock
+# define __mutex_slowpath_needs_to_unlock() 0
#else
# include "mutex.h"
# include <asm/mutex.h>
@@ -52,7 +60,7 @@
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- lock->spin_mlock = NULL;
+ lock->osq = NULL;
#endif
debug_mutex_init(lock, name, key);
@@ -67,8 +75,7 @@
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
/**
* mutex_lock - acquire the mutex
@@ -111,54 +118,7 @@
* more or less simultaneously, the spinners need to acquire a MCS lock
* first before spinning on the owner field.
*
- * We don't inline mspin_lock() so that perf can correctly account for the
- * time spent in this lock function.
*/
-struct mspin_node {
- struct mspin_node *next ;
- int locked; /* 1 if lock acquired */
-};
-#define MLOCK(mutex) ((struct mspin_node **)&((mutex)->spin_mlock))
-
-static noinline
-void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
-{
- struct mspin_node *prev;
-
- /* Init node */
- node->locked = 0;
- node->next = NULL;
-
- prev = xchg(lock, node);
- if (likely(prev == NULL)) {
- /* Lock acquired */
- node->locked = 1;
- return;
- }
- ACCESS_ONCE(prev->next) = node;
- smp_wmb();
- /* Wait until the lock holder passes the lock down */
- while (!ACCESS_ONCE(node->locked))
- arch_mutex_cpu_relax();
-}
-
-static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
-{
- struct mspin_node *next = ACCESS_ONCE(node->next);
-
- if (likely(!next)) {
- /*
- * Release the lock by setting it to NULL
- */
- if (cmpxchg(lock, node, NULL) == node)
- return;
- /* Wait until the next pointer is set */
- while (!(next = ACCESS_ONCE(node->next)))
- arch_mutex_cpu_relax();
- }
- ACCESS_ONCE(next->locked) = 1;
- smp_wmb();
-}
/*
* Mutex spinning code migrated from kernel/sched/core.c
@@ -212,6 +172,9 @@
struct task_struct *owner;
int retval = 1;
+ if (need_resched())
+ return 0;
+
rcu_read_lock();
owner = ACCESS_ONCE(lock->owner);
if (owner)
@@ -225,7 +188,8 @@
}
#endif
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
* mutex_unlock - release the mutex
@@ -446,9 +410,11 @@
if (!mutex_can_spin_on_owner(lock))
goto slowpath;
+ if (!osq_lock(&lock->osq))
+ goto slowpath;
+
for (;;) {
struct task_struct *owner;
- struct mspin_node node;
if (use_ww_ctx && ww_ctx->acquired > 0) {
struct ww_mutex *ww;
@@ -463,19 +429,16 @@
* performed the optimistic spinning cannot be done.
*/
if (ACCESS_ONCE(ww->ctx))
- goto slowpath;
+ break;
}
/*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
- mspin_lock(MLOCK(lock), &node);
owner = ACCESS_ONCE(lock->owner);
- if (owner && !mutex_spin_on_owner(lock, owner)) {
- mspin_unlock(MLOCK(lock), &node);
- goto slowpath;
- }
+ if (owner && !mutex_spin_on_owner(lock, owner))
+ break;
if ((atomic_read(&lock->count) == 1) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -488,11 +451,10 @@
}
mutex_set_owner(lock);
- mspin_unlock(MLOCK(lock), &node);
+ osq_unlock(&lock->osq);
preempt_enable();
return 0;
}
- mspin_unlock(MLOCK(lock), &node);
/*
* When there's no owner, we might have preempted between the
@@ -501,7 +463,7 @@
* the owner complete.
*/
if (!owner && (need_resched() || rt_task(task)))
- goto slowpath;
+ break;
/*
* The cpu_relax() call is a compiler barrier which forces
@@ -511,7 +473,15 @@
*/
arch_mutex_cpu_relax();
}
+ osq_unlock(&lock->osq);
slowpath:
+ /*
+ * If we fell out of the spin path because of need_resched(),
+ * reschedule now, before we try-lock the mutex. This avoids getting
+ * scheduled out right after we obtained the mutex.
+ */
+ if (need_resched())
+ schedule_preempt_disabled();
#endif
spin_lock_mutex(&lock->wait_lock, flags);
@@ -717,10 +687,6 @@
struct mutex *lock = container_of(lock_count, struct mutex, count);
unsigned long flags;
- spin_lock_mutex(&lock->wait_lock, flags);
- mutex_release(&lock->dep_map, nested, _RET_IP_);
- debug_mutex_unlock(lock);
-
/*
* some architectures leave the lock unlocked in the fastpath failure
* case, others need to leave it locked. In the later case we have to
@@ -729,6 +695,10 @@
if (__mutex_slowpath_needs_to_unlock())
atomic_set(&lock->count, 1);
+ spin_lock_mutex(&lock->wait_lock, flags);
+ mutex_release(&lock->dep_map, nested, _RET_IP_);
+ debug_mutex_unlock(lock);
+
if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
@@ -746,7 +716,7 @@
/*
* Release the lock, slowpath:
*/
-static __used noinline void
+__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +773,7 @@
}
EXPORT_SYMBOL(mutex_lock_killable);
-static __used noinline void __sched
+__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2e960a2..aa4dff0 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -213,6 +213,18 @@
}
/*
+ * Called by sched_setscheduler() to check whether the priority change
+ * is overruled by a possible priority boosting.
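+ *
+ * Returns nonzero if the top pi-waiter's ->prio is numerically less than
+ * or equal to @newprio, i.e. the existing boost overrules the change.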
+ */
+int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+ if (!task_has_pi_waiters(task))
+ return 0;
+
+ return task_top_pi_waiter(task)->task->prio <= newprio;
+}
+
+/*
* Adjust the priority of a task, after its pi_waiters got modified.
*
* This can be both boosting and unboosting. task->pi_lock must be held.
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 19c5fa9..1d66e08 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -143,6 +143,7 @@
/*
* wait for the read lock to be granted
*/
+__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
@@ -190,6 +191,7 @@
/*
* wait until we successfully acquire the write lock
*/
+__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
@@ -252,6 +254,7 @@
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
+__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
@@ -272,6 +275,7 @@
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
+__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
diff --git a/kernel/module.c b/kernel/module.c
index d24fcf2..8dc7f5e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1015,7 +1015,7 @@
buf[l++] = 'C';
/*
* TAINT_FORCED_RMMOD: could be added.
- * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+ * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
* apply to modules.
*/
return l;
@@ -1948,6 +1948,10 @@
switch (sym[i].st_shndx) {
case SHN_COMMON:
+ /* Ignore common symbols */
+ if (!strncmp(name, "__gnu_lto", 9))
+ break;
+
/* We compiled with -fno-common. These are not
supposed to happen. */
pr_debug("Common symbol: %s\n", name);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 2d5cc4c..db4c8b0 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -309,7 +309,7 @@
* racy then it does not matter what the result of the test
* is, we re-check the list after having taken the lock anyway:
*/
- if (rcu_dereference_raw(nh->head)) {
+ if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
nr_calls);
diff --git a/kernel/panic.c b/kernel/panic.c
index 6d63003..cca8a91 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -199,7 +199,7 @@
static const struct tnt tnts[] = {
{ TAINT_PROPRIETARY_MODULE, 'P', 'G' },
{ TAINT_FORCED_MODULE, 'F', ' ' },
- { TAINT_UNSAFE_SMP, 'S', ' ' },
+ { TAINT_CPU_OUT_OF_SPEC, 'S', ' ' },
{ TAINT_FORCED_RMMOD, 'R', ' ' },
{ TAINT_MACHINE_CHECK, 'M', ' ' },
{ TAINT_BAD_PAGE, 'B', ' ' },
@@ -459,7 +459,7 @@
* Called when gcc's -fstack-protector feature is used, and
* gcc detects corruption of the on-stack canary value
*/
-void __stack_chk_fail(void)
+__visible void __stack_chk_fail(void)
{
panic("stack-protector: Kernel stack is corrupted in: %p\n",
__builtin_return_address(0));
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8..aba9c54 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
#include <linux/kbd_kern.h>
#include <linux/vt.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "power.h"
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1e..ebdd9c1 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@
struct page *page;
page = alloc_pages_exact_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
0);
if (!page)
goto out_cleanup;
per_cpu(cpu_profile_hits, cpu)[1]
= (struct profile_hit *)page_address(page);
page = alloc_pages_exact_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
0);
if (!page)
goto out_cleanup;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1f4bcb3..adf9862 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -1180,8 +1180,8 @@
return ret;
}
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data)
+COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+ compat_long_t, addr, compat_long_t, data)
{
struct task_struct *child;
long ret;
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 01e9ec3..807ccfb 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,5 +1,5 @@
obj-y += update.o srcu.o
-obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o
+obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += tree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 79c3877..bfda272 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2011
*
@@ -23,6 +23,7 @@
#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H
+#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
@@ -116,8 +117,6 @@
}
}
-extern int rcu_expedited;
-
#ifdef CONFIG_RCU_STALL_COMMON
extern int rcu_cpu_stall_suppress;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
new file mode 100644
index 0000000..bd30bc6
--- /dev/null
+++ b/kernel/rcu/rcutorture.c
@@ -0,0 +1,1582 @@
+/*
+ * Read-Copy Update module-based torture test facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2005, 2006
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ * Josh Triplett <josh@freedesktop.org>
+ *
+ * See also: Documentation/RCU/torture.txt
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+
+torture_param(int, fqs_duration, 0,
+ "Duration of fqs bursts (us), 0 to disable");
+torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
+torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
+torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
+torture_param(bool, gp_normal, false,
+ "Use normal (non-expedited) GP wait primitives");
+torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
+torture_param(int, n_barrier_cbs, 0,
+ "# of callbacks/kthreads for barrier testing");
+torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, object_debug, 0,
+ "Enable debug-object double call_rcu() testing");
+torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
+torture_param(int, onoff_interval, 0,
+ "Time between CPU hotplugs (s), 0=disable");
+torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
+torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
+torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
+torture_param(int, stall_cpu_holdoff, 10,
+ "Time to wait before starting stall (s).");
+torture_param(int, stat_interval, 60,
+ "Number of seconds between stats printk()s");
+torture_param(int, stutter, 5, "Number of seconds to run/halt test");
+torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
+torture_param(int, test_boost_duration, 4,
+ "Duration of each boost test, seconds.");
+torture_param(int, test_boost_interval, 7,
+ "Interval between boost tests, seconds.");
+torture_param(bool, test_no_idle_hz, true,
+ "Test support for tickless idle CPUs");
+torture_param(bool, verbose, true,
+ "Enable verbose debugging printk()s");
+
+static char *torture_type = "rcu";
+module_param(torture_type, charp, 0444);
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
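+
+/*
+ * Example invocation (a sketch; Documentation/RCU/torture.txt has the full
+ * parameter list):
+ *
+ *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
+ */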
+
+static int nrealreaders;
+static struct task_struct *writer_task;
+static struct task_struct **fakewriter_tasks;
+static struct task_struct **reader_tasks;
+static struct task_struct *stats_task;
+static struct task_struct *fqs_task;
+static struct task_struct *boost_tasks[NR_CPUS];
+static struct task_struct *stall_task;
+static struct task_struct **barrier_cbs_tasks;
+static struct task_struct *barrier_task;
+
+#define RCU_TORTURE_PIPE_LEN 10
+
+struct rcu_torture {
+ struct rcu_head rtort_rcu;
+ int rtort_pipe_count;
+ struct list_head rtort_free;
+ int rtort_mbtest;
+};
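+
+/*
+ * Each element above moves through a "pipeline" of grace periods after the
+ * writer replaces it: rtort_pipe_count is bumped once per grace period, and
+ * a reader that still sees the element with a count greater than 1 has
+ * observed it survive a full grace period, i.e. a broken RCU implementation.
+ */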
+
+static LIST_HEAD(rcu_torture_freelist);
+static struct rcu_torture __rcu *rcu_torture_current;
+static unsigned long rcu_torture_current_version;
+static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
+static DEFINE_SPINLOCK(rcu_torture_lock);
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
+ rcu_torture_count) = { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
+ rcu_torture_batch) = { 0 };
+static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+static atomic_t n_rcu_torture_alloc;
+static atomic_t n_rcu_torture_alloc_fail;
+static atomic_t n_rcu_torture_free;
+static atomic_t n_rcu_torture_mberror;
+static atomic_t n_rcu_torture_error;
+static long n_rcu_torture_barrier_error;
+static long n_rcu_torture_boost_ktrerror;
+static long n_rcu_torture_boost_rterror;
+static long n_rcu_torture_boost_failure;
+static long n_rcu_torture_boosts;
+static long n_rcu_torture_timers;
+static long n_barrier_attempts;
+static long n_barrier_successes;
+static struct list_head rcu_torture_removed;
+
+#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
+#define RCUTORTURE_RUNNABLE_INIT 1
+#else
+#define RCUTORTURE_RUNNABLE_INIT 0
+#endif
+int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(rcutorture_runnable, int, 0444);
+MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
+
+#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
+#define rcu_can_boost() 1
+#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
+#define rcu_can_boost() 0
+#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
+
+#ifdef CONFIG_RCU_TRACE
+static u64 notrace rcu_trace_clock_local(void)
+{
+ u64 ts = trace_clock_local();
+ unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
+ return ts;
+}
+#else /* #ifdef CONFIG_RCU_TRACE */
+static u64 notrace rcu_trace_clock_local(void)
+{
+ return 0ULL;
+}
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+static unsigned long boost_starttime; /* jiffies of next boost test start. */
+DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
+ /* and boost task create/destroy. */
+static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
+static bool barrier_phase; /* Test phase. */
+static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
+static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
+static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
+
+/*
+ * Allocate an element from the rcu_tortures pool.
+ */
+static struct rcu_torture *
+rcu_torture_alloc(void)
+{
+ struct list_head *p;
+
+ spin_lock_bh(&rcu_torture_lock);
+ if (list_empty(&rcu_torture_freelist)) {
+ atomic_inc(&n_rcu_torture_alloc_fail);
+ spin_unlock_bh(&rcu_torture_lock);
+ return NULL;
+ }
+ atomic_inc(&n_rcu_torture_alloc);
+ p = rcu_torture_freelist.next;
+ list_del_init(p);
+ spin_unlock_bh(&rcu_torture_lock);
+ return container_of(p, struct rcu_torture, rtort_free);
+}
+
+/*
+ * Free an element to the rcu_tortures pool.
+ */
+static void
+rcu_torture_free(struct rcu_torture *p)
+{
+ atomic_inc(&n_rcu_torture_free);
+ spin_lock_bh(&rcu_torture_lock);
+ list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+ spin_unlock_bh(&rcu_torture_lock);
+}
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_torture_ops {
+ void (*init)(void);
+ int (*readlock)(void);
+ void (*read_delay)(struct torture_random_state *rrsp);
+ void (*readunlock)(int idx);
+ int (*completed)(void);
+ void (*deferred_free)(struct rcu_torture *p);
+ void (*sync)(void);
+ void (*exp_sync)(void);
+ void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+ void (*cb_barrier)(void);
+ void (*fqs)(void);
+ void (*stats)(char *page);
+ int irq_capable;
+ int can_boost;
+ const char *name;
+};
+
+static struct rcu_torture_ops *cur_ops;
+
+/*
+ * Definitions for rcu torture testing.
+ */
+
+static int rcu_torture_read_lock(void) __acquires(RCU)
+{
+ rcu_read_lock();
+ return 0;
+}
+
+static void rcu_read_delay(struct torture_random_state *rrsp)
+{
+ const unsigned long shortdelay_us = 200;
+ const unsigned long longdelay_ms = 50;
+
+ /* We want a short delay sometimes to make a reader delay the grace
+ * period, and we want a long delay occasionally to trigger
+ * force_quiescent_state. */
+
+ if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms);
+ if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
+ udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+ if (!preempt_count() &&
+ !(torture_random(rrsp) % (nrealreaders * 20000)))
+ preempt_schedule(); /* No QS if preempt_disable() in effect */
+#endif
+}
+
+static void rcu_torture_read_unlock(int idx) __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int rcu_torture_completed(void)
+{
+ return rcu_batches_completed();
+}
+
+static void
+rcu_torture_cb(struct rcu_head *p)
+{
+ int i;
+ struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
+
+ if (torture_must_stop_irq()) {
+ /* Test is ending, just drop callbacks on the floor. */
+ /* The next initialization will pick up the pieces. */
+ return;
+ }
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+ } else {
+ cur_ops->deferred_free(rp);
+ }
+}
+
+static int rcu_no_completed(void)
+{
+ return 0;
+}
+
+static void rcu_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static void rcu_sync_torture_init(void)
+{
+ INIT_LIST_HEAD(&rcu_torture_removed);
+}
+
+static struct rcu_torture_ops rcu_ops = {
+ .init = rcu_sync_torture_init,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferred_free = rcu_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .exp_sync = synchronize_rcu_expedited,
+ .call = call_rcu,
+ .cb_barrier = rcu_barrier,
+ .fqs = rcu_force_quiescent_state,
+ .stats = NULL,
+ .irq_capable = 1,
+ .can_boost = rcu_can_boost(),
+ .name = "rcu"
+};
+
+/*
+ * Definitions for rcu_bh torture testing.
+ */
+
+static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
+{
+ rcu_read_lock_bh();
+ return 0;
+}
+
+static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
+{
+ rcu_read_unlock_bh();
+}
+
+static int rcu_bh_torture_completed(void)
+{
+ return rcu_batches_completed_bh();
+}
+
+static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_bh_ops = {
+ .init = rcu_sync_torture_init,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_bh_torture_deferred_free,
+ .sync = synchronize_rcu_bh,
+ .exp_sync = synchronize_rcu_bh_expedited,
+ .call = call_rcu_bh,
+ .cb_barrier = rcu_barrier_bh,
+ .fqs = rcu_bh_force_quiescent_state,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh"
+};
+
+/*
+ * Don't even think about trying any of these in real life!!!
+ * The names include "busted", and they really mean it!
+ * The only purpose of these functions is to provide a buggy RCU
+ * implementation to make sure that rcutorture correctly emits
+ * buggy-RCU error messages.
+ */
+static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
+{
+ /* This is a deliberate bug for testing purposes only! */
+ rcu_torture_cb(&p->rtort_rcu);
+}
+
+static void synchronize_rcu_busted(void)
+{
+ /* This is a deliberate bug for testing purposes only! */
+}
+
+static void
+call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ /* This is a deliberate bug for testing purposes only! */
+ func(head);
+}
+
+static struct rcu_torture_ops rcu_busted_ops = {
+ .init = rcu_sync_torture_init,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_no_completed,
+ .deferred_free = rcu_busted_torture_deferred_free,
+ .sync = synchronize_rcu_busted,
+ .exp_sync = synchronize_rcu_busted,
+ .call = call_rcu_busted,
+ .cb_barrier = NULL,
+ .fqs = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_busted"
+};
+
+/*
+ * Definitions for srcu torture testing.
+ */
+
+DEFINE_STATIC_SRCU(srcu_ctl);
+
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
+{
+ return srcu_read_lock(&srcu_ctl);
+}
+
+static void srcu_read_delay(struct torture_random_state *rrsp)
+{
+ long delay;
+ const long uspertick = 1000000 / HZ;
+ const long longdelay = 10;
+
+ /* We want there to be long-running readers, but not all the time. */
+
+ delay = torture_random(rrsp) %
+ (nrealreaders * 2 * longdelay * uspertick);
+ if (!delay)
+ schedule_timeout_interruptible(longdelay);
+ else
+ rcu_read_delay(rrsp);
+}
+
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
+{
+ srcu_read_unlock(&srcu_ctl, idx);
+}
+
+static int srcu_torture_completed(void)
+{
+ return srcu_batches_completed(&srcu_ctl);
+}
+
+static void srcu_torture_deferred_free(struct rcu_torture *rp)
+{
+ call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
+}
+
+static void srcu_torture_synchronize(void)
+{
+ synchronize_srcu(&srcu_ctl);
+}
+
+static void srcu_torture_call(struct rcu_head *head,
+ void (*func)(struct rcu_head *head))
+{
+ call_srcu(&srcu_ctl, head, func);
+}
+
+static void srcu_torture_barrier(void)
+{
+ srcu_barrier(&srcu_ctl);
+}
+
+static void srcu_torture_stats(char *page)
+{
+ int cpu;
+ int idx = srcu_ctl.completed & 0x1;
+
+ page += sprintf(page, "%s%s per-CPU(idx=%d):",
+ torture_type, TORTURE_FLAG, idx);
+ for_each_possible_cpu(cpu) {
+ page += sprintf(page, " %d(%lu,%lu)", cpu,
+ per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
+ per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
+ }
+ sprintf(page, "\n");
+}
+
+static void srcu_torture_synchronize_expedited(void)
+{
+ synchronize_srcu_expedited(&srcu_ctl);
+}
+
+static struct rcu_torture_ops srcu_ops = {
+ .init = rcu_sync_torture_init,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .completed = srcu_torture_completed,
+ .deferred_free = srcu_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .exp_sync = srcu_torture_synchronize_expedited,
+ .call = srcu_torture_call,
+ .cb_barrier = srcu_torture_barrier,
+ .stats = srcu_torture_stats,
+ .name = "srcu"
+};
+
+/*
+ * Definitions for sched torture testing.
+ */
+
+static int sched_torture_read_lock(void)
+{
+ preempt_disable();
+ return 0;
+}
+
+static void sched_torture_read_unlock(int idx)
+{
+ preempt_enable();
+}
+
+static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops sched_ops = {
+ .init = rcu_sync_torture_init,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = rcu_no_completed,
+ .deferred_free = rcu_sched_torture_deferred_free,
+ .sync = synchronize_sched,
+ .exp_sync = synchronize_sched_expedited,
+ .call = call_rcu_sched,
+ .cb_barrier = rcu_barrier_sched,
+ .fqs = rcu_sched_force_quiescent_state,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "sched"
+};
+
+/*
+ * RCU torture priority-boost testing. Runs one real-time thread per
+ * CPU for moderate bursts, repeatedly registering RCU callbacks and
+ * spinning waiting for them to be invoked. If a given callback takes
+ * too long to be invoked, we assume that priority inversion has occurred.
+ */
+
+struct rcu_boost_inflight {
+ struct rcu_head rcu;
+ int inflight;
+};
+
+static void rcu_torture_boost_cb(struct rcu_head *head)
+{
+ struct rcu_boost_inflight *rbip =
+ container_of(head, struct rcu_boost_inflight, rcu);
+
+ smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
+ rbip->inflight = 0;
+}
+
+static int rcu_torture_boost(void *arg)
+{
+ unsigned long call_rcu_time;
+ unsigned long endtime;
+ unsigned long oldstarttime;
+ struct rcu_boost_inflight rbi = { .inflight = 0 };
+ struct sched_param sp;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_boost started");
+
+ /* Set real-time priority. */
+ sp.sched_priority = 1;
+ if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
+ VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
+ n_rcu_torture_boost_rterror++;
+ }
+
+ init_rcu_head_on_stack(&rbi.rcu);
+ /* Each pass through the following loop does one boost-test cycle. */
+ do {
+ /* Wait for the next test interval. */
+ oldstarttime = boost_starttime;
+ while (ULONG_CMP_LT(jiffies, oldstarttime)) {
+ schedule_timeout_interruptible(oldstarttime - jiffies);
+ stutter_wait("rcu_torture_boost");
+ if (torture_must_stop())
+ goto checkwait;
+ }
+
+ /* Do one boost-test interval. */
+ endtime = oldstarttime + test_boost_duration * HZ;
+ call_rcu_time = jiffies;
+ while (ULONG_CMP_LT(jiffies, endtime)) {
+ /* If we don't have a callback in flight, post one. */
+ if (!rbi.inflight) {
+ smp_mb(); /* RCU core before ->inflight = 1. */
+ rbi.inflight = 1;
+ call_rcu(&rbi.rcu, rcu_torture_boost_cb);
+ if (jiffies - call_rcu_time >
+ test_boost_duration * HZ - HZ / 2) {
+ VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+ n_rcu_torture_boost_failure++;
+ }
+ call_rcu_time = jiffies;
+ }
+ cond_resched();
+ stutter_wait("rcu_torture_boost");
+ if (torture_must_stop())
+ goto checkwait;
+ }
+
+ /*
+ * Set the start time of the next test interval.
+ * Yes, this is vulnerable to long delays, but such
+ * delays simply cause a false negative for the next
+ * interval. Besides, we are running at RT priority,
+ * so delays should be relatively rare.
+ */
+ while (oldstarttime == boost_starttime &&
+ !kthread_should_stop()) {
+ if (mutex_trylock(&boost_mutex)) {
+ boost_starttime = jiffies +
+ test_boost_interval * HZ;
+ n_rcu_torture_boosts++;
+ mutex_unlock(&boost_mutex);
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Go do the stutter. */
+checkwait: stutter_wait("rcu_torture_boost");
+ } while (!torture_must_stop());
+
+ /* Clean up and exit. */
+ while (!kthread_should_stop() || rbi.inflight) {
+ torture_shutdown_absorb("rcu_torture_boost");
+ schedule_timeout_uninterruptible(1);
+ }
+ smp_mb(); /* order accesses to ->inflight before stack-frame death. */
+ destroy_rcu_head_on_stack(&rbi.rcu);
+ torture_kthread_stopping("rcu_torture_boost");
+ return 0;
+}
+
+/*
+ * RCU torture force-quiescent-state kthread. Repeatedly induces
+ * bursts of calls to force_quiescent_state(), increasing the probability
+ * of occurrence of some important types of race conditions.
+ */
+static int
+rcu_torture_fqs(void *arg)
+{
+ unsigned long fqs_resume_time;
+ int fqs_burst_remaining;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
+ do {
+ fqs_resume_time = jiffies + fqs_stutter * HZ;
+ while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
+ !kthread_should_stop()) {
+ schedule_timeout_interruptible(1);
+ }
+ fqs_burst_remaining = fqs_duration;
+ while (fqs_burst_remaining > 0 &&
+ !kthread_should_stop()) {
+ cur_ops->fqs();
+ udelay(fqs_holdoff);
+ fqs_burst_remaining -= fqs_holdoff;
+ }
+ stutter_wait("rcu_torture_fqs");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_fqs");
+ return 0;
+}
+
+/*
+ * RCU torture writer kthread. Repeatedly substitutes a new structure
+ * for that pointed to by rcu_torture_current, freeing the old structure
+ * after a series of grace periods (the "pipeline").
+ */
+static int
+rcu_torture_writer(void *arg)
+{
+ bool exp;
+ int i;
+ struct rcu_torture *rp;
+ struct rcu_torture *rp1;
+ struct rcu_torture *old_rp;
+ static DEFINE_TORTURE_RANDOM(rand);
+
+ VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
+ set_user_nice(current, MAX_NICE);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ rp = rcu_torture_alloc();
+ if (rp == NULL)
+ continue;
+ rp->rtort_pipe_count = 0;
+ udelay(torture_random(&rand) & 0x3ff);
+ old_rp = rcu_dereference_check(rcu_torture_current,
+ current == writer_task);
+ rp->rtort_mbtest = 1;
+ rcu_assign_pointer(rcu_torture_current, rp);
+ smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
+ if (old_rp) {
+ i = old_rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ old_rp->rtort_pipe_count++;
+ if (gp_normal == gp_exp)
+ exp = !!(torture_random(&rand) & 0x80);
+ else
+ exp = gp_exp;
+ if (!exp) {
+ cur_ops->deferred_free(old_rp);
+ } else {
+ cur_ops->exp_sync();
+ list_add(&old_rp->rtort_free,
+ &rcu_torture_removed);
+ list_for_each_entry_safe(rp, rp1,
+ &rcu_torture_removed,
+ rtort_free) {
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >=
+ RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+ rcu_torture_free(rp);
+ }
+ }
+ }
+ }
+ rcutorture_record_progress(++rcu_torture_current_version);
+ stutter_wait("rcu_torture_writer");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_writer");
+ return 0;
+}
+
+/*
+ * RCU torture fake writer kthread. Repeatedly calls sync, with a random
+ * delay between calls.
+ */
+static int
+rcu_torture_fakewriter(void *arg)
+{
+ DEFINE_TORTURE_RANDOM(rand);
+
+ VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
+ set_user_nice(current, MAX_NICE);
+
+ do {
+ schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
+ udelay(torture_random(&rand) & 0x3ff);
+ if (cur_ops->cb_barrier != NULL &&
+ torture_random(&rand) % (nfakewriters * 8) == 0) {
+ cur_ops->cb_barrier();
+ } else if (gp_normal == gp_exp) {
+ if (torture_random(&rand) & 0x80)
+ cur_ops->sync();
+ else
+ cur_ops->exp_sync();
+ } else if (gp_normal) {
+ cur_ops->sync();
+ } else {
+ cur_ops->exp_sync();
+ }
+ stutter_wait("rcu_torture_fakewriter");
+ } while (!torture_must_stop());
+
+ torture_kthread_stopping("rcu_torture_fakewriter");
+ return 0;
+}
+
+void rcutorture_trace_dump(void)
+{
+ static atomic_t beenhere = ATOMIC_INIT(0);
+
+ if (atomic_read(&beenhere))
+ return;
+ if (atomic_xchg(&beenhere, 1) != 0)
+ return;
+ ftrace_dump(DUMP_ALL);
+}
+
+/*
+ * RCU torture reader from timer handler. Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array. The
+ * counter in the element should never be greater than 1; otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(unsigned long unused)
+{
+ int idx;
+ int completed;
+ int completed_end;
+ static DEFINE_TORTURE_RANDOM(rand);
+ static DEFINE_SPINLOCK(rand_lock);
+ struct rcu_torture *p;
+ int pipe_count;
+ unsigned long long ts;
+
+ idx = cur_ops->readlock();
+ completed = cur_ops->completed();
+ ts = rcu_trace_clock_local();
+ p = rcu_dereference_check(rcu_torture_current,
+ rcu_read_lock_bh_held() ||
+ rcu_read_lock_sched_held() ||
+ srcu_read_lock_held(&srcu_ctl));
+ if (p == NULL) {
+ /* Leave because rcu_torture_writer is not yet underway */
+ cur_ops->readunlock(idx);
+ return;
+ }
+ if (p->rtort_mbtest == 0)
+ atomic_inc(&n_rcu_torture_mberror);
+ spin_lock(&rand_lock);
+ cur_ops->read_delay(&rand);
+ n_rcu_torture_timers++;
+ spin_unlock(&rand_lock);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+ if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ pipe_count = RCU_TORTURE_PIPE_LEN;
+ }
+ completed_end = cur_ops->completed();
+ if (pipe_count > 1) {
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
+ completed, completed_end);
+ rcutorture_trace_dump();
+ }
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
+ completed = completed_end - completed;
+ if (completed > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ completed = RCU_TORTURE_PIPE_LEN;
+ }
+ __this_cpu_inc(rcu_torture_batch[completed]);
+ preempt_enable();
+ cur_ops->readunlock(idx);
+}
+
+/*
+ * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array. The
+ * counter in the element should never be greater than 1; otherwise, the
+ * RCU implementation is broken.
+ */
+static int
+rcu_torture_reader(void *arg)
+{
+ int completed;
+ int completed_end;
+ int idx;
+ DEFINE_TORTURE_RANDOM(rand);
+ struct rcu_torture *p;
+ int pipe_count;
+ struct timer_list t;
+ unsigned long long ts;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
+ set_user_nice(current, MAX_NICE);
+ if (irqreader && cur_ops->irq_capable)
+ setup_timer_on_stack(&t, rcu_torture_timer, 0);
+
+ do {
+ if (irqreader && cur_ops->irq_capable) {
+ if (!timer_pending(&t))
+ mod_timer(&t, jiffies + 1);
+ }
+ idx = cur_ops->readlock();
+ completed = cur_ops->completed();
+ ts = rcu_trace_clock_local();
+ p = rcu_dereference_check(rcu_torture_current,
+ rcu_read_lock_bh_held() ||
+ rcu_read_lock_sched_held() ||
+ srcu_read_lock_held(&srcu_ctl));
+ if (p == NULL) {
+ /* Wait for rcu_torture_writer to get underway */
+ cur_ops->readunlock(idx);
+ schedule_timeout_interruptible(HZ);
+ continue;
+ }
+ if (p->rtort_mbtest == 0)
+ atomic_inc(&n_rcu_torture_mberror);
+ cur_ops->read_delay(&rand);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+ if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ pipe_count = RCU_TORTURE_PIPE_LEN;
+ }
+ completed_end = cur_ops->completed();
+ if (pipe_count > 1) {
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+ ts, completed, completed_end);
+ rcutorture_trace_dump();
+ }
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
+ completed = completed_end - completed;
+ if (completed > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ completed = RCU_TORTURE_PIPE_LEN;
+ }
+ __this_cpu_inc(rcu_torture_batch[completed]);
+ preempt_enable();
+ cur_ops->readunlock(idx);
+ schedule();
+ stutter_wait("rcu_torture_reader");
+ } while (!torture_must_stop());
+ if (irqreader && cur_ops->irq_capable)
+ del_timer_sync(&t);
+ torture_kthread_stopping("rcu_torture_reader");
+ return 0;
+}
+
+/*
+ * Create an RCU-torture statistics message in the specified buffer.
+ */
+static void
+rcu_torture_printk(char *page)
+{
+ int cpu;
+ int i;
+ long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
+ long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
+ batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
+ }
+ }
+ for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
+ if (pipesummary[i] != 0)
+ break;
+ }
+ page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page,
+ "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+ atomic_read(&n_rcu_torture_alloc),
+ atomic_read(&n_rcu_torture_alloc_fail),
+ atomic_read(&n_rcu_torture_free));
+ page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
+ atomic_read(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror);
+ page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
+ n_rcu_torture_boost_failure,
+ n_rcu_torture_boosts,
+ n_rcu_torture_timers);
+ page = torture_onoff_stats(page);
+ page += sprintf(page, "barrier: %ld/%ld:%ld",
+ n_barrier_successes,
+ n_barrier_attempts,
+ n_rcu_torture_barrier_error);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+ if (atomic_read(&n_rcu_torture_mberror) != 0 ||
+ n_rcu_torture_barrier_error != 0 ||
+ n_rcu_torture_boost_ktrerror != 0 ||
+ n_rcu_torture_boost_rterror != 0 ||
+ n_rcu_torture_boost_failure != 0 ||
+ i > 1) {
+ page += sprintf(page, "!!! ");
+ atomic_inc(&n_rcu_torture_error);
+ WARN_ON_ONCE(1);
+ }
+ page += sprintf(page, "Reader Pipe: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ page += sprintf(page, " %ld", pipesummary[i]);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page, "Reader Batch: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ page += sprintf(page, " %ld", batchsummary[i]);
+ page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+ page += sprintf(page, "Free-Block Circulation: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ page += sprintf(page, " %d",
+ atomic_read(&rcu_torture_wcount[i]));
+ }
+ page += sprintf(page, "\n");
+ if (cur_ops->stats)
+ cur_ops->stats(page);
+}
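
rcu_torture_printk() builds its message with the page += sprintf(page, ...) idiom: sprintf() returns the number of characters written, so each call leaves the cursor on the terminating NUL, ready for the next append. A self-contained sketch (userspace C, illustrative):

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            char *page = buf;

            /* Each call appends at the current position and advances the
             * cursor by the number of characters written. */
            page += sprintf(page, "rtc: %p ", (void *)0);
            page += sprintf(page, "ver: %lu ", 42UL);
            page += sprintf(page, "\n");
            fputs(buf, stdout);
            return 0;
    }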
+
+/*
+ * Print torture statistics. Caller must ensure that there is only
+ * one call to this function at a given time!!! This is normally
+ * accomplished by relying on the module system to only have one copy
+ * of the module loaded, and then by giving the rcu_torture_stats
+ * kthread full control (or the init/cleanup functions when rcu_torture_stats
+ * thread is not running).
+ */
+static void
+rcu_torture_stats_print(void)
+{
+ int size = nr_cpu_ids * 200 + 8192;
+ char *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("rcu-torture: Out of memory, need: %d", size);
+ return;
+ }
+ rcu_torture_printk(buf);
+ pr_alert("%s", buf);
+ kfree(buf);
+}
+
+/*
+ * Periodically prints torture statistics, if periodic statistics printing
+ * was specified via the stat_interval module parameter.
+ */
+static int
+rcu_torture_stats(void *arg)
+{
+ VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
+ do {
+ schedule_timeout_interruptible(stat_interval * HZ);
+ rcu_torture_stats_print();
+ torture_shutdown_absorb("rcu_torture_stats");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_stats");
+ return 0;
+}
+
+static inline void
+rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
+{
+ pr_alert("%s" TORTURE_FLAG
+ "--- %s: nreaders=%d nfakewriters=%d "
+ "stat_interval=%d verbose=%d test_no_idle_hz=%d "
+ "shuffle_interval=%d stutter=%d irqreader=%d "
+ "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
+ "test_boost=%d/%d test_boost_interval=%d "
+ "test_boost_duration=%d shutdown_secs=%d "
+ "stall_cpu=%d stall_cpu_holdoff=%d "
+ "n_barrier_cbs=%d "
+ "onoff_interval=%d onoff_holdoff=%d\n",
+ torture_type, tag, nrealreaders, nfakewriters,
+ stat_interval, verbose, test_no_idle_hz, shuffle_interval,
+ stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
+ test_boost, cur_ops->can_boost,
+ test_boost_interval, test_boost_duration, shutdown_secs,
+ stall_cpu, stall_cpu_holdoff,
+ n_barrier_cbs,
+ onoff_interval, onoff_holdoff);
+}
+
+static void rcutorture_booster_cleanup(int cpu)
+{
+ struct task_struct *t;
+
+ if (boost_tasks[cpu] == NULL)
+ return;
+ mutex_lock(&boost_mutex);
+ t = boost_tasks[cpu];
+ boost_tasks[cpu] = NULL;
+ mutex_unlock(&boost_mutex);
+
+ /* This must be outside of the mutex, otherwise deadlock! */
+ torture_stop_kthread(rcu_torture_boost, t);
+}
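
The deadlock warning above is worth a sketch: torture_stop_kthread() blocks until the thread exits, and the boost thread may itself need boost_mutex on its way out, so the stopper claims the task pointer under the mutex but waits outside it. The same pattern in userspace pthreads (illustrative, assuming a worker that could contend for the lock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t task_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t *task_slot;    /* guarded by task_mutex */
    static pthread_t task;

    static void *worker(void *arg)
    {
            /* A real worker might take task_mutex on its exit path,
             * which is why the joiner must not hold it while waiting. */
            (void)arg;
            return NULL;
    }

    int main(void)
    {
            pthread_t *t;

            pthread_create(&task, NULL, worker, NULL);
            task_slot = &task;

            pthread_mutex_lock(&task_mutex);
            t = task_slot;          /* claim the task under the lock */
            task_slot = NULL;
            pthread_mutex_unlock(&task_mutex);

            if (t)
                    pthread_join(*t, NULL); /* wait outside the mutex */
            puts("worker joined without holding task_mutex");
            return 0;
    }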
+
+static int rcutorture_booster_init(int cpu)
+{
+ int retval;
+
+ if (boost_tasks[cpu] != NULL)
+ return 0; /* Already created, nothing more to do. */
+
+ /* Don't allow time recalculation while creating a new task. */
+ mutex_lock(&boost_mutex);
+ VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
+ boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
+ cpu_to_node(cpu),
+ "rcu_torture_boost");
+ if (IS_ERR(boost_tasks[cpu])) {
+ retval = PTR_ERR(boost_tasks[cpu]);
+ VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
+ n_rcu_torture_boost_ktrerror++;
+ boost_tasks[cpu] = NULL;
+ mutex_unlock(&boost_mutex);
+ return retval;
+ }
+ kthread_bind(boost_tasks[cpu], cpu);
+ wake_up_process(boost_tasks[cpu]);
+ mutex_unlock(&boost_mutex);
+ return 0;
+}
+
+/*
+ * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
+ * induces a CPU stall for the time specified by stall_cpu.
+ */
+static int rcu_torture_stall(void *args)
+{
+ unsigned long stop_at;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
+ if (stall_cpu_holdoff > 0) {
+ VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
+ schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
+ VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
+ }
+ if (!kthread_should_stop()) {
+ stop_at = get_seconds() + stall_cpu;
+ /* RCU CPU stall is expected behavior in following code. */
+ pr_alert("rcu_torture_stall start.\n");
+ rcu_read_lock();
+ preempt_disable();
+ while (ULONG_CMP_LT(get_seconds(), stop_at))
+ continue; /* Induce RCU CPU stall warning. */
+ preempt_enable();
+ rcu_read_unlock();
+ pr_alert("rcu_torture_stall end.\n");
+ }
+ torture_shutdown_absorb("rcu_torture_stall");
+ while (!kthread_should_stop())
+ schedule_timeout_interruptible(10 * HZ);
+ return 0;
+}
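
The stall loop compares times with ULONG_CMP_LT() instead of a plain < so the comparison stays correct if the counter wraps; the macro (mirroring the kernel's rcupdate.h definition) treats a wrapped difference in the upper half of the unsigned range as "behind". A userspace demonstration:

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe "a < b" for free-running unsigned counters: the
     * difference a - b wraps, and a result in the upper half of the
     * range means a is actually behind b. */
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
            unsigned long now = ULONG_MAX - 2;      /* about to wrap */
            unsigned long stop_at = now + 10;       /* wraps past zero */

            printf("naive <:      %d\n", now < stop_at);  /* 0: fooled by wrap */
            printf("ULONG_CMP_LT: %d\n", ULONG_CMP_LT(now, stop_at)); /* 1 */
            return 0;
    }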
+
+/* Spawn CPU-stall kthread, if stall_cpu specified. */
+static int __init rcu_torture_stall_init(void)
+{
+ if (stall_cpu <= 0)
+ return 0;
+ return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
+}
+
+/* Callback function for RCU barrier testing. */
+static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
+{
+ atomic_inc(&barrier_cbs_invoked);
+}
+
+/* kthread function to register callbacks used to test RCU barriers. */
+static int rcu_torture_barrier_cbs(void *arg)
+{
+ long myid = (long)arg;
+ bool lastphase = false;
+ bool newphase;
+ struct rcu_head rcu;
+
+ init_rcu_head_on_stack(&rcu);
+ VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
+ set_user_nice(current, MAX_NICE);
+ do {
+ wait_event(barrier_cbs_wq[myid],
+ (newphase =
+ ACCESS_ONCE(barrier_phase)) != lastphase ||
+ torture_must_stop());
+ lastphase = newphase;
+ smp_mb(); /* ensure barrier_phase load before ->call(). */
+ if (torture_must_stop())
+ break;
+ cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+ if (atomic_dec_and_test(&barrier_cbs_count))
+ wake_up(&barrier_wq);
+ } while (!torture_must_stop());
+ cur_ops->cb_barrier();
+ destroy_rcu_head_on_stack(&rcu);
+ torture_kthread_stopping("rcu_torture_barrier_cbs");
+ return 0;
+}
+
+/* kthread function to drive and coordinate RCU barrier testing. */
+static int rcu_torture_barrier(void *arg)
+{
+ int i;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
+ do {
+ atomic_set(&barrier_cbs_invoked, 0);
+ atomic_set(&barrier_cbs_count, n_barrier_cbs);
+ smp_mb(); /* Ensure barrier_phase after prior assignments. */
+ barrier_phase = !barrier_phase;
+ for (i = 0; i < n_barrier_cbs; i++)
+ wake_up(&barrier_cbs_wq[i]);
+ wait_event(barrier_wq,
+ atomic_read(&barrier_cbs_count) == 0 ||
+ torture_must_stop());
+ if (torture_must_stop())
+ break;
+ n_barrier_attempts++;
+ cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
+ if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
+ n_rcu_torture_barrier_error++;
+ WARN_ON_ONCE(1);
+ }
+ n_barrier_successes++;
+ schedule_timeout_interruptible(HZ / 10);
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_barrier");
+ return 0;
+}
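
The barrier test is a counting handshake: the coordinator flips barrier_phase and wakes all n_barrier_cbs kthreads, each kthread posts one callback and decrements barrier_cbs_count, and the coordinator proceeds once the count hits zero, then checks that cb_barrier() really waited for every callback. A condensed pthreads rendering of the same handshake (a sketch, not the kernel implementation):

    #include <pthread.h>
    #include <stdio.h>

    #define NWORKERS 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t phase_cv = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
    static int phase;       /* flipped by the coordinator; < 0 means stop */
    static int pending;     /* workers yet to act this phase */

    static void *worker(void *arg)
    {
            int seen = 0;

            (void)arg;
            pthread_mutex_lock(&lock);
            while (phase >= 0) {
                    while (phase == seen && phase >= 0)
                            pthread_cond_wait(&phase_cv, &lock);
                    if (phase < 0)
                            break;
                    seen = phase;
                    if (--pending == 0)     /* this phase's "callback" */
                            pthread_cond_signal(&done_cv);
            }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t tids[NWORKERS];
            int i, round;

            for (i = 0; i < NWORKERS; i++)
                    pthread_create(&tids[i], NULL, worker, NULL);
            for (round = 1; round <= 3; round++) {
                    pthread_mutex_lock(&lock);
                    pending = NWORKERS;
                    phase = round;                  /* flip the phase */
                    pthread_cond_broadcast(&phase_cv);
                    while (pending > 0)             /* wait_event() analogue */
                            pthread_cond_wait(&done_cv, &lock);
                    pthread_mutex_unlock(&lock);
                    printf("round %d: all %d workers responded\n",
                           round, NWORKERS);
            }
            pthread_mutex_lock(&lock);
            phase = -1;                             /* ask workers to stop */
            pthread_cond_broadcast(&phase_cv);
            pthread_mutex_unlock(&lock);
            for (i = 0; i < NWORKERS; i++)
                    pthread_join(tids[i], NULL);
            return 0;
    }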
+
+/* Initialize RCU barrier testing. */
+static int rcu_torture_barrier_init(void)
+{
+ int i;
+ int ret;
+
+ if (n_barrier_cbs == 0)
+ return 0;
+ if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
+ pr_alert("%s" TORTURE_FLAG
+ " Call or barrier ops missing for %s,\n",
+ torture_type, cur_ops->name);
+ pr_alert("%s" TORTURE_FLAG
+ " RCU barrier testing omitted from run.\n",
+ torture_type);
+ return 0;
+ }
+ atomic_set(&barrier_cbs_count, 0);
+ atomic_set(&barrier_cbs_invoked, 0);
+ barrier_cbs_tasks =
+ kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
+ GFP_KERNEL);
+ barrier_cbs_wq =
+ kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
+ GFP_KERNEL);
+ if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
+ return -ENOMEM;
+ for (i = 0; i < n_barrier_cbs; i++) {
+ init_waitqueue_head(&barrier_cbs_wq[i]);
+ ret = torture_create_kthread(rcu_torture_barrier_cbs,
+ (void *)(long)i,
+ barrier_cbs_tasks[i]);
+ if (ret)
+ return ret;
+ }
+ return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
+}
+
+/* Clean up after RCU barrier testing. */
+static void rcu_torture_barrier_cleanup(void)
+{
+ int i;
+
+ torture_stop_kthread(rcu_torture_barrier, barrier_task);
+ if (barrier_cbs_tasks != NULL) {
+ for (i = 0; i < n_barrier_cbs; i++)
+ torture_stop_kthread(rcu_torture_barrier_cbs,
+ barrier_cbs_tasks[i]);
+ kfree(barrier_cbs_tasks);
+ barrier_cbs_tasks = NULL;
+ }
+ kfree(barrier_cbs_wq); /* kfree(NULL) is a no-op */
+ barrier_cbs_wq = NULL;
+}
+
+static int rcutorture_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ (void)rcutorture_booster_init(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ rcutorture_booster_cleanup(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rcutorture_cpu_nb = {
+ .notifier_call = rcutorture_cpu_notify,
+};
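
This is the classic (pre-cpuhp-state) hotplug notifier shape: one callback multiplexes every event, and the module acts only on the transitions it cares about, spawning a booster when a CPU comes up (or fails to go down) and reaping it before a CPU goes down. In miniature (userspace C, illustrative):

    #include <stdio.h>

    enum cpu_action { CPU_ONLINE, CPU_DOWN_PREPARE, CPU_DOWN_FAILED, CPU_DEAD };

    /* One callback multiplexes every hotplug event; unhandled actions
     * fall through to the default case. */
    static int cpu_notify(enum cpu_action action, long cpu)
    {
            switch (action) {
            case CPU_ONLINE:
            case CPU_DOWN_FAILED:
                    printf("cpu%ld: spawn booster\n", cpu);
                    break;
            case CPU_DOWN_PREPARE:
                    printf("cpu%ld: reap booster\n", cpu);
                    break;
            default:
                    break;
            }
            return 0;
    }

    int main(void)
    {
            cpu_notify(CPU_ONLINE, 1);
            cpu_notify(CPU_DOWN_PREPARE, 1);
            cpu_notify(CPU_DEAD, 1);        /* ignored */
            return 0;
    }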
+
+static void
+rcu_torture_cleanup(void)
+{
+ int i;
+
+ rcutorture_record_test_transition();
+ if (torture_cleanup()) {
+ if (cur_ops->cb_barrier != NULL)
+ cur_ops->cb_barrier();
+ return;
+ }
+
+ rcu_torture_barrier_cleanup();
+ torture_stop_kthread(rcu_torture_stall, stall_task);
+ torture_stop_kthread(rcu_torture_writer, writer_task);
+
+ if (reader_tasks) {
+ for (i = 0; i < nrealreaders; i++)
+ torture_stop_kthread(rcu_torture_reader,
+ reader_tasks[i]);
+ kfree(reader_tasks);
+ }
+ rcu_torture_current = NULL;
+
+ if (fakewriter_tasks) {
+ for (i = 0; i < nfakewriters; i++) {
+ torture_stop_kthread(rcu_torture_fakewriter,
+ fakewriter_tasks[i]);
+ }
+ kfree(fakewriter_tasks);
+ fakewriter_tasks = NULL;
+ }
+
+ torture_stop_kthread(rcu_torture_stats, stats_task);
+ torture_stop_kthread(rcu_torture_fqs, fqs_task);
+ if ((test_boost == 1 && cur_ops->can_boost) ||
+ test_boost == 2) {
+ unregister_cpu_notifier(&rcutorture_cpu_nb);
+ for_each_possible_cpu(i)
+ rcutorture_booster_cleanup(i);
+ }
+
+ /* Wait for all RCU callbacks to fire. */
+
+ if (cur_ops->cb_barrier != NULL)
+ cur_ops->cb_barrier();
+
+ rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+
+ if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
+ rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+ else if (torture_onoff_failures())
+ rcu_torture_print_module_parms(cur_ops,
+ "End of test: RCU_HOTPLUG");
+ else
+ rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+static void rcu_torture_leak_cb(struct rcu_head *rhp)
+{
+}
+
+static void rcu_torture_err_cb(struct rcu_head *rhp)
+{
+ /*
+ * This -might- happen due to race conditions, but is unlikely.
+ * The scenario that leads to this happening is that the
+ * first of the pair of duplicate callbacks is queued,
+ * someone else starts a grace period that includes that
+ * callback, then the second of the pair must wait for the
+ * next grace period. Unlikely, but can happen. If it
+ * does happen, the debug-objects subsystem won't have splatted.
+ */
+ pr_alert("rcutorture: duplicated callback was invoked.\n");
+}
+#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+/*
+ * Verify that double-free causes debug-objects to complain, but only
+ * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
+ * cannot be carried out.
+ */
+static void rcu_test_debug_objects(void)
+{
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+ struct rcu_head rh1;
+ struct rcu_head rh2;
+
+ init_rcu_head_on_stack(&rh1);
+ init_rcu_head_on_stack(&rh2);
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
+
+ /* Try to queue the rh2 pair of callbacks for the same grace period. */
+ preempt_disable(); /* Prevent preemption from interrupting test. */
+ rcu_read_lock(); /* Make it impossible to finish a grace period. */
+ call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
+ local_irq_disable(); /* Make it harder to start a new grace period. */
+ call_rcu(&rh2, rcu_torture_leak_cb);
+ call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ local_irq_enable();
+ rcu_read_unlock();
+ preempt_enable();
+
+ /* Wait for them all to get done so we can safely return. */
+ rcu_barrier();
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
+ destroy_rcu_head_on_stack(&rh1);
+ destroy_rcu_head_on_stack(&rh2);
+#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+ pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
+#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+}
+
+static int __init
+rcu_torture_init(void)
+{
+ int i;
+ int cpu;
+ int firsterr = 0;
+ static struct rcu_torture_ops *torture_ops[] = {
+ &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
+ };
+
+ torture_init_begin(torture_type, verbose, &rcutorture_runnable);
+
+ /* Process args and tell the world that the torturer is on the job. */
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
+ cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cur_ops->name) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(torture_ops)) {
+ pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
+ torture_type);
+ pr_alert("rcu-torture types:");
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
+ pr_alert(" %s", torture_ops[i]->name);
+ pr_alert("\n");
+ torture_init_end();
+ return -EINVAL;
+ }
+ if (cur_ops->fqs == NULL && fqs_duration != 0) {
+ pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
+ fqs_duration = 0;
+ }
+ if (cur_ops->init)
+ cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
+ if (nreaders >= 0)
+ nrealreaders = nreaders;
+ else
+ nrealreaders = 2 * num_online_cpus();
+ rcu_torture_print_module_parms(cur_ops, "Start of test");
+
+ /* Set up the freelist. */
+
+ INIT_LIST_HEAD(&rcu_torture_freelist);
+ for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
+ rcu_tortures[i].rtort_mbtest = 0;
+ list_add_tail(&rcu_tortures[i].rtort_free,
+ &rcu_torture_freelist);
+ }
+
+ /* Initialize the statistics so that each run gets its own numbers. */
+
+ rcu_torture_current = NULL;
+ rcu_torture_current_version = 0;
+ atomic_set(&n_rcu_torture_alloc, 0);
+ atomic_set(&n_rcu_torture_alloc_fail, 0);
+ atomic_set(&n_rcu_torture_free, 0);
+ atomic_set(&n_rcu_torture_mberror, 0);
+ atomic_set(&n_rcu_torture_error, 0);
+ n_rcu_torture_barrier_error = 0;
+ n_rcu_torture_boost_ktrerror = 0;
+ n_rcu_torture_boost_rterror = 0;
+ n_rcu_torture_boost_failure = 0;
+ n_rcu_torture_boosts = 0;
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ atomic_set(&rcu_torture_wcount[i], 0);
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ per_cpu(rcu_torture_count, cpu)[i] = 0;
+ per_cpu(rcu_torture_batch, cpu)[i] = 0;
+ }
+ }
+
+ /* Start up the kthreads. */
+
+ firsterr = torture_create_kthread(rcu_torture_writer, NULL,
+ writer_task);
+ if (firsterr)
+ goto unwind;
+ fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
+ GFP_KERNEL);
+ if (fakewriter_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nfakewriters; i++) {
+ firsterr = torture_create_kthread(rcu_torture_fakewriter,
+ NULL, fakewriter_tasks[i]);
+ if (firsterr)
+ goto unwind;
+ }
+ reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
+ GFP_KERNEL);
+ if (reader_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nrealreaders; i++) {
+ firsterr = torture_create_kthread(rcu_torture_reader, NULL,
+ reader_tasks[i]);
+ if (firsterr)
+ goto unwind;
+ }
+ if (stat_interval > 0) {
+ firsterr = torture_create_kthread(rcu_torture_stats, NULL,
+ stats_task);
+ if (firsterr)
+ goto unwind;
+ }
+ if (test_no_idle_hz) {
+ firsterr = torture_shuffle_init(shuffle_interval * HZ);
+ if (firsterr)
+ goto unwind;
+ }
+ if (stutter < 0)
+ stutter = 0;
+ if (stutter) {
+ firsterr = torture_stutter_init(stutter * HZ);
+ if (firsterr)
+ goto unwind;
+ }
+ if (fqs_duration < 0)
+ fqs_duration = 0;
+ if (fqs_duration) {
+ /* Create the fqs thread */
+ firsterr = torture_create_kthread(rcu_torture_fqs, NULL, fqs_task);
+ if (firsterr)
+ goto unwind;
+ }
+ if (test_boost_interval < 1)
+ test_boost_interval = 1;
+ if (test_boost_duration < 2)
+ test_boost_duration = 2;
+ if ((test_boost == 1 && cur_ops->can_boost) ||
+ test_boost == 2) {
+
+ boost_starttime = jiffies + test_boost_interval * HZ;
+ register_cpu_notifier(&rcutorture_cpu_nb);
+ for_each_possible_cpu(i) {
+ if (cpu_is_offline(i))
+ continue; /* Heuristic: CPU can go offline. */
+ firsterr = rcutorture_booster_init(i);
+ if (firsterr)
+ goto unwind;
+ }
+ }
+ firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
+ if (firsterr)
+ goto unwind;
+ firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+ if (firsterr)
+ goto unwind;
+ firsterr = rcu_torture_stall_init();
+ if (firsterr)
+ goto unwind;
+ firsterr = rcu_torture_barrier_init();
+ if (firsterr)
+ goto unwind;
+ if (object_debug)
+ rcu_test_debug_objects();
+ rcutorture_record_test_transition();
+ torture_init_end();
+ return 0;
+
+unwind:
+ torture_init_end();
+ rcu_torture_cleanup();
+ return firsterr;
+}
+
+module_init(rcu_torture_init);
+module_exit(rcu_torture_cleanup);
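
rcu_torture_init() follows the kernel's standard goto-unwind error-handling shape: each allocation or kthread spawn sets firsterr and jumps to a single unwind label, which funnels every failure through the same cleanup path used at module unload. The shape in miniature (userspace C, illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static int *a, *b;

    static void cleanup(void)
    {
            free(b);        /* free(NULL) is a no-op, like kfree(NULL) */
            b = NULL;
            free(a);
            a = NULL;
    }

    static int init(void)
    {
            int err = 0;

            a = malloc(64);
            if (!a) {
                    err = -1;
                    goto unwind;
            }
            b = malloc(64);
            if (!b) {
                    err = -1;
                    goto unwind;
            }
            return 0;       /* success: resources stay allocated */

    unwind:
            cleanup();      /* one label, one teardown path */
            return err;
    }

    int main(void)
    {
            printf("init: %d\n", init());
            cleanup();
            return 0;
    }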
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 3318d82..c639556 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (C) IBM Corporation, 2006
* Copyright (C) Fujitsu, 2012
@@ -36,8 +36,6 @@
#include <linux/delay.h>
#include <linux/srcu.h>
-#include <trace/events/rcu.h>
-
#include "rcu.h"
/*
@@ -398,7 +396,7 @@
rcu_batch_queue(&sp->batch_queue, head);
if (!sp->running) {
sp->running = true;
- schedule_delayed_work(&sp->work, 0);
+ queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
}
spin_unlock_irqrestore(&sp->queue_lock, flags);
}
@@ -674,7 +672,8 @@
}
if (pending)
- schedule_delayed_work(&sp->work, SRCU_INTERVAL);
+ queue_delayed_work(system_power_efficient_wq,
+ &sp->work, SRCU_INTERVAL);
}
/*
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1254f31..d9efcc13 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
@@ -37,10 +37,6 @@
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>
-#ifdef CONFIG_RCU_TRACE
-#include <trace/events/rcu.h>
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
#include "rcu.h"
/* Forward declarations for tiny_plugin.h. */
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 280d06c..4315285 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -14,8 +14,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (c) 2010 Linaro
*
diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
deleted file mode 100644
index 732f8ae..0000000
--- a/kernel/rcu/torture.c
+++ /dev/null
@@ -1,2148 +0,0 @@
-/*
- * Read-Copy Update module-based torture test facility
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2005, 2006
- *
- * Authors: Paul E. McKenney <paulmck@us.ibm.com>
- * Josh Triplett <josh@freedesktop.org>
- *
- * See also: Documentation/RCU/torture.txt
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/freezer.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <linux/stat.h>
-#include <linux/srcu.h>
-#include <linux/slab.h>
-#include <linux/trace_clock.h>
-#include <asm/byteorder.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
-
-MODULE_ALIAS("rcutorture");
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "rcutorture."
-
-static int fqs_duration;
-module_param(fqs_duration, int, 0444);
-MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
-static int fqs_holdoff;
-module_param(fqs_holdoff, int, 0444);
-MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
-static int fqs_stutter = 3;
-module_param(fqs_stutter, int, 0444);
-MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
-static bool gp_exp;
-module_param(gp_exp, bool, 0444);
-MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
-static bool gp_normal;
-module_param(gp_normal, bool, 0444);
-MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
-static int irqreader = 1;
-module_param(irqreader, int, 0444);
-MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
-static int n_barrier_cbs;
-module_param(n_barrier_cbs, int, 0444);
-MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
-static int nfakewriters = 4;
-module_param(nfakewriters, int, 0444);
-MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-static int nreaders = -1;
-module_param(nreaders, int, 0444);
-MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-static int object_debug;
-module_param(object_debug, int, 0444);
-MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
-static int onoff_holdoff;
-module_param(onoff_holdoff, int, 0444);
-MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
-static int onoff_interval;
-module_param(onoff_interval, int, 0444);
-MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
-static int shuffle_interval = 3;
-module_param(shuffle_interval, int, 0444);
-MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-static int shutdown_secs;
-module_param(shutdown_secs, int, 0444);
-MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
-static int stall_cpu;
-module_param(stall_cpu, int, 0444);
-MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
-static int stall_cpu_holdoff = 10;
-module_param(stall_cpu_holdoff, int, 0444);
-MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
-static int stat_interval = 60;
-module_param(stat_interval, int, 0644);
-MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-static int stutter = 5;
-module_param(stutter, int, 0444);
-MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
-static int test_boost = 1;
-module_param(test_boost, int, 0444);
-MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
-static int test_boost_duration = 4;
-module_param(test_boost_duration, int, 0444);
-MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
-static int test_boost_interval = 7;
-module_param(test_boost_interval, int, 0444);
-MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
-static bool test_no_idle_hz = true;
-module_param(test_no_idle_hz, bool, 0444);
-MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-static char *torture_type = "rcu";
-module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
-static bool verbose;
-module_param(verbose, bool, 0444);
-MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-
-#define TORTURE_FLAG "-torture:"
-#define PRINTK_STRING(s) \
- do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_STRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_ERRSTRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
-
-static int nrealreaders;
-static struct task_struct *writer_task;
-static struct task_struct **fakewriter_tasks;
-static struct task_struct **reader_tasks;
-static struct task_struct *stats_task;
-static struct task_struct *shuffler_task;
-static struct task_struct *stutter_task;
-static struct task_struct *fqs_task;
-static struct task_struct *boost_tasks[NR_CPUS];
-static struct task_struct *shutdown_task;
-#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *onoff_task;
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static struct task_struct *stall_task;
-static struct task_struct **barrier_cbs_tasks;
-static struct task_struct *barrier_task;
-
-#define RCU_TORTURE_PIPE_LEN 10
-
-struct rcu_torture {
- struct rcu_head rtort_rcu;
- int rtort_pipe_count;
- struct list_head rtort_free;
- int rtort_mbtest;
-};
-
-static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture __rcu *rcu_torture_current;
-static unsigned long rcu_torture_current_version;
-static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
-static DEFINE_SPINLOCK(rcu_torture_lock);
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
- { 0 };
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
- { 0 };
-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
-static atomic_t n_rcu_torture_alloc;
-static atomic_t n_rcu_torture_alloc_fail;
-static atomic_t n_rcu_torture_free;
-static atomic_t n_rcu_torture_mberror;
-static atomic_t n_rcu_torture_error;
-static long n_rcu_torture_barrier_error;
-static long n_rcu_torture_boost_ktrerror;
-static long n_rcu_torture_boost_rterror;
-static long n_rcu_torture_boost_failure;
-static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
-static long n_offline_attempts;
-static long n_offline_successes;
-static unsigned long sum_offline;
-static int min_offline = -1;
-static int max_offline;
-static long n_online_attempts;
-static long n_online_successes;
-static unsigned long sum_online;
-static int min_online = -1;
-static int max_online;
-static long n_barrier_attempts;
-static long n_barrier_successes;
-static struct list_head rcu_torture_removed;
-static cpumask_var_t shuffle_tmp_mask;
-
-static int stutter_pause_test;
-
-#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
-#define RCUTORTURE_RUNNABLE_INIT 1
-#else
-#define RCUTORTURE_RUNNABLE_INIT 0
-#endif
-int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-module_param(rcutorture_runnable, int, 0444);
-MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
-
-#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
-#define rcu_can_boost() 1
-#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
-#define rcu_can_boost() 0
-#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
-
-#ifdef CONFIG_RCU_TRACE
-static u64 notrace rcu_trace_clock_local(void)
-{
- u64 ts = trace_clock_local();
- unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
- return ts;
-}
-#else /* #ifdef CONFIG_RCU_TRACE */
-static u64 notrace rcu_trace_clock_local(void)
-{
- return 0ULL;
-}
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
-static unsigned long shutdown_time; /* jiffies to system shutdown. */
-static unsigned long boost_starttime; /* jiffies of next boost test start. */
-DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
- /* and boost task create/destroy. */
-static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
-static bool barrier_phase; /* Test phase. */
-static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
-static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
-static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
-
-/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
-
-#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
-#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
-#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
-static int fullstop = FULLSTOP_RMMOD;
-/*
- * Protect fullstop transitions and spawning of kthreads.
- */
-static DEFINE_MUTEX(fullstop_mutex);
-
-/* Forward reference. */
-static void rcu_torture_cleanup(void);
-
-/*
- * Detect and respond to a system shutdown.
- */
-static int
-rcutorture_shutdown_notify(struct notifier_block *unused1,
- unsigned long unused2, void *unused3)
-{
- mutex_lock(&fullstop_mutex);
- if (fullstop == FULLSTOP_DONTSTOP)
- fullstop = FULLSTOP_SHUTDOWN;
- else
- pr_warn(/* but going down anyway, so... */
- "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
- mutex_unlock(&fullstop_mutex);
- return NOTIFY_DONE;
-}
-
-/*
- * Absorb kthreads into a kernel function that won't return, so that
- * they won't ever access module text or data again.
- */
-static void rcutorture_shutdown_absorb(const char *title)
-{
- if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
- pr_notice(
- "rcutorture thread %s parking due to system shutdown\n",
- title);
- schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
- }
-}
-
-/*
- * Allocate an element from the rcu_tortures pool.
- */
-static struct rcu_torture *
-rcu_torture_alloc(void)
-{
- struct list_head *p;
-
- spin_lock_bh(&rcu_torture_lock);
- if (list_empty(&rcu_torture_freelist)) {
- atomic_inc(&n_rcu_torture_alloc_fail);
- spin_unlock_bh(&rcu_torture_lock);
- return NULL;
- }
- atomic_inc(&n_rcu_torture_alloc);
- p = rcu_torture_freelist.next;
- list_del_init(p);
- spin_unlock_bh(&rcu_torture_lock);
- return container_of(p, struct rcu_torture, rtort_free);
-}
-
-/*
- * Free an element to the rcu_tortures pool.
- */
-static void
-rcu_torture_free(struct rcu_torture *p)
-{
- atomic_inc(&n_rcu_torture_free);
- spin_lock_bh(&rcu_torture_lock);
- list_add_tail(&p->rtort_free, &rcu_torture_freelist);
- spin_unlock_bh(&rcu_torture_lock);
-}
-
-struct rcu_random_state {
- unsigned long rrs_state;
- long rrs_count;
-};
-
-#define RCU_RANDOM_MULT 39916801 /* prime */
-#define RCU_RANDOM_ADD 479001701 /* prime */
-#define RCU_RANDOM_REFRESH 10000
-
-#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
-
-/*
- * Crude but fast random-number generator. Uses a linear congruential
- * generator, with occasional help from cpu_clock().
- */
-static unsigned long
-rcu_random(struct rcu_random_state *rrsp)
-{
- if (--rrsp->rrs_count < 0) {
- rrsp->rrs_state += (unsigned long)local_clock();
- rrsp->rrs_count = RCU_RANDOM_REFRESH;
- }
- rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
- return swahw32(rrsp->rrs_state);
-}
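
The rcu_random() being removed here (its role is taken over by the shared torture infrastructure's DEFINE_TORTURE_RANDOM used in the new file) is a compact pseudo-random generator: a linear congruential step with prime constants, periodically reseeded from the scheduler clock, with the 16-bit halfwords of the state swapped on output so the better-mixed high bits land in the low bits callers actually use. A standalone rendering (userspace C; the local_clock() reseed is replaced by a constant for illustration):

    #include <stdio.h>

    #define RCU_RANDOM_MULT 39916801        /* prime */
    #define RCU_RANDOM_ADD 479001701        /* prime */
    #define RCU_RANDOM_REFRESH 10000

    struct rcu_random_state {
            unsigned long rrs_state;
            long rrs_count;
    };

    /* Swap the 16-bit halfwords of a 32-bit value (the kernel's swahw32()). */
    static unsigned long swahw32(unsigned long x)
    {
            return ((x & 0x0000ffffUL) << 16) | ((x & 0xffff0000UL) >> 16);
    }

    static unsigned long rcu_random(struct rcu_random_state *rrsp)
    {
            if (--rrsp->rrs_count < 0) {
                    rrsp->rrs_state += 12345; /* stands in for local_clock() */
                    rrsp->rrs_count = RCU_RANDOM_REFRESH;
            }
            rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
            return swahw32(rrsp->rrs_state);
    }

    int main(void)
    {
            struct rcu_random_state rand = { 0, 0 };

            for (int i = 0; i < 4; i++)
                    printf("%lu\n", rcu_random(&rand) & 0x3ff); /* udelay()-style use */
            return 0;
    }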
-
-static void
-rcu_stutter_wait(const char *title)
-{
- while (stutter_pause_test || !rcutorture_runnable) {
- if (rcutorture_runnable)
- schedule_timeout_interruptible(1);
- else
- schedule_timeout_interruptible(round_jiffies_relative(HZ));
- rcutorture_shutdown_absorb(title);
- }
-}
-
-/*
- * Operations vector for selecting different types of tests.
- */
-
-struct rcu_torture_ops {
- void (*init)(void);
- int (*readlock)(void);
- void (*read_delay)(struct rcu_random_state *rrsp);
- void (*readunlock)(int idx);
- int (*completed)(void);
- void (*deferred_free)(struct rcu_torture *p);
- void (*sync)(void);
- void (*exp_sync)(void);
- void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
- void (*cb_barrier)(void);
- void (*fqs)(void);
- void (*stats)(char *page);
- int irq_capable;
- int can_boost;
- const char *name;
-};
-
-static struct rcu_torture_ops *cur_ops;
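
The ops vector is the module's central abstraction: each torture flavor (rcu, rcu_bh, srcu, sched) fills in the same table of function pointers, and everything else calls through cur_ops without knowing which flavor is under test. A compact sketch of the pattern (userspace C, two toy flavors):

    #include <stdio.h>

    struct torture_ops {
            int (*readlock)(void);
            void (*readunlock)(int idx);
            const char *name;
    };

    static int noop_readlock(void) { return 0; }
    static void noop_readunlock(int idx) { (void)idx; }

    static int counted_idx;
    static int counted_readlock(void) { return counted_idx++; }
    static void counted_readunlock(int idx) { printf("unlock idx %d\n", idx); }

    static struct torture_ops noop_ops = {
            .readlock = noop_readlock,
            .readunlock = noop_readunlock,
            .name = "noop",
    };

    static struct torture_ops counted_ops = {
            .readlock = counted_readlock,
            .readunlock = counted_readunlock,
            .name = "counted",
    };

    static struct torture_ops *cur_ops;

    int main(void)
    {
            struct torture_ops *tab[] = { &noop_ops, &counted_ops };

            for (unsigned i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
                    int idx;

                    cur_ops = tab[i];       /* selected by name in the module */
                    idx = cur_ops->readlock();
                    printf("%s: locked\n", cur_ops->name);
                    cur_ops->readunlock(idx);
            }
            return 0;
    }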
-
-/*
- * Definitions for rcu torture testing.
- */
-
-static int rcu_torture_read_lock(void) __acquires(RCU)
-{
- rcu_read_lock();
- return 0;
-}
-
-static void rcu_read_delay(struct rcu_random_state *rrsp)
-{
- const unsigned long shortdelay_us = 200;
- const unsigned long longdelay_ms = 50;
-
- /* We want a short delay sometimes to make a reader delay the grace
- * period, and we want a long delay occasionally to trigger
- * force_quiescent_state. */
-
- if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
- mdelay(longdelay_ms);
- if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
- udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
- if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
- preempt_schedule(); /* No QS if preempt_disable() in effect */
-#endif
-}
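
Note how rcu_read_delay() scales the delay probability inversely with both the reader count and the delay length: roughly one 50 ms delay per nrealreaders * 2000 * 50 calls and one 200 us delay per nrealreaders * 2 * 200 calls, so the aggregate time readers spend delaying stays about constant as readers are added. A few lines to make the arithmetic concrete (illustrative):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long shortdelay_us = 200;
            const unsigned long longdelay_ms = 50;

            for (int nreaders = 2; nreaders <= 16; nreaders *= 2)
                    printf("nreaders=%2d: long delay 1/%lu calls, short 1/%lu\n",
                           nreaders,
                           nreaders * 2000 * longdelay_ms,
                           nreaders * 2 * shortdelay_us);
            return 0;
    }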
-
-static void rcu_torture_read_unlock(int idx) __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int rcu_torture_completed(void)
-{
- return rcu_batches_completed();
-}
-
-static void
-rcu_torture_cb(struct rcu_head *p)
-{
- int i;
- struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
-
- if (fullstop != FULLSTOP_DONTSTOP) {
- /* Test is ending, just drop callbacks on the floor. */
- /* The next initialization will pick up the pieces. */
- return;
- }
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- rcu_torture_free(rp);
- } else {
- cur_ops->deferred_free(rp);
- }
-}
-
-static int rcu_no_completed(void)
-{
- return 0;
-}
-
-static void rcu_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static void rcu_sync_torture_init(void)
-{
- INIT_LIST_HEAD(&rcu_torture_removed);
-}
-
-static struct rcu_torture_ops rcu_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferred_free = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .exp_sync = synchronize_rcu_expedited,
- .call = call_rcu,
- .cb_barrier = rcu_barrier,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .name = "rcu"
-};
-
-/*
- * Definitions for rcu_bh torture testing.
- */
-
-static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static int rcu_bh_torture_completed(void)
-{
- return rcu_batches_completed_bh();
-}
-
-static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops rcu_bh_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferred_free = rcu_bh_torture_deferred_free,
- .sync = synchronize_rcu_bh,
- .exp_sync = synchronize_rcu_bh_expedited,
- .call = call_rcu_bh,
- .cb_barrier = rcu_barrier_bh,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "rcu_bh"
-};
-
-/*
- * Definitions for srcu torture testing.
- */
-
-DEFINE_STATIC_SRCU(srcu_ctl);
-
-static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
-{
- return srcu_read_lock(&srcu_ctl);
-}
-
-static void srcu_read_delay(struct rcu_random_state *rrsp)
-{
- long delay;
- const long uspertick = 1000000 / HZ;
- const long longdelay = 10;
-
- /* We want there to be long-running readers, but not all the time. */
-
- delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
- if (!delay)
- schedule_timeout_interruptible(longdelay);
- else
- rcu_read_delay(rrsp);
-}
-
-static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
-{
- srcu_read_unlock(&srcu_ctl, idx);
-}
-
-static int srcu_torture_completed(void)
-{
- return srcu_batches_completed(&srcu_ctl);
-}
-
-static void srcu_torture_deferred_free(struct rcu_torture *rp)
-{
- call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
-}
-
-static void srcu_torture_synchronize(void)
-{
- synchronize_srcu(&srcu_ctl);
-}
-
-static void srcu_torture_call(struct rcu_head *head,
- void (*func)(struct rcu_head *head))
-{
- call_srcu(&srcu_ctl, head, func);
-}
-
-static void srcu_torture_barrier(void)
-{
- srcu_barrier(&srcu_ctl);
-}
-
-static void srcu_torture_stats(char *page)
-{
- int cpu;
- int idx = srcu_ctl.completed & 0x1;
-
- page += sprintf(page, "%s%s per-CPU(idx=%d):",
- torture_type, TORTURE_FLAG, idx);
- for_each_possible_cpu(cpu) {
- page += sprintf(page, " %d(%lu,%lu)", cpu,
- per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
- per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
- }
- sprintf(page, "\n");
-}
-
-static void srcu_torture_synchronize_expedited(void)
-{
- synchronize_srcu_expedited(&srcu_ctl);
-}
-
-static struct rcu_torture_ops srcu_ops = {
- .init = rcu_sync_torture_init,
- .readlock = srcu_torture_read_lock,
- .read_delay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferred_free = srcu_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .exp_sync = srcu_torture_synchronize_expedited,
- .call = srcu_torture_call,
- .cb_barrier = srcu_torture_barrier,
- .stats = srcu_torture_stats,
- .name = "srcu"
-};
-
-/*
- * Definitions for sched torture testing.
- */
-
-static int sched_torture_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_torture_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops sched_ops = {
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sched_torture_deferred_free,
- .sync = synchronize_sched,
- .exp_sync = synchronize_sched_expedited,
- .call = call_rcu_sched,
- .cb_barrier = rcu_barrier_sched,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "sched"
-};
-
-/*
- * RCU torture priority-boost testing. Runs one real-time thread per
- * CPU for moderate bursts, repeatedly registering RCU callbacks and
- * spinning waiting for them to be invoked. If a given callback takes
- * too long to be invoked, we assume that priority inversion has occurred.
- */
-
-struct rcu_boost_inflight {
- struct rcu_head rcu;
- int inflight;
-};
-
-static void rcu_torture_boost_cb(struct rcu_head *head)
-{
- struct rcu_boost_inflight *rbip =
- container_of(head, struct rcu_boost_inflight, rcu);
-
- smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
- rbip->inflight = 0;
-}
-
-static int rcu_torture_boost(void *arg)
-{
- unsigned long call_rcu_time;
- unsigned long endtime;
- unsigned long oldstarttime;
- struct rcu_boost_inflight rbi = { .inflight = 0 };
- struct sched_param sp;
-
- VERBOSE_PRINTK_STRING("rcu_torture_boost started");
-
- /* Set real-time priority. */
- sp.sched_priority = 1;
- if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
- VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
- n_rcu_torture_boost_rterror++;
- }
-
- init_rcu_head_on_stack(&rbi.rcu);
- /* Each pass through the following loop does one boost-test cycle. */
- do {
- /* Wait for the next test interval. */
- oldstarttime = boost_starttime;
- while (ULONG_CMP_LT(jiffies, oldstarttime)) {
- schedule_timeout_interruptible(oldstarttime - jiffies);
- rcu_stutter_wait("rcu_torture_boost");
- if (kthread_should_stop() ||
- fullstop != FULLSTOP_DONTSTOP)
- goto checkwait;
- }
-
- /* Do one boost-test interval. */
- endtime = oldstarttime + test_boost_duration * HZ;
- call_rcu_time = jiffies;
- while (ULONG_CMP_LT(jiffies, endtime)) {
- /* If we don't have a callback in flight, post one. */
- if (!rbi.inflight) {
- smp_mb(); /* RCU core before ->inflight = 1. */
- rbi.inflight = 1;
- call_rcu(&rbi.rcu, rcu_torture_boost_cb);
- if (jiffies - call_rcu_time >
- test_boost_duration * HZ - HZ / 2) {
- VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
- n_rcu_torture_boost_failure++;
- }
- call_rcu_time = jiffies;
- }
- cond_resched();
- rcu_stutter_wait("rcu_torture_boost");
- if (kthread_should_stop() ||
- fullstop != FULLSTOP_DONTSTOP)
- goto checkwait;
- }
-
- /*
- * Set the start time of the next test interval.
- * Yes, this is vulnerable to long delays, but such
- * delays simply cause a false negative for the next
- * interval. Besides, we are running at RT priority,
- * so delays should be relatively rare.
- */
- while (oldstarttime == boost_starttime &&
- !kthread_should_stop()) {
- if (mutex_trylock(&boost_mutex)) {
- boost_starttime = jiffies +
- test_boost_interval * HZ;
- n_rcu_torture_boosts++;
- mutex_unlock(&boost_mutex);
- break;
- }
- schedule_timeout_uninterruptible(1);
- }
-
- /* Go do the stutter. */
-checkwait: rcu_stutter_wait("rcu_torture_boost");
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-
- /* Clean up and exit. */
- VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
- rcutorture_shutdown_absorb("rcu_torture_boost");
- while (!kthread_should_stop() || rbi.inflight)
- schedule_timeout_uninterruptible(1);
- smp_mb(); /* order accesses to ->inflight before stack-frame death. */
- destroy_rcu_head_on_stack(&rbi.rcu);
- return 0;
-}
-
-/*
- * RCU torture force-quiescent-state kthread. Repeatedly induces
- * bursts of calls to force_quiescent_state(), increasing the probability
- * of occurrence of some important types of race conditions.
- */
-static int
-rcu_torture_fqs(void *arg)
-{
- unsigned long fqs_resume_time;
- int fqs_burst_remaining;
-
- VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
- do {
- fqs_resume_time = jiffies + fqs_stutter * HZ;
- while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
- !kthread_should_stop()) {
- schedule_timeout_interruptible(1);
- }
- fqs_burst_remaining = fqs_duration;
- while (fqs_burst_remaining > 0 &&
- !kthread_should_stop()) {
- cur_ops->fqs();
- udelay(fqs_holdoff);
- fqs_burst_remaining -= fqs_holdoff;
- }
- rcu_stutter_wait("rcu_torture_fqs");
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
- VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
- rcutorture_shutdown_absorb("rcu_torture_fqs");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * RCU torture writer kthread. Repeatedly substitutes a new structure
- * for that pointed to by rcu_torture_current, freeing the old structure
- * after a series of grace periods (the "pipeline").
- */
-static int
-rcu_torture_writer(void *arg)
-{
- bool exp;
- int i;
- struct rcu_torture *rp;
- struct rcu_torture *rp1;
- struct rcu_torture *old_rp;
- static DEFINE_RCU_RANDOM(rand);
-
- VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
- set_user_nice(current, 19);
-
- do {
- schedule_timeout_uninterruptible(1);
- rp = rcu_torture_alloc();
- if (rp == NULL)
- continue;
- rp->rtort_pipe_count = 0;
- udelay(rcu_random(&rand) & 0x3ff);
- old_rp = rcu_dereference_check(rcu_torture_current,
- current == writer_task);
- rp->rtort_mbtest = 1;
- rcu_assign_pointer(rcu_torture_current, rp);
- smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
- if (old_rp) {
- i = old_rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- old_rp->rtort_pipe_count++;
- if (gp_normal == gp_exp)
- exp = !!(rcu_random(&rand) & 0x80);
- else
- exp = gp_exp;
- if (!exp) {
- cur_ops->deferred_free(old_rp);
- } else {
- cur_ops->exp_sync();
- list_add(&old_rp->rtort_free,
- &rcu_torture_removed);
- list_for_each_entry_safe(rp, rp1,
- &rcu_torture_removed,
- rtort_free) {
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >=
- RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- list_del(&rp->rtort_free);
- rcu_torture_free(rp);
- }
- }
- }
- }
- rcutorture_record_progress(++rcu_torture_current_version);
- rcu_stutter_wait("rcu_torture_writer");
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
- VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- rcutorture_shutdown_absorb("rcu_torture_writer");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * RCU torture fake writer kthread. Repeatedly calls sync, with a random
- * delay between calls.
- */
-static int
-rcu_torture_fakewriter(void *arg)
-{
- DEFINE_RCU_RANDOM(rand);
-
- VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
- set_user_nice(current, 19);
-
- do {
- schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
- udelay(rcu_random(&rand) & 0x3ff);
- if (cur_ops->cb_barrier != NULL &&
- rcu_random(&rand) % (nfakewriters * 8) == 0) {
- cur_ops->cb_barrier();
- } else if (gp_normal == gp_exp) {
- if (rcu_random(&rand) & 0x80)
- cur_ops->sync();
- else
- cur_ops->exp_sync();
- } else if (gp_normal) {
- cur_ops->sync();
- } else {
- cur_ops->exp_sync();
- }
- rcu_stutter_wait("rcu_torture_fakewriter");
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-
- VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- rcutorture_shutdown_absorb("rcu_torture_fakewriter");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-void rcutorture_trace_dump(void)
-{
- static atomic_t beenhere = ATOMIC_INIT(0);
-
- if (atomic_read(&beenhere))
- return;
- if (atomic_xchg(&beenhere, 1) != 0)
- return;
- ftrace_dump(DUMP_ALL);
-}
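
rcutorture_trace_dump() is a do-once gate: the plain atomic_read() screens the common case cheaply, and atomic_xchg() arbitrates any race so exactly one caller performs the expensive ftrace_dump(). The same idiom with C11 atomics (a sketch):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int beenhere;

    static void trace_dump_once(void)
    {
            /* Fast path: someone already dumped. */
            if (atomic_load(&beenhere))
                    return;
            /* Slow path: exactly one caller sees the 0 -> 1 transition. */
            if (atomic_exchange(&beenhere, 1) != 0)
                    return;
            puts("dumping trace buffer (once)");
    }

    int main(void)
    {
            trace_dump_once();      /* dumps */
            trace_dump_once();      /* fast-path return */
            return 0;
    }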
-
-/*
- * RCU torture reader from timer handler. Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array. The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
- */
-static void rcu_torture_timer(unsigned long unused)
-{
- int idx;
- int completed;
- int completed_end;
- static DEFINE_RCU_RANDOM(rand);
- static DEFINE_SPINLOCK(rand_lock);
- struct rcu_torture *p;
- int pipe_count;
- unsigned long long ts;
-
- idx = cur_ops->readlock();
- completed = cur_ops->completed();
- ts = rcu_trace_clock_local();
- p = rcu_dereference_check(rcu_torture_current,
- rcu_read_lock_bh_held() ||
- rcu_read_lock_sched_held() ||
- srcu_read_lock_held(&srcu_ctl));
- if (p == NULL) {
- /* Leave because rcu_torture_writer is not yet underway */
- cur_ops->readunlock(idx);
- return;
- }
- if (p->rtort_mbtest == 0)
- atomic_inc(&n_rcu_torture_mberror);
- spin_lock(&rand_lock);
- cur_ops->read_delay(&rand);
- n_rcu_torture_timers++;
- spin_unlock(&rand_lock);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
- if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- pipe_count = RCU_TORTURE_PIPE_LEN;
- }
- completed_end = cur_ops->completed();
- if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
- completed, completed_end);
- rcutorture_trace_dump();
- }
- __this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed_end - completed;
- if (completed > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- completed = RCU_TORTURE_PIPE_LEN;
- }
- __this_cpu_inc(rcu_torture_batch[completed]);
- preempt_enable();
- cur_ops->readunlock(idx);
-}
-
-/*
- * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array. The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
- */
-static int
-rcu_torture_reader(void *arg)
-{
- int completed;
- int completed_end;
- int idx;
- DEFINE_RCU_RANDOM(rand);
- struct rcu_torture *p;
- int pipe_count;
- struct timer_list t;
- unsigned long long ts;
-
- VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
- set_user_nice(current, 19);
- if (irqreader && cur_ops->irq_capable)
- setup_timer_on_stack(&t, rcu_torture_timer, 0);
-
- do {
- if (irqreader && cur_ops->irq_capable) {
- if (!timer_pending(&t))
- mod_timer(&t, jiffies + 1);
- }
- idx = cur_ops->readlock();
- completed = cur_ops->completed();
- ts = rcu_trace_clock_local();
- p = rcu_dereference_check(rcu_torture_current,
- rcu_read_lock_bh_held() ||
- rcu_read_lock_sched_held() ||
- srcu_read_lock_held(&srcu_ctl));
- if (p == NULL) {
- /* Wait for rcu_torture_writer to get underway */
- cur_ops->readunlock(idx);
- schedule_timeout_interruptible(HZ);
- continue;
- }
- if (p->rtort_mbtest == 0)
- atomic_inc(&n_rcu_torture_mberror);
- cur_ops->read_delay(&rand);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
- if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- pipe_count = RCU_TORTURE_PIPE_LEN;
- }
- completed_end = cur_ops->completed();
- if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
- ts, completed, completed_end);
- rcutorture_trace_dump();
- }
- __this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed_end - completed;
- if (completed > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- completed = RCU_TORTURE_PIPE_LEN;
- }
- __this_cpu_inc(rcu_torture_batch[completed]);
- preempt_enable();
- cur_ops->readunlock(idx);
- schedule();
- rcu_stutter_wait("rcu_torture_reader");
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
- VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
- rcutorture_shutdown_absorb("rcu_torture_reader");
- if (irqreader && cur_ops->irq_capable)
- del_timer_sync(&t);
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * Create an RCU-torture statistics message in the specified buffer.
- */
-static void
-rcu_torture_printk(char *page)
-{
- int cpu;
- int i;
- long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
- long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
-
- for_each_possible_cpu(cpu) {
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
- batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
- }
- }
- for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
- if (pipesummary[i] != 0)
- break;
- }
- page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page,
- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
- rcu_torture_current,
- rcu_torture_current_version,
- list_empty(&rcu_torture_freelist),
- atomic_read(&n_rcu_torture_alloc),
- atomic_read(&n_rcu_torture_alloc_fail),
- atomic_read(&n_rcu_torture_free));
- page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
- atomic_read(&n_rcu_torture_mberror),
- n_rcu_torture_boost_ktrerror,
- n_rcu_torture_boost_rterror);
- page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
- n_rcu_torture_boost_failure,
- n_rcu_torture_boosts,
- n_rcu_torture_timers);
- page += sprintf(page,
- "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
- n_online_successes, n_online_attempts,
- n_offline_successes, n_offline_attempts,
- min_online, max_online,
- min_offline, max_offline,
- sum_online, sum_offline, HZ);
- page += sprintf(page, "barrier: %ld/%ld:%ld",
- n_barrier_successes,
- n_barrier_attempts,
- n_rcu_torture_barrier_error);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
- n_rcu_torture_barrier_error != 0 ||
- n_rcu_torture_boost_ktrerror != 0 ||
- n_rcu_torture_boost_rterror != 0 ||
- n_rcu_torture_boost_failure != 0 ||
- i > 1) {
- page += sprintf(page, "!!! ");
- atomic_inc(&n_rcu_torture_error);
- WARN_ON_ONCE(1);
- }
- page += sprintf(page, "Reader Pipe: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- page += sprintf(page, " %ld", pipesummary[i]);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page, "Reader Batch: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- page += sprintf(page, " %ld", batchsummary[i]);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page, "Free-Block Circulation: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- page += sprintf(page, " %d",
- atomic_read(&rcu_torture_wcount[i]));
- }
- page += sprintf(page, "\n");
- if (cur_ops->stats)
- cur_ops->stats(page);
-}
-
-/*
- * Print torture statistics. Caller must ensure that there is only
- * one call to this function at a given time!!! This is normally
- * accomplished by relying on the module system to only have one copy
- * of the module loaded, and then by giving the rcu_torture_stats
- * kthread full control (or the init/cleanup functions when rcu_torture_stats
- * thread is not running).
- */
-static void
-rcu_torture_stats_print(void)
-{
- int size = nr_cpu_ids * 200 + 8192;
- char *buf;
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- pr_err("rcu-torture: Out of memory, need: %d", size);
- return;
- }
- rcu_torture_printk(buf);
- pr_alert("%s", buf);
- kfree(buf);
-}
-
-/*
- * Periodically prints torture statistics, if periodic statistics printing
- * was specified via the stat_interval module parameter.
- *
- * No need to worry about fullstop here, since this one doesn't reference
- * volatile state or register callbacks.
- */
-static int
-rcu_torture_stats(void *arg)
-{
- VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
- do {
- schedule_timeout_interruptible(stat_interval * HZ);
- rcu_torture_stats_print();
- rcutorture_shutdown_absorb("rcu_torture_stats");
- } while (!kthread_should_stop());
- VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
- return 0;
-}
-
-static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
-
-/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
- * is @rcu_idle_cpu = -1, in which case the tasks are allowed to run on all CPUs.
- */
-static void rcu_torture_shuffle_tasks(void)
-{
- int i;
-
- cpumask_setall(shuffle_tmp_mask);
- get_online_cpus();
-
- /* No point in shuffling if there is only one online CPU (ex: UP) */
- if (num_online_cpus() == 1) {
- put_online_cpus();
- return;
- }
-
- if (rcu_idle_cpu != -1)
- cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
-
- set_cpus_allowed_ptr(current, shuffle_tmp_mask);
-
- if (reader_tasks) {
- for (i = 0; i < nrealreaders; i++)
- if (reader_tasks[i])
- set_cpus_allowed_ptr(reader_tasks[i],
- shuffle_tmp_mask);
- }
- if (fakewriter_tasks) {
- for (i = 0; i < nfakewriters; i++)
- if (fakewriter_tasks[i])
- set_cpus_allowed_ptr(fakewriter_tasks[i],
- shuffle_tmp_mask);
- }
- if (writer_task)
- set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
- if (stats_task)
- set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
- if (stutter_task)
- set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
- if (fqs_task)
- set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
- if (shutdown_task)
- set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
-#ifdef CONFIG_HOTPLUG_CPU
- if (onoff_task)
- set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
- if (stall_task)
- set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
- if (barrier_cbs_tasks)
- for (i = 0; i < n_barrier_cbs; i++)
- if (barrier_cbs_tasks[i])
- set_cpus_allowed_ptr(barrier_cbs_tasks[i],
- shuffle_tmp_mask);
- if (barrier_task)
- set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
-
- if (rcu_idle_cpu == -1)
- rcu_idle_cpu = num_online_cpus() - 1;
- else
- rcu_idle_cpu--;
-
- put_online_cpus();
-}
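/*
 * A worked sketch of the rotation implemented above (not part of the
 * patch): with four online CPUs and rcu_idle_cpu starting at 3,
 * successive calls exclude CPU 3, then 2, then 1, then 0, then none:
 *
 *	call 1: rcu_idle_cpu = 3  -> tasks confined to {0,1,2}, CPU 3 may idle
 *	call 2: rcu_idle_cpu = 2  -> tasks confined to {0,1,3}
 *	call 3: rcu_idle_cpu = 1  -> tasks confined to {0,2,3}
 *	call 4: rcu_idle_cpu = 0  -> tasks confined to {1,2,3}
 *	call 5: rcu_idle_cpu = -1 -> all CPUs allowed; resets to 3 afterwards
 */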
-
-/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
- * system to become idle in turn and cut off its timer tick. This is meant
- * to test RCU's support for such tickless idle CPUs.
- */
-static int
-rcu_torture_shuffle(void *arg)
-{
- VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
- do {
- schedule_timeout_interruptible(shuffle_interval * HZ);
- rcu_torture_shuffle_tasks();
- rcutorture_shutdown_absorb("rcu_torture_shuffle");
- } while (!kthread_should_stop());
- VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
- return 0;
-}
-
-/* Cause the rcutorture test to "stutter", starting and stopping all
- * threads periodically.
- */
-static int
-rcu_torture_stutter(void *arg)
-{
- VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
- do {
- schedule_timeout_interruptible(stutter * HZ);
- stutter_pause_test = 1;
- if (!kthread_should_stop())
- schedule_timeout_interruptible(stutter * HZ);
- stutter_pause_test = 0;
- rcutorture_shutdown_absorb("rcu_torture_stutter");
- } while (!kthread_should_stop());
- VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
- return 0;
-}
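/*
 * The consuming side of stutter_pause_test is not shown in this hunk;
 * the helper elsewhere in this file (rcu_stutter_wait()) does roughly
 * the following, so that all readers and writers pause in lockstep
 * while the flag is set:
 *
 *	while (stutter_pause_test)
 *		schedule_timeout_interruptible(1);
 */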
-
-static inline void
-rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
-{
- pr_alert("%s" TORTURE_FLAG
- "--- %s: nreaders=%d nfakewriters=%d "
- "stat_interval=%d verbose=%d test_no_idle_hz=%d "
- "shuffle_interval=%d stutter=%d irqreader=%d "
- "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
- "test_boost=%d/%d test_boost_interval=%d "
- "test_boost_duration=%d shutdown_secs=%d "
- "stall_cpu=%d stall_cpu_holdoff=%d "
- "n_barrier_cbs=%d "
- "onoff_interval=%d onoff_holdoff=%d\n",
- torture_type, tag, nrealreaders, nfakewriters,
- stat_interval, verbose, test_no_idle_hz, shuffle_interval,
- stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
- test_boost, cur_ops->can_boost,
- test_boost_interval, test_boost_duration, shutdown_secs,
- stall_cpu, stall_cpu_holdoff,
- n_barrier_cbs,
- onoff_interval, onoff_holdoff);
-}
-
-static struct notifier_block rcutorture_shutdown_nb = {
- .notifier_call = rcutorture_shutdown_notify,
-};
-
-static void rcutorture_booster_cleanup(int cpu)
-{
- struct task_struct *t;
-
- if (boost_tasks[cpu] == NULL)
- return;
- mutex_lock(&boost_mutex);
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
- t = boost_tasks[cpu];
- boost_tasks[cpu] = NULL;
- mutex_unlock(&boost_mutex);
-
- /* This must be outside of the mutex, otherwise deadlock! */
- kthread_stop(t);
- boost_tasks[cpu] = NULL;
-}
-
-static int rcutorture_booster_init(int cpu)
-{
- int retval;
-
- if (boost_tasks[cpu] != NULL)
- return 0; /* Already created, nothing more to do. */
-
- /* Don't allow time recalculation while creating a new task. */
- mutex_lock(&boost_mutex);
- VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
- boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
- cpu_to_node(cpu),
- "rcu_torture_boost");
- if (IS_ERR(boost_tasks[cpu])) {
- retval = PTR_ERR(boost_tasks[cpu]);
- VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
- n_rcu_torture_boost_ktrerror++;
- boost_tasks[cpu] = NULL;
- mutex_unlock(&boost_mutex);
- return retval;
- }
- kthread_bind(boost_tasks[cpu], cpu);
- wake_up_process(boost_tasks[cpu]);
- mutex_unlock(&boost_mutex);
- return 0;
-}
-
-/*
- * Cause the rcutorture test to shut down the system after the test has
- * run for the time specified by the shutdown_secs module parameter.
- */
-static int
-rcu_torture_shutdown(void *arg)
-{
- long delta;
- unsigned long jiffies_snap;
-
- VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
- jiffies_snap = ACCESS_ONCE(jiffies);
- while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
- !kthread_should_stop()) {
- delta = shutdown_time - jiffies_snap;
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_shutdown task: %lu jiffies remaining\n",
- torture_type, delta);
- schedule_timeout_interruptible(delta);
- jiffies_snap = ACCESS_ONCE(jiffies);
- }
- if (kthread_should_stop()) {
- VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
- return 0;
- }
-
- /* OK, shut down the system. */
-
- VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
- shutdown_task = NULL; /* Avoid self-kill deadlock. */
- rcu_torture_cleanup(); /* Get the success/failure message. */
- kernel_power_off(); /* Shut down the system. */
- return 0;
-}
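/*
 * ULONG_CMP_LT() above is the wrap-safe counter comparison from
 * rcupdate.h, so the shutdown loop stays correct even if jiffies
 * wraps during a long run. The definitions are essentially:
 *
 *	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 *	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 <  (a) - (b))
 *
 * i.e. "a < b" iff the unsigned difference (a) - (b) wraps into the
 * upper half of the counter space.
 */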
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Execute random CPU-hotplug operations at the interval specified
- * by the onoff_interval module parameter.
- */
-static int
-rcu_torture_onoff(void *arg)
-{
- int cpu;
- unsigned long delta;
- int maxcpu = -1;
- DEFINE_RCU_RANDOM(rand);
- int ret;
- unsigned long starttime;
-
- VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
- for_each_online_cpu(cpu)
- maxcpu = cpu;
- WARN_ON(maxcpu < 0);
- if (onoff_holdoff > 0) {
- VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
- schedule_timeout_interruptible(onoff_holdoff * HZ);
- VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
- }
- while (!kthread_should_stop()) {
- cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
- if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: offlining %d\n",
- torture_type, cpu);
- starttime = jiffies;
- n_offline_attempts++;
- ret = cpu_down(cpu);
- if (ret) {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: offline %d failed: errno %d\n",
- torture_type, cpu, ret);
- } else {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: offlined %d\n",
- torture_type, cpu);
- n_offline_successes++;
- delta = jiffies - starttime;
- sum_offline += delta;
- if (min_offline < 0) {
- min_offline = delta;
- max_offline = delta;
- }
- if (min_offline > delta)
- min_offline = delta;
- if (max_offline < delta)
- max_offline = delta;
- }
- } else if (cpu_is_hotpluggable(cpu)) {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: onlining %d\n",
- torture_type, cpu);
- starttime = jiffies;
- n_online_attempts++;
- ret = cpu_up(cpu);
- if (ret) {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: online %d failed: errno %d\n",
- torture_type, cpu, ret);
- } else {
- if (verbose)
- pr_alert("%s" TORTURE_FLAG
- "rcu_torture_onoff task: onlined %d\n",
- torture_type, cpu);
- n_online_successes++;
- delta = jiffies - starttime;
- sum_online += delta;
- if (min_online < 0) {
- min_online = delta;
- max_online = delta;
- }
- if (min_online > delta)
- min_online = delta;
- if (max_online < delta)
- max_online = delta;
- }
- }
- schedule_timeout_interruptible(onoff_interval * HZ);
- }
- VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
- return 0;
-}
-
-static int
-rcu_torture_onoff_init(void)
-{
- int ret;
-
- if (onoff_interval <= 0)
- return 0;
- onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
- if (IS_ERR(onoff_task)) {
- ret = PTR_ERR(onoff_task);
- onoff_task = NULL;
- return ret;
- }
- return 0;
-}
-
-static void rcu_torture_onoff_cleanup(void)
-{
- if (onoff_task == NULL)
- return;
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
- kthread_stop(onoff_task);
- onoff_task = NULL;
-}
-
-#else /* #ifdef CONFIG_HOTPLUG_CPU */
-
-static int
-rcu_torture_onoff_init(void)
-{
- return 0;
-}
-
-static void rcu_torture_onoff_cleanup(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
-
-/*
- * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
- * induces a CPU stall for the time specified by stall_cpu.
- */
-static int rcu_torture_stall(void *args)
-{
- unsigned long stop_at;
-
- VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
- if (stall_cpu_holdoff > 0) {
- VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
- schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
- VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
- }
- if (!kthread_should_stop()) {
- stop_at = get_seconds() + stall_cpu;
- /* RCU CPU stall is expected behavior in the following code. */
- pr_alert("rcu_torture_stall start.\n");
- rcu_read_lock();
- preempt_disable();
- while (ULONG_CMP_LT(get_seconds(), stop_at))
- continue; /* Induce RCU CPU stall warning. */
- preempt_enable();
- rcu_read_unlock();
- pr_alert("rcu_torture_stall end.\n");
- }
- rcutorture_shutdown_absorb("rcu_torture_stall");
- while (!kthread_should_stop())
- schedule_timeout_interruptible(10 * HZ);
- return 0;
-}
-
-/* Spawn CPU-stall kthread, if stall_cpu specified. */
-static int __init rcu_torture_stall_init(void)
-{
- int ret;
-
- if (stall_cpu <= 0)
- return 0;
- stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
- if (IS_ERR(stall_task)) {
- ret = PTR_ERR(stall_task);
- stall_task = NULL;
- return ret;
- }
- return 0;
-}
-
-/* Clean up after the CPU-stall kthread, if one was spawned. */
-static void rcu_torture_stall_cleanup(void)
-{
- if (stall_task == NULL)
- return;
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task.");
- kthread_stop(stall_task);
- stall_task = NULL;
-}
-
-/* Callback function for RCU barrier testing. */
-void rcu_torture_barrier_cbf(struct rcu_head *rcu)
-{
- atomic_inc(&barrier_cbs_invoked);
-}
-
-/* kthread function to register callbacks used to test RCU barriers. */
-static int rcu_torture_barrier_cbs(void *arg)
-{
- long myid = (long)arg;
- bool lastphase = 0;
- bool newphase;
- struct rcu_head rcu;
-
- init_rcu_head_on_stack(&rcu);
- VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started");
- set_user_nice(current, 19);
- do {
- wait_event(barrier_cbs_wq[myid],
- (newphase =
- ACCESS_ONCE(barrier_phase)) != lastphase ||
- kthread_should_stop() ||
- fullstop != FULLSTOP_DONTSTOP);
- lastphase = newphase;
- smp_mb(); /* ensure barrier_phase load before ->call(). */
- if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
- break;
- cur_ops->call(&rcu, rcu_torture_barrier_cbf);
- if (atomic_dec_and_test(&barrier_cbs_count))
- wake_up(&barrier_wq);
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
- VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping");
- rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
- while (!kthread_should_stop())
- schedule_timeout_interruptible(1);
- cur_ops->cb_barrier();
- destroy_rcu_head_on_stack(&rcu);
- return 0;
-}
-
-/* kthread function to drive and coordinate RCU barrier testing. */
-static int rcu_torture_barrier(void *arg)
-{
- int i;
-
- VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting");
- do {
- atomic_set(&barrier_cbs_invoked, 0);
- atomic_set(&barrier_cbs_count, n_barrier_cbs);
- smp_mb(); /* Ensure barrier_phase after prior assignments. */
- barrier_phase = !barrier_phase;
- for (i = 0; i < n_barrier_cbs; i++)
- wake_up(&barrier_cbs_wq[i]);
- wait_event(barrier_wq,
- atomic_read(&barrier_cbs_count) == 0 ||
- kthread_should_stop() ||
- fullstop != FULLSTOP_DONTSTOP);
- if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
- break;
- n_barrier_attempts++;
- cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
- if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
- n_rcu_torture_barrier_error++;
- WARN_ON_ONCE(1);
- }
- n_barrier_successes++;
- schedule_timeout_interruptible(HZ / 10);
- } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
- VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
- rcutorture_shutdown_absorb("rcu_torture_barrier");
- while (!kthread_should_stop())
- schedule_timeout_interruptible(1);
- return 0;
-}
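/*
 * What the kthread above validates is the basic rcu_barrier()
 * contract: once cur_ops->cb_barrier() returns, every callback posted
 * before it must have been invoked. A minimal sketch of the pattern
 * being exercised (my_obj/my_cb are hypothetical names):
 *
 *	static void my_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	call_rcu(&obj->rh, my_cb);
 *	...
 *	rcu_barrier();	/* all pending my_cb() invocations have run, */
 *			/* so the module text may now safely go away  */
 */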
-
-/* Initialize RCU barrier testing. */
-static int rcu_torture_barrier_init(void)
-{
- int i;
- int ret;
-
- if (n_barrier_cbs == 0)
- return 0;
- if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
- pr_alert("%s" TORTURE_FLAG
- " Call or barrier ops missing for %s,\n",
- torture_type, cur_ops->name);
- pr_alert("%s" TORTURE_FLAG
- " RCU barrier testing omitted from run.\n",
- torture_type);
- return 0;
- }
- atomic_set(&barrier_cbs_count, 0);
- atomic_set(&barrier_cbs_invoked, 0);
- barrier_cbs_tasks =
- kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
- GFP_KERNEL);
- barrier_cbs_wq =
- kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
- GFP_KERNEL);
- if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
- return -ENOMEM;
- for (i = 0; i < n_barrier_cbs; i++) {
- init_waitqueue_head(&barrier_cbs_wq[i]);
- barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
- (void *)(long)i,
- "rcu_torture_barrier_cbs");
- if (IS_ERR(barrier_cbs_tasks[i])) {
- ret = PTR_ERR(barrier_cbs_tasks[i]);
- VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
- barrier_cbs_tasks[i] = NULL;
- return ret;
- }
- }
- barrier_task = kthread_run(rcu_torture_barrier, NULL,
- "rcu_torture_barrier");
- if (IS_ERR(barrier_task)) {
- ret = PTR_ERR(barrier_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier");
- barrier_task = NULL;
- }
- return 0;
-}
-
-/* Clean up after RCU barrier testing. */
-static void rcu_torture_barrier_cleanup(void)
-{
- int i;
-
- if (barrier_task != NULL) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task");
- kthread_stop(barrier_task);
- barrier_task = NULL;
- }
- if (barrier_cbs_tasks != NULL) {
- for (i = 0; i < n_barrier_cbs; i++) {
- if (barrier_cbs_tasks[i] != NULL) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task");
- kthread_stop(barrier_cbs_tasks[i]);
- barrier_cbs_tasks[i] = NULL;
- }
- }
- kfree(barrier_cbs_tasks);
- barrier_cbs_tasks = NULL;
- }
- if (barrier_cbs_wq != NULL) {
- kfree(barrier_cbs_wq);
- barrier_cbs_wq = NULL;
- }
-}
-
-static int rcutorture_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- (void)rcutorture_booster_init(cpu);
- break;
- case CPU_DOWN_PREPARE:
- rcutorture_booster_cleanup(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block rcutorture_cpu_nb = {
- .notifier_call = rcutorture_cpu_notify,
-};
-
-static void
-rcu_torture_cleanup(void)
-{
- int i;
-
- mutex_lock(&fullstop_mutex);
- rcutorture_record_test_transition();
- if (fullstop == FULLSTOP_SHUTDOWN) {
- pr_warn(/* but going down anyway, so... */
- "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
- mutex_unlock(&fullstop_mutex);
- schedule_timeout_uninterruptible(10);
- if (cur_ops->cb_barrier != NULL)
- cur_ops->cb_barrier();
- return;
- }
- fullstop = FULLSTOP_RMMOD;
- mutex_unlock(&fullstop_mutex);
- unregister_reboot_notifier(&rcutorture_shutdown_nb);
- rcu_torture_barrier_cleanup();
- rcu_torture_stall_cleanup();
- if (stutter_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
- kthread_stop(stutter_task);
- }
- stutter_task = NULL;
- if (shuffler_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
- kthread_stop(shuffler_task);
- free_cpumask_var(shuffle_tmp_mask);
- }
- shuffler_task = NULL;
-
- if (writer_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
- kthread_stop(writer_task);
- }
- writer_task = NULL;
-
- if (reader_tasks) {
- for (i = 0; i < nrealreaders; i++) {
- if (reader_tasks[i]) {
- VERBOSE_PRINTK_STRING(
- "Stopping rcu_torture_reader task");
- kthread_stop(reader_tasks[i]);
- }
- reader_tasks[i] = NULL;
- }
- kfree(reader_tasks);
- reader_tasks = NULL;
- }
- rcu_torture_current = NULL;
-
- if (fakewriter_tasks) {
- for (i = 0; i < nfakewriters; i++) {
- if (fakewriter_tasks[i]) {
- VERBOSE_PRINTK_STRING(
- "Stopping rcu_torture_fakewriter task");
- kthread_stop(fakewriter_tasks[i]);
- }
- fakewriter_tasks[i] = NULL;
- }
- kfree(fakewriter_tasks);
- fakewriter_tasks = NULL;
- }
-
- if (stats_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
- kthread_stop(stats_task);
- }
- stats_task = NULL;
-
- if (fqs_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
- kthread_stop(fqs_task);
- }
- fqs_task = NULL;
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2) {
- unregister_cpu_notifier(&rcutorture_cpu_nb);
- for_each_possible_cpu(i)
- rcutorture_booster_cleanup(i);
- }
- if (shutdown_task != NULL) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
- kthread_stop(shutdown_task);
- }
- shutdown_task = NULL;
- rcu_torture_onoff_cleanup();
-
- /* Wait for all RCU callbacks to fire. */
-
- if (cur_ops->cb_barrier != NULL)
- cur_ops->cb_barrier();
-
- rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
-
- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
- rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
- else if (n_online_successes != n_online_attempts ||
- n_offline_successes != n_offline_attempts)
- rcu_torture_print_module_parms(cur_ops,
- "End of test: RCU_HOTPLUG");
- else
- rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
-}
-
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static void rcu_torture_leak_cb(struct rcu_head *rhp)
-{
-}
-
-static void rcu_torture_err_cb(struct rcu_head *rhp)
-{
- /*
- * This -might- happen due to race conditions, but is unlikely.
- * The scenario that leads to this happening is that the
- * first of the pair of duplicate callbacks is queued,
- * someone else starts a grace period that includes that
- * callback, then the second of the pair must wait for the
- * next grace period. Unlikely, but can happen. If it
- * does happen, the debug-objects subsystem won't have splatted.
- */
- pr_alert("rcutorture: duplicated callback was invoked.\n");
-}
-#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-
-/*
- * Verify that double-free causes debug-objects to complain, but only
- * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
- * cannot be carried out.
- */
-static void rcu_test_debug_objects(void)
-{
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
- struct rcu_head rh1;
- struct rcu_head rh2;
-
- init_rcu_head_on_stack(&rh1);
- init_rcu_head_on_stack(&rh2);
- pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
-
- /* Try to queue the rh2 pair of callbacks for the same grace period. */
- preempt_disable(); /* Prevent preemption from interrupting test. */
- rcu_read_lock(); /* Make it impossible to finish a grace period. */
- call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
- local_irq_disable(); /* Make it harder to start a new grace period. */
- call_rcu(&rh2, rcu_torture_leak_cb);
- call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
- local_irq_enable();
- rcu_read_unlock();
- preempt_enable();
-
- /* Wait for them all to get done so we can safely return. */
- rcu_barrier();
- pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
- destroy_rcu_head_on_stack(&rh1);
- destroy_rcu_head_on_stack(&rh2);
-#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
- pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
-#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-}
-
-static int __init
-rcu_torture_init(void)
-{
- int i;
- int cpu;
- int firsterr = 0;
- int retval;
- static struct rcu_torture_ops *torture_ops[] = {
- &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
- };
-
- mutex_lock(&fullstop_mutex);
-
- /* Process args and tell the world that the torturer is on the job. */
- for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
- cur_ops = torture_ops[i];
- if (strcmp(torture_type, cur_ops->name) == 0)
- break;
- }
- if (i == ARRAY_SIZE(torture_ops)) {
- pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
- torture_type);
- pr_alert("rcu-torture types:");
- for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
- pr_alert(" %s", torture_ops[i]->name);
- pr_alert("\n");
- mutex_unlock(&fullstop_mutex);
- return -EINVAL;
- }
- if (cur_ops->fqs == NULL && fqs_duration != 0) {
- pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
- fqs_duration = 0;
- }
- if (cur_ops->init)
- cur_ops->init(); /* no "goto unwind" prior to this point!!! */
-
- if (nreaders >= 0)
- nrealreaders = nreaders;
- else
- nrealreaders = 2 * num_online_cpus();
- rcu_torture_print_module_parms(cur_ops, "Start of test");
- fullstop = FULLSTOP_DONTSTOP;
-
- /* Set up the freelist. */
-
- INIT_LIST_HEAD(&rcu_torture_freelist);
- for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
- rcu_tortures[i].rtort_mbtest = 0;
- list_add_tail(&rcu_tortures[i].rtort_free,
- &rcu_torture_freelist);
- }
-
- /* Initialize the statistics so that each run gets its own numbers. */
-
- rcu_torture_current = NULL;
- rcu_torture_current_version = 0;
- atomic_set(&n_rcu_torture_alloc, 0);
- atomic_set(&n_rcu_torture_alloc_fail, 0);
- atomic_set(&n_rcu_torture_free, 0);
- atomic_set(&n_rcu_torture_mberror, 0);
- atomic_set(&n_rcu_torture_error, 0);
- n_rcu_torture_barrier_error = 0;
- n_rcu_torture_boost_ktrerror = 0;
- n_rcu_torture_boost_rterror = 0;
- n_rcu_torture_boost_failure = 0;
- n_rcu_torture_boosts = 0;
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- atomic_set(&rcu_torture_wcount[i], 0);
- for_each_possible_cpu(cpu) {
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- per_cpu(rcu_torture_count, cpu)[i] = 0;
- per_cpu(rcu_torture_batch, cpu)[i] = 0;
- }
- }
-
- /* Start up the kthreads. */
-
- VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
- writer_task = kthread_create(rcu_torture_writer, NULL,
- "rcu_torture_writer");
- if (IS_ERR(writer_task)) {
- firsterr = PTR_ERR(writer_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
- writer_task = NULL;
- goto unwind;
- }
- wake_up_process(writer_task);
- fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
- GFP_KERNEL);
- if (fakewriter_tasks == NULL) {
- VERBOSE_PRINTK_ERRSTRING("out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < nfakewriters; i++) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
- fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
- "rcu_torture_fakewriter");
- if (IS_ERR(fakewriter_tasks[i])) {
- firsterr = PTR_ERR(fakewriter_tasks[i]);
- VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
- fakewriter_tasks[i] = NULL;
- goto unwind;
- }
- }
- reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
- GFP_KERNEL);
- if (reader_tasks == NULL) {
- VERBOSE_PRINTK_ERRSTRING("out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < nrealreaders; i++) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
- reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
- "rcu_torture_reader");
- if (IS_ERR(reader_tasks[i])) {
- firsterr = PTR_ERR(reader_tasks[i]);
- VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
- reader_tasks[i] = NULL;
- goto unwind;
- }
- }
- if (stat_interval > 0) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
- stats_task = kthread_run(rcu_torture_stats, NULL,
- "rcu_torture_stats");
- if (IS_ERR(stats_task)) {
- firsterr = PTR_ERR(stats_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
- stats_task = NULL;
- goto unwind;
- }
- }
- if (test_no_idle_hz) {
- rcu_idle_cpu = num_online_cpus() - 1;
-
- if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
- firsterr = -ENOMEM;
- VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
- goto unwind;
- }
-
- /* Create the shuffler thread */
- shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
- "rcu_torture_shuffle");
- if (IS_ERR(shuffler_task)) {
- free_cpumask_var(shuffle_tmp_mask);
- firsterr = PTR_ERR(shuffler_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
- shuffler_task = NULL;
- goto unwind;
- }
- }
- if (stutter < 0)
- stutter = 0;
- if (stutter) {
- /* Create the stutter thread */
- stutter_task = kthread_run(rcu_torture_stutter, NULL,
- "rcu_torture_stutter");
- if (IS_ERR(stutter_task)) {
- firsterr = PTR_ERR(stutter_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
- stutter_task = NULL;
- goto unwind;
- }
- }
- if (fqs_duration < 0)
- fqs_duration = 0;
- if (fqs_duration) {
- /* Create the fqs thread */
- fqs_task = kthread_run(rcu_torture_fqs, NULL,
- "rcu_torture_fqs");
- if (IS_ERR(fqs_task)) {
- firsterr = PTR_ERR(fqs_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
- fqs_task = NULL;
- goto unwind;
- }
- }
- if (test_boost_interval < 1)
- test_boost_interval = 1;
- if (test_boost_duration < 2)
- test_boost_duration = 2;
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2) {
-
- boost_starttime = jiffies + test_boost_interval * HZ;
- register_cpu_notifier(&rcutorture_cpu_nb);
- for_each_possible_cpu(i) {
- if (cpu_is_offline(i))
- continue; /* Heuristic: CPU can go offline. */
- retval = rcutorture_booster_init(i);
- if (retval < 0) {
- firsterr = retval;
- goto unwind;
- }
- }
- }
- if (shutdown_secs > 0) {
- shutdown_time = jiffies + shutdown_secs * HZ;
- shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
- "rcu_torture_shutdown");
- if (IS_ERR(shutdown_task)) {
- firsterr = PTR_ERR(shutdown_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
- shutdown_task = NULL;
- goto unwind;
- }
- wake_up_process(shutdown_task);
- }
- i = rcu_torture_onoff_init();
- if (i != 0) {
- firsterr = i;
- goto unwind;
- }
- register_reboot_notifier(&rcutorture_shutdown_nb);
- i = rcu_torture_stall_init();
- if (i != 0) {
- firsterr = i;
- goto unwind;
- }
- retval = rcu_torture_barrier_init();
- if (retval != 0) {
- firsterr = retval;
- goto unwind;
- }
- if (object_debug)
- rcu_test_debug_objects();
- rcutorture_record_test_transition();
- mutex_unlock(&fullstop_mutex);
- return 0;
-
-unwind:
- mutex_unlock(&fullstop_mutex);
- rcu_torture_cleanup();
- return firsterr;
-}
-
-module_init(rcu_torture_init);
-module_exit(rcu_torture_cleanup);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b3d116c..0c47e30 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
@@ -58,8 +58,6 @@
#include <linux/suspend.h>
#include "tree.h"
-#include <trace/events/rcu.h>
-
#include "rcu.h"
MODULE_ALIAS("rcutree");
@@ -837,7 +835,7 @@
* to the next. Only do this for the primary flavor of RCU.
*/
if (rdp->rsp == rcu_state &&
- ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+ ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
rdp->rsp->jiffies_resched += 5;
resched_cpu(rdp->cpu);
}
@@ -847,7 +845,7 @@
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
- unsigned long j = ACCESS_ONCE(jiffies);
+ unsigned long j = jiffies;
unsigned long j1;
rsp->gp_start = j;
@@ -1005,7 +1003,7 @@
if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
return;
- j = ACCESS_ONCE(jiffies);
+ j = jiffies;
/*
* Lots of memory barriers to reject false positives.
@@ -1423,13 +1421,14 @@
/* Advance to a new grace period and initialize state. */
record_gp_stall_check_time(rsp);
- smp_wmb(); /* Record GP times before starting GP. */
- rsp->gpnum++;
+ /* Record GP times before starting GP, hence smp_store_release(). */
+ smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
raw_spin_unlock_irq(&rnp->lock);
/* Exclude any concurrent CPU-hotplug operations. */
mutex_lock(&rsp->onoff_mutex);
+ smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
/*
* Set the quiescent-state-needed bits in all the rcu_node
@@ -1557,10 +1556,11 @@
}
rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
- smp_mb__after_unlock_lock();
+ smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
rcu_nocb_gp_set(rnp, nocb);
- rsp->completed = rsp->gpnum; /* Declare grace period done. */
+ /* Declare grace period done. */
+ ACCESS_ONCE(rsp->completed) = rsp->gpnum;
trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
rsp->fqs_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
@@ -2304,7 +2304,7 @@
if (rnp_old != NULL)
raw_spin_unlock(&rnp_old->fqslock);
if (ret) {
- rsp->n_force_qs_lh++;
+ ACCESS_ONCE(rsp->n_force_qs_lh)++;
return;
}
rnp_old = rnp;
@@ -2316,7 +2316,7 @@
smp_mb__after_unlock_lock();
raw_spin_unlock(&rnp_old->fqslock);
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
- rsp->n_force_qs_lh++;
+ ACCESS_ONCE(rsp->n_force_qs_lh)++;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
return; /* Someone beat us to it. */
}
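/*
 * Note on the idiom above: ->n_force_qs_lh is a lockless statistics
 * counter, so occasional lost updates are tolerated; ACCESS_ONCE()
 * only keeps the compiler from tearing or refetching the access.
 * Its classic definition in compiler.h is:
 *
 *	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 */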
@@ -2639,6 +2639,58 @@
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+/**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_rcu()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_rcu(void)
+{
+ /*
+ * Any prior manipulation of RCU-protected data must happen
+ * before the load from ->gpnum.
+ */
+ smp_mb(); /* ^^^ */
+
+ /*
+ * Make sure this load happens before the purportedly
+ * time-consuming work between get_state_synchronize_rcu()
+ * and cond_synchronize_rcu().
+ */
+ return smp_load_acquire(&rcu_state->gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
+
+/**
+ * cond_synchronize_rcu - Conditionally wait for an RCU grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_rcu()
+ *
+ * If a full RCU grace period has elapsed since the earlier call to
+ * get_state_synchronize_rcu(), just return. Otherwise, invoke
+ * synchronize_rcu() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account. But
+ * counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_rcu(unsigned long oldstate)
+{
+ unsigned long newstate;
+
+ /*
+ * Ensure that this load happens before any RCU-destructive
+ * actions the caller might carry out after we return.
+ */
+ newstate = smp_load_acquire(&rcu_state->completed);
+ if (ULONG_CMP_GE(oldstate, newstate))
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
+
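/*
 * A minimal usage sketch for the pair of functions added above
 * (do_time_consuming_work and old_ptr are hypothetical):
 *
 *	unsigned long snap;
 *
 *	snap = get_state_synchronize_rcu();
 *	do_time_consuming_work();	/* hypothetical */
 *	cond_synchronize_rcu(snap);	/* no-op if a full grace period
 *					   already elapsed in between */
 *	kfree(old_ptr);			/* pre-existing readers are done */
 *
 * The smp_load_acquire() here pairs with the smp_store_release() on
 * ->gpnum in the rcu_gp_init() hunk above, which is what makes the
 * cheap fast path safe.
 */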
static int synchronize_sched_expedited_cpu_stop(void *data)
{
/*
@@ -2880,7 +2932,7 @@
* non-NULL, store an indication of whether all callbacks are lazy.
* (If there are no callbacks, all of them are deemed to be lazy.)
*/
-static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
+static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
{
bool al = true;
bool hc = false;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8c19873..75dc3c3 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -13,8 +13,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6e2ef4b..962d1d5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -14,8 +14,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright Red Hat, 2009
* Copyright IBM Corporation, 2009
@@ -1586,11 +1586,13 @@
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
* any flavor of RCU.
*/
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
return rcu_cpu_has_callbacks(cpu, NULL);
}
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1656,7 +1658,7 @@
* only if it has been awhile since the last time we did so. Afterwards,
* if there are any callbacks ready for immediate invocation, return true.
*/
-static bool rcu_try_advance_all_cbs(void)
+static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
bool cbs_ready = false;
struct rcu_data *rdp;
@@ -1696,6 +1698,7 @@
*
* The caller must have disabled interrupts.
*/
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *dj)
{
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
@@ -1726,6 +1729,7 @@
}
return 0;
}
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
@@ -1739,6 +1743,7 @@
*/
static void rcu_prepare_for_idle(int cpu)
{
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
struct rcu_data *rdp;
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
struct rcu_node *rnp;
@@ -1790,6 +1795,7 @@
rcu_accelerate_cbs(rsp, rnp, rdp);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
}
/*
@@ -1799,11 +1805,12 @@
*/
static void rcu_cleanup_after_idle(int cpu)
{
-
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
if (rcu_is_nocb_cpu(cpu))
return;
if (rcu_try_advance_all_cbs())
invoke_rcu_core();
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
}
/*
@@ -2101,6 +2108,7 @@
init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
@@ -2108,6 +2116,7 @@
return cpumask_test_cpu(cpu, rcu_nocb_mask);
return false;
}
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
/*
* Enqueue the specified string of rcu_head structures onto the specified
@@ -2893,7 +2902,7 @@
* CPU unless the grace period has extended for too long.
*
* This code relies on the fact that all NO_HZ_FULL CPUs are also
- * CONFIG_RCU_NOCB_CPUs.
+ * CONFIG_RCU_NOCB_CPU CPUs.
*/
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
{
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 4def475..5cdc62e 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2008
*
@@ -273,7 +273,7 @@
seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
rsp->n_force_qs, rsp->n_force_qs_ngp,
rsp->n_force_qs - rsp->n_force_qs_ngp,
- rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
+ ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
if (rnp->level != level) {
seq_puts(m, "\n");
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index c54609f..4c0a9b0 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -12,8 +12,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2001
*
@@ -49,7 +49,6 @@
#include <linux/module.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/rcu.h>
#include "rcu.h"
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9a95c8c..ab32b7b 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@
obj-y += core.o proc.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o
+obj-y += wait.o completion.o idle.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 4a07353..e73efba 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -203,7 +203,7 @@
struct autogroup *ag;
int err;
- if (nice < -20 || nice > 19)
+ if (nice < MIN_NICE || nice > MAX_NICE)
return -EINVAL;
err = security_task_setnice(current, nice);
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc..b30a292 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@
if (unlikely(!sched_clock_running))
return 0ull;
- preempt_disable();
+ preempt_disable_notrace();
scd = cpu_sdc(cpu);
if (cpu != smp_processor_id())
clock = sched_clock_remote(scd);
else
clock = sched_clock_local(scd);
- preempt_enable();
+ preempt_enable_notrace();
return clock;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4b0739c..a47902c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1745,8 +1745,10 @@
p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_work.next = &p->numa_work;
- p->numa_faults = NULL;
- p->numa_faults_buffer = NULL;
+ p->numa_faults_memory = NULL;
+ p->numa_faults_buffer_memory = NULL;
+ p->last_task_numa_placement = 0;
+ p->last_sum_exec_runtime = 0;
INIT_LIST_HEAD(&p->numa_entry);
p->numa_group = NULL;
@@ -1952,7 +1954,7 @@
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- u64 period = attr->sched_period;
+ u64 period = attr->sched_period ?: attr->sched_deadline;
u64 runtime = attr->sched_runtime;
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
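/*
 * The "?:" above is GCC's Elvis operator: "a ?: b" evaluates to a if
 * a is nonzero, else b. A zero (unset) sched_period therefore falls
 * back to sched_deadline, giving the implicit-deadline case where
 * period == deadline:
 *
 *	u64 period = attr->sched_period ?: attr->sched_deadline;
 *	/* == attr->sched_period ? attr->sched_period
 *				 : attr->sched_deadline; */
 */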
@@ -2149,8 +2151,6 @@
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
- task_numa_free(prev);
-
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
@@ -2167,13 +2167,6 @@
#ifdef CONFIG_SMP
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
- if (prev->sched_class->pre_schedule)
- prev->sched_class->pre_schedule(rq, prev);
-}
-
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
@@ -2191,10 +2184,6 @@
#else
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
static inline void post_schedule(struct rq *rq)
{
}
@@ -2510,8 +2499,13 @@
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
- if (preempt_count() == val)
- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ if (preempt_count() == val) {
+ unsigned long ip = get_parent_ip(CALLER_ADDR1);
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = ip;
+#endif
+ trace_preempt_off(CALLER_ADDR0, ip);
+ }
}
EXPORT_SYMBOL(preempt_count_add);
@@ -2554,6 +2548,13 @@
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
+#ifdef CONFIG_DEBUG_PREEMPT
+ if (in_atomic_preempt_off()) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+ pr_cont("\n");
+ }
+#endif
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
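/*
 * Effect of the two CONFIG_DEBUG_PREEMPT hunks above, sketched: the
 * caller IP of the outermost preempt_count_add() is remembered so a
 * later "scheduling while atomic" splat can name the culprit:
 *
 *	preempt_disable();	/* count 0 -> 1: IP recorded in
 *				   current->preempt_disable_ip */
 *	...
 *	schedule();		/* splat now also prints:
 *				   "Preemption disabled at:" <that IP> */
 */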
@@ -2577,36 +2578,34 @@
schedstat_inc(this_rq(), sched_count);
}
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
- if (prev->on_rq || rq->skip_clock_update < 0)
- update_rq_clock(rq);
- prev->sched_class->put_prev_task(rq, prev);
-}
-
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
{
- const struct sched_class *class;
+ const struct sched_class *class = &fair_sched_class;
struct task_struct *p;
/*
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
- p = fair_sched_class.pick_next_task(rq);
- if (likely(p))
+ if (likely(prev->sched_class == class &&
+ rq->nr_running == rq->cfs.h_nr_running)) {
+ p = fair_sched_class.pick_next_task(rq, prev);
+ if (likely(p && p != RETRY_TASK))
return p;
}
+again:
for_each_class(class) {
- p = class->pick_next_task(rq);
- if (p)
+ p = class->pick_next_task(rq, prev);
+ if (p) {
+ if (unlikely(p == RETRY_TASK))
+ goto again;
return p;
+ }
}
BUG(); /* the idle class will always have a runnable task */
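/*
 * The RETRY_TASK protocol introduced here, sketched: a class's
 * ->pick_next_task(rq, prev) may drop and retake rq->lock (e.g. for
 * idle balancing), after which a higher-priority task may have become
 * runnable; it then returns RETRY_TASK and the caller restarts from
 * the highest class. The return value is effectively tri-state:
 *
 *	p = class->pick_next_task(rq, prev);
 *	if (p == RETRY_TASK)
 *		goto again;	/* rq->lock was dropped; re-walk classes */
 *	else if (p)
 *		return p;	/* next task chosen, prev already put */
 *	/* else fall through to the next, lower class */
 */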
@@ -2700,13 +2699,10 @@
switch_count = &prev->nvcsw;
}
- pre_schedule(rq, prev);
+ if (prev->on_rq || rq->skip_clock_update < 0)
+ update_rq_clock(rq);
- if (unlikely(!rq->nr_running))
- idle_balance(cpu, rq);
-
- put_prev_task(rq, prev);
- next = pick_next_task(rq);
+ next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->skip_clock_update = 0;
@@ -2908,7 +2904,8 @@
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
- * Used by the rt_mutex code to implement priority inheritance logic.
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. The call site only invokes this if the task's priority changed.
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
@@ -2998,7 +2995,7 @@
unsigned long flags;
struct rq *rq;
- if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
return;
/*
* We have to be careful, if called from sys_setpriority(),
@@ -3076,11 +3073,11 @@
if (increment > 40)
increment = 40;
- nice = TASK_NICE(current) + increment;
- if (nice < -20)
- nice = -20;
- if (nice > 19)
- nice = 19;
+ nice = task_nice(current) + increment;
+ if (nice < MIN_NICE)
+ nice = MIN_NICE;
+ if (nice > MAX_NICE)
+ nice = MAX_NICE;
if (increment < 0 && !can_nice(current, nice))
return -EPERM;
@@ -3109,18 +3106,6 @@
}
/**
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- *
- * Return: The nice value [ -20 ... 0 ... 19 ].
- */
-int task_nice(const struct task_struct *p)
-{
- return TASK_NICE(p);
-}
-EXPORT_SYMBOL(task_nice);
-
-/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
*
@@ -3189,9 +3174,8 @@
dl_se->dl_new = 1;
}
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
- const struct sched_attr *attr)
+static void __setscheduler_params(struct task_struct *p,
+ const struct sched_attr *attr)
{
int policy = attr->sched_policy;
@@ -3211,9 +3195,21 @@
* getparam()/getattr() don't report silly values for !rt tasks.
*/
p->rt_priority = attr->sched_priority;
-
p->normal_prio = normal_prio(p);
- p->prio = rt_mutex_getprio(p);
+ set_load_weight(p);
+}
+
+/* Actually do priority change: must hold pi & rq lock. */
+static void __setscheduler(struct rq *rq, struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ __setscheduler_params(p, attr);
+
+ /*
+ * If we get here, there was no pi waiters boosting the
+ * task. It is safe to use the normal prio.
+ */
+ p->prio = normal_prio(p);
if (dl_prio(p->prio))
p->sched_class = &dl_sched_class;
@@ -3221,8 +3217,6 @@
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
-
- set_load_weight(p);
}
static void
@@ -3275,6 +3269,8 @@
const struct sched_attr *attr,
bool user)
{
+ int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
+ MAX_RT_PRIO - 1 - attr->sched_priority;
int retval, oldprio, oldpolicy = -1, on_rq, running;
int policy = attr->sched_policy;
unsigned long flags;
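/*
 * Worked example of the newprio mapping above (assuming the usual
 * MAX_RT_PRIO == 100 and MAX_DL_PRIO == 0): kernel priorities run in
 * the opposite direction from user-space RT priorities, and deadline
 * tasks sit below (and thus beat) all of them:
 *
 *	SCHED_FIFO, sched_priority = 1	-> newprio = 99 - 1  = 98
 *	SCHED_FIFO, sched_priority = 99	-> newprio = 99 - 99 = 0
 *	SCHED_DEADLINE			-> newprio = MAX_DL_PRIO - 1 = -1
 */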
@@ -3319,7 +3315,7 @@
*/
if (user && !capable(CAP_SYS_NICE)) {
if (fair_policy(policy)) {
- if (attr->sched_nice < TASK_NICE(p) &&
+ if (attr->sched_nice < task_nice(p) &&
!can_nice(p, attr->sched_nice))
return -EPERM;
}
@@ -3338,12 +3334,21 @@
return -EPERM;
}
+ /*
+ * Can't set/change SCHED_DEADLINE policy at all for now
+ * (safest behavior); in the future we would like to allow
+ * unprivileged DL tasks to increase their relative deadline
+ * or reduce their runtime (both ways reducing utilization)
+ */
+ if (dl_policy(policy))
+ return -EPERM;
+
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
*/
if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
- if (!can_nice(p, TASK_NICE(p)))
+ if (!can_nice(p, task_nice(p)))
return -EPERM;
}
@@ -3380,16 +3385,18 @@
}
/*
- * If not changing anything there's no need to proceed further:
+ * If not changing anything there's no need to proceed further,
+ * but store a possible modification of reset_on_fork.
*/
if (unlikely(policy == p->policy)) {
- if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
+ if (fair_policy(policy) && attr->sched_nice != task_nice(p))
goto change;
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
goto change;
if (dl_policy(policy))
goto change;
+ p->sched_reset_on_fork = reset_on_fork;
task_rq_unlock(rq, p, &flags);
return 0;
}
@@ -3443,6 +3450,24 @@
return -EBUSY;
}
+ p->sched_reset_on_fork = reset_on_fork;
+ oldprio = p->prio;
+
+ /*
+ * Special case for priority boosted tasks.
+ *
+ * If the new priority is lower than or equal to (from the
+ * user-space view) the current (boosted) priority, we just
+ * store the new normal parameters and do not touch the
+ * scheduler class and the runqueue. That will be done when
+ * the task deboosts itself.
+ */
+ if (rt_mutex_check_prio(p, newprio)) {
+ __setscheduler_params(p, attr);
+ task_rq_unlock(rq, p, &flags);
+ return 0;
+ }
+
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
@@ -3450,16 +3475,18 @@
if (running)
p->sched_class->put_prev_task(rq, p);
- p->sched_reset_on_fork = reset_on_fork;
-
- oldprio = p->prio;
prev_class = p->sched_class;
__setscheduler(rq, p, attr);
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq)
- enqueue_task(rq, p, 0);
+ if (on_rq) {
+ /*
+ * We enqueue to tail when the priority of a task is
+ * increased (user space view).
+ */
+ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
+ }
check_class_changed(rq, p, prev_class, oldprio);
task_rq_unlock(rq, p, &flags);
@@ -3615,7 +3642,7 @@
* XXX: do we want to be lenient like existing syscalls; or do we want
* to be strict and return an error on out-of-bounds values?
*/
- attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+ attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
out:
return ret;
@@ -3661,13 +3688,14 @@
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
*/
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, flags)
{
struct sched_attr attr;
struct task_struct *p;
int retval;
- if (!uattr || pid < 0)
+ if (!uattr || pid < 0 || flags)
return -EINVAL;
if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3814,7 @@
attr->size = usize;
}
- ret = copy_to_user(uattr, attr, usize);
+ ret = copy_to_user(uattr, attr, attr->size);
if (ret)
return -EFAULT;
@@ -3804,8 +3832,8 @@
* @uattr: structure containing the extended parameters.
* @size: sizeof(attr) for fwd/bwd comp.
*/
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
- unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, size, unsigned int, flags)
{
struct sched_attr attr = {
.size = sizeof(struct sched_attr),
@@ -3814,7 +3842,7 @@
int retval;
if (!uattr || pid < 0 || size > PAGE_SIZE ||
- size < SCHED_ATTR_SIZE_VER0)
+ size < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL;
rcu_read_lock();
@@ -3835,7 +3863,7 @@
else if (task_has_rt_policy(p))
attr.sched_priority = p->rt_priority;
else
- attr.sched_nice = TASK_NICE(p);
+ attr.sched_nice = task_nice(p);
rcu_read_unlock();
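/*
 * User-space consequence of the added "flags" arguments: glibc
 * provides no wrappers for these syscalls, so callers typically go
 * through syscall(2) and must now pass 0 as the final argument or
 * get -EINVAL back. A sketch (pid is a hypothetical target):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_FIFO,
 *		.sched_priority	= 10,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, pid, &attr, 0))	/* flags == 0 */
 *		perror("sched_setattr");
 */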
@@ -4473,6 +4501,7 @@
rcu_read_unlock();
rq->curr = rq->idle = idle;
+ idle->on_rq = 1;
#if defined(CONFIG_SMP)
idle->on_cpu = 1;
#endif
@@ -4713,6 +4742,22 @@
atomic_long_add(delta, &calc_load_tasks);
}
+static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static const struct sched_class fake_sched_class = {
+ .put_prev_task = put_prev_task_fake,
+};
+
+static struct task_struct fake_task = {
+ /*
+ * Avoid pull_{rt,dl}_task()
+ */
+ .prio = MAX_PRIO + 1,
+ .sched_class = &fake_sched_class,
+};
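/*
 * Why prio = MAX_PRIO + 1 suppresses the pulls: the reworked
 * pick_next_task(rq, prev) callbacks decide whether to pull based on
 * the class of @prev (see need_pull_dl_task() in the deadline.c hunk
 * below, and its RT analogue). A priority above MAX_PRIO is neither
 * dl_prio() nor rt_prio(), so during CPU teardown:
 *
 *	need_pull_dl_task(rq, &fake_task)
 *		-> dl_task(&fake_task)	/* prio 141 is not < 0 */
 *		-> false
 */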
+
/*
* Migrate all tasks from the rq, sleeping tasks will be migrated by
* try_to_wake_up()->select_task_rq().
@@ -4753,7 +4798,7 @@
if (rq->nr_running == 1)
break;
- next = pick_next_task(rq);
+ next = pick_next_task(rq, &fake_task);
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);
@@ -4843,7 +4888,7 @@
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
- struct ctl_table *table = sd_alloc_ctl_entry(13);
+ struct ctl_table *table = sd_alloc_ctl_entry(14);
if (table == NULL)
return NULL;
@@ -4871,9 +4916,12 @@
sizeof(int), 0644, proc_dointvec_minmax, false);
set_table_entry(&table[10], "flags", &sd->flags,
sizeof(int), 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[11], "name", sd->name,
+ set_table_entry(&table[11], "max_newidle_lb_cost",
+ &sd->max_newidle_lb_cost,
+ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[12], "name", sd->name,
CORENAME_MAX_SIZE, 0444, proc_dostring, false);
- /* &table[12] is terminator */
+ /* &table[13] is terminator */
return table;
}
@@ -6850,7 +6898,6 @@
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
- INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
@@ -6939,7 +6986,8 @@
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+ !is_idle_task(current)) ||
system_state != SYSTEM_RUNNING || oops_in_progress)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
@@ -6957,6 +7005,13 @@
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT
+ if (!preempt_count_equals(preempt_offset)) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+ pr_cont("\n");
+ }
+#endif
dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
@@ -7010,7 +7065,7 @@
* Renice negative nice level userspace
* tasks back to 0:
*/
- if (TASK_NICE(p) < 0 && p->mm)
+ if (task_nice(p) < 0 && p->mm)
set_user_nice(p, 0);
continue;
}
@@ -7424,6 +7479,7 @@
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
int cpu, ret = 0;
+ unsigned long flags;
/*
* Here we want to check the bandwidth not being set to some
@@ -7437,10 +7493,10 @@
for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu);
- raw_spin_lock(&dl_b->lock);
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw < dl_b->total_bw)
ret = -EBUSY;
- raw_spin_unlock(&dl_b->lock);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
if (ret)
break;
@@ -7453,6 +7509,7 @@
{
u64 new_bw = -1;
int cpu;
+ unsigned long flags;
def_dl_bandwidth.dl_period = global_rt_period();
def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7466,9 +7523,9 @@
for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu);
- raw_spin_lock(&dl_b->lock);
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
- raw_spin_unlock(&dl_b->lock);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
}
}
@@ -7477,7 +7534,8 @@
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
- if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+ if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+ (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
return -EINVAL;
return 0;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74..5b9bb42 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@
static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
{
- WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+ WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
if (dl_time_before(new_dl, cp->elements[idx].dl)) {
cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@
}
out:
- WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+ WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
return best_cpu;
}
@@ -137,7 +137,7 @@
int old_idx, new_cpu;
unsigned long flags;
- WARN_ON(cpu > num_present_cpus());
+ WARN_ON(!cpu_present(cpu));
raw_spin_lock_irqsave(&cp->lock, flags);
old_idx = cp->cpu_to_idx[cpu];
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9994791..58624a6 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -142,7 +142,7 @@
p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime);
- index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+ index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
/* Add user time to cpustat. */
task_group_account_field(p, index, (__force u64) cputime);
@@ -169,7 +169,7 @@
p->gtime += cputime;
/* Add guest time to cpustat. */
- if (TASK_NICE(p) > 0) {
+ if (task_nice(p) > 0) {
cpustat[CPUTIME_NICE] += (__force u64) cputime;
cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
} else {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e09..27ef409 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@
static void update_dl_migration(struct dl_rq *dl_rq)
{
- if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+ if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
@@ -135,9 +135,7 @@
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
- dl_rq = &rq_of_dl_rq(dl_rq)->dl;
- dl_rq->dl_nr_total++;
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
@@ -147,9 +145,7 @@
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
- dl_rq = &rq_of_dl_rq(dl_rq)->dl;
- dl_rq->dl_nr_total--;
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
@@ -214,6 +210,16 @@
static int push_dl_task(struct rq *rq);
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+ return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+ rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
#else
static inline
@@ -236,6 +242,19 @@
{
}
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+ return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+ return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -566,6 +585,8 @@
return 1;
}
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
/*
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
@@ -588,8 +609,8 @@
* approach need further study.
*/
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
- if (unlikely((s64)delta_exec < 0))
- delta_exec = 0;
+ if (unlikely((s64)delta_exec <= 0))
+ return;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
@@ -629,11 +650,13 @@
struct rt_rq *rt_rq = &rq->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_time += delta_exec;
/*
* We'll let actual RT tasks worry about the overflow here, we
- * have our own CBS to keep us inline -- see above.
+ * have our own CBS to keep us in line; only account when RT
+ * bandwidth is relevant.
*/
+ if (sched_rt_bandwidth_account(rt_rq))
+ rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
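The guard matters because nothing decays rt_time while the RT period timer is idle: if no RT task is runnable, the timer that periodically refunds the budget never fires, so unconditional accounting of deadline runtime would grow rt_time without bound. A user-space illustration with made-up numbers (not the kernel code):

    #include <stdio.h>

    int main(void)
    {
        long long rt_runtime = 950000000;   /* 0.95 s RT budget per period */
        long long rt_time    = 0;
        /* No RT tasks are runnable, so the period timer that would
         * periodically subtract the budget never fires. */

        for (int tick = 0; tick < 4; tick++) {
            long long delta_exec = 400000000;   /* a DL task ran 0.4 s */
            rt_time += delta_exec;              /* old: unconditional */
            printf("tick %d: rt_time=%lld%s\n", tick, rt_time,
                   rt_time > rt_runtime ? "  (over budget!)" : "");
        }
        return 0;
    }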
@@ -717,6 +740,7 @@
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
+ inc_nr_running(rq_of_dl_rq(dl_rq));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +754,7 @@
WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
+ dec_nr_running(rq_of_dl_rq(dl_rq));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +861,6 @@
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
-
- inc_nr_running(rq);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +873,6 @@
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
-
- dec_nr_running(rq);
}
/*
@@ -944,6 +965,8 @@
resched_task(rq->curr);
}
+static int pull_dl_task(struct rq *this_rq);
+
#endif /* CONFIG_SMP */
/*
@@ -990,7 +1013,7 @@
return rb_entry(left, struct sched_dl_entity, rb_node);
}
-struct task_struct *pick_next_task_dl(struct rq *rq)
+struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
struct sched_dl_entity *dl_se;
struct task_struct *p;
@@ -998,9 +1021,20 @@
dl_rq = &rq->dl;
+ if (need_pull_dl_task(rq, prev))
+ pull_dl_task(rq);
+ /*
+ * When prev is DL, we may throttle it in put_prev_task().
+ * So, we update the runtime statistics before checking dl_nr_running.
+ */
+ if (prev->sched_class == &dl_sched_class)
+ update_curr_dl(rq);
+
if (unlikely(!dl_rq->dl_nr_running))
return NULL;
+ put_prev_task(rq, prev);
+
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
@@ -1015,9 +1049,7 @@
start_hrtick_dl(rq, p);
#endif
-#ifdef CONFIG_SMP
- rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+ set_post_schedule(rq);
return p;
}
@@ -1426,13 +1458,6 @@
return ret;
}
-static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
-{
- /* Try to pull other tasks here */
- if (dl_task(prev))
- pull_dl_task(rq);
-}
-
static void post_schedule_dl(struct rq *rq)
{
push_dl_tasks(rq);
@@ -1560,7 +1585,7 @@
if (unlikely(p->dl.dl_throttled))
return;
- if (p->on_rq || rq->curr != p) {
+ if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
/* Only reschedule if pushing failed */
@@ -1625,7 +1650,6 @@
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
- .pre_schedule = pre_schedule_dl,
.post_schedule = post_schedule_dl,
.task_woken = task_woken_dl,
#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index dd52e7f..f3344c3 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -321,6 +321,7 @@
P(sched_goidle);
#ifdef CONFIG_SMP
P64(avg_idle);
+ P64(max_idle_balance_cost);
#endif
P(ttwu_count);
@@ -533,15 +534,15 @@
unsigned long nr_faults = -1;
int cpu_current, home_node;
- if (p->numa_faults)
- nr_faults = p->numa_faults[2*node + i];
+ if (p->numa_faults_memory)
+ nr_faults = p->numa_faults_memory[2*node + i];
cpu_current = !i ? (task_node(p) == node) :
(pol && node_isset(node, pol->v.nodes));
home_node = (p->numa_preferred_nid == node);
- SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n",
+ SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
i, node, cpu_current, home_node, nr_faults);
}
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2b..7e9bd0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -322,13 +322,13 @@
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group? */
-static inline int
+static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
if (se->cfs_rq == pse->cfs_rq)
- return 1;
+ return se->cfs_rq;
- return 0;
+ return NULL;
}
static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -336,17 +336,6 @@
return se->parent;
}
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
- int depth = 0;
-
- for_each_sched_entity(se)
- depth++;
-
- return depth;
-}
-
static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
@@ -360,8 +349,8 @@
*/
/* First walk up until both entities are at same depth */
- se_depth = depth_se(*se);
- pse_depth = depth_se(*pse);
+ se_depth = (*se)->depth;
+ pse_depth = (*pse)->depth;
while (se_depth > pse_depth) {
se_depth--;
@@ -426,12 +415,6 @@
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
- return 1;
-}
-
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return NULL;
@@ -819,14 +802,6 @@
/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
-/*
- * After skipping a page migration on a shared page, skip N more numa page
- * migrations unconditionally. This reduces the number of NUMA migrations
- * in shared memory workloads, and has the effect of pulling tasks towards
- * where their memory lives, over pulling the memory towards the task.
- */
-unsigned int sysctl_numa_balancing_migrate_deferred = 16;
-
static unsigned int task_nr_scan_windows(struct task_struct *p)
{
unsigned long rss = 0;
@@ -893,10 +868,26 @@
struct list_head task_list;
struct rcu_head rcu;
+ nodemask_t active_nodes;
unsigned long total_faults;
+ /*
+ * Faults_cpu is used to decide whether memory should move
+ * towards the CPU. As a consequence, these stats are weighted
+ * more by CPU use than by memory faults.
+ */
+ unsigned long *faults_cpu;
unsigned long faults[0];
};
+/* Shared or private faults. */
+#define NR_NUMA_HINT_FAULT_TYPES 2
+
+/* Memory and CPU locality */
+#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
+
+/* Averaged statistics, and temporary buffers. */
+#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
+
pid_t task_numa_group_id(struct task_struct *p)
{
return p->numa_group ? p->numa_group->gid : 0;
@@ -904,16 +895,16 @@
static inline int task_faults_idx(int nid, int priv)
{
- return 2 * nid + priv;
+ return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
}
static inline unsigned long task_faults(struct task_struct *p, int nid)
{
- if (!p->numa_faults)
+ if (!p->numa_faults_memory)
return 0;
- return p->numa_faults[task_faults_idx(nid, 0)] +
- p->numa_faults[task_faults_idx(nid, 1)];
+ return p->numa_faults_memory[task_faults_idx(nid, 0)] +
+ p->numa_faults_memory[task_faults_idx(nid, 1)];
}
static inline unsigned long group_faults(struct task_struct *p, int nid)
@@ -925,6 +916,12 @@
p->numa_group->faults[task_faults_idx(nid, 1)];
}
+static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
+{
+ return group->faults_cpu[task_faults_idx(nid, 0)] +
+ group->faults_cpu[task_faults_idx(nid, 1)];
+}
+
/*
* These return the fraction of accesses done by a particular task, or
* task group, on a particular numa node. The group weight is given a
@@ -935,7 +932,7 @@
{
unsigned long total_faults;
- if (!p->numa_faults)
+ if (!p->numa_faults_memory)
return 0;
total_faults = p->total_numa_faults;
@@ -954,6 +951,69 @@
return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
}
+bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
+ int src_nid, int dst_cpu)
+{
+ struct numa_group *ng = p->numa_group;
+ int dst_nid = cpu_to_node(dst_cpu);
+ int last_cpupid, this_cpupid;
+
+ this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+
+ /*
+ * Multi-stage node selection is used in conjunction with a periodic
+ * migration fault to build a temporal task<->page relation. By using
+ * a two-stage filter we remove short/unlikely relations.
+ *
+ * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
+ * a task's usage of a particular page (n_p) per total usage of this
+ * page (n_t) (in a given time-span) to a probability.
+ *
+ * Our periodic faults will sample this probability, and the chance of
+ * getting the same result twice in a row, given these samples are
+ * fully independent, is then P(n)^2, provided our sample period is
+ * sufficiently short compared to the usage pattern.
+ *
+ * This quadratic squishes small probabilities, making it less likely we
+ * act on an unlikely task<->page relation.
+ */
+ last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+ if (!cpupid_pid_unset(last_cpupid) &&
+ cpupid_to_nid(last_cpupid) != dst_nid)
+ return false;
+
+ /* Always allow migrate on private faults */
+ if (cpupid_match_pid(p, last_cpupid))
+ return true;
+
+ /* A shared fault, but p->numa_group has not been set up yet. */
+ if (!ng)
+ return true;
+
+ /*
+ * Do not migrate if the destination is not a node that
+ * is actively used by this numa group.
+ */
+ if (!node_isset(dst_nid, ng->active_nodes))
+ return false;
+
+ /*
+ * Source is a node that is not actively used by this
+ * numa group, while the destination is. Migrate.
+ */
+ if (!node_isset(src_nid, ng->active_nodes))
+ return true;
+
+ /*
+ * Both source and destination are nodes in active
+ * use by this numa group. Maximize memory bandwidth
+ * by migrating from more heavily used groups, to less
+ * heavily used ones, spreading the load around.
+ * Use a 1/4 hysteresis to avoid spurious page movement.
+ */
+ return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
+}
+
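As a worked instance of the squaring argument in should_numa_migrate_memory() above (the numbers are illustrative, not from the patch):

\[
  P(\text{migrate}) \approx P(p)^2, \qquad P(p) = \frac{n_p}{n_t}
\]

A task responsible for 1 in 10 accesses to a page has \(P(p) = 0.1\), so two consecutive matching samples occur with probability \(0.01\); a dominant user with \(P(p) = 0.9\) still passes the filter with probability \(0.81\). Short, unlikely task/page relations are squashed by orders of magnitude while strong ones survive.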
static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
@@ -1267,7 +1327,7 @@
static void numa_migrate_preferred(struct task_struct *p)
{
/* This task has no NUMA fault statistics yet */
- if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
return;
/* Periodically retry migrating the task to the preferred node */
@@ -1282,6 +1342,38 @@
}
/*
+ * Find the nodes on which the workload is actively running. We do this by
+ * tracking the nodes from which NUMA hinting faults are triggered. This can
+ * be different from the set of nodes where the workload's memory is currently
+ * located.
+ *
+ * The bitmask is used to make smarter decisions on when to do NUMA page
+ * migrations. To prevent flip-flopping and excessive page migrations, nodes
+ * are added when they cause over 6/16 of the maximum number of faults, but
+ * only removed when they drop below 3/16.
+ */
+static void update_numa_active_node_mask(struct numa_group *numa_group)
+{
+ unsigned long faults, max_faults = 0;
+ int nid;
+
+ for_each_online_node(nid) {
+ faults = group_faults_cpu(numa_group, nid);
+ if (faults > max_faults)
+ max_faults = faults;
+ }
+
+ for_each_online_node(nid) {
+ faults = group_faults_cpu(numa_group, nid);
+ if (!node_isset(nid, numa_group->active_nodes)) {
+ if (faults > max_faults * 6 / 16)
+ node_set(nid, numa_group->active_nodes);
+ } else if (faults < max_faults * 3 / 16)
+ node_clear(nid, numa_group->active_nodes);
+ }
+}
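The 6/16-enter / 3/16-leave split is a classic hysteresis band; a standalone C sketch with made-up fault counts (not the kernel code itself) shows why a node hovering between the thresholds does not flip-flop:

    #include <stdio.h>

    static int active;  /* the node's membership in active_nodes */

    static void update(unsigned long faults, unsigned long max_faults)
    {
        if (!active) {
            if (faults > max_faults * 6 / 16)
                active = 1;
        } else if (faults < max_faults * 3 / 16) {
            active = 0;
        }
        printf("faults=%lu/%lu -> active=%d\n", faults, max_faults, active);
    }

    int main(void)
    {
        /* With max_faults=160, enter above 60 and leave below 30:
         * 20 and 50 stay out, 70 enters, 40 stays in, 25 leaves. */
        unsigned long samples[] = { 20, 50, 70, 40, 25 };
        for (int i = 0; i < 5; i++)
            update(samples[i], 160);
        return 0;
    }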
+
+/*
* When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
* increments. The more local the fault statistics are, the higher the scan
* period will be for the next scan window. If local/remote ratio is below
@@ -1355,11 +1447,41 @@
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
+/*
+ * Get the fraction of time the task has been running since the last
+ * NUMA placement cycle. The scheduler keeps similar statistics, but
+ * decays those on a 32ms period, which is orders of magnitude off
+ * from the dozens-of-seconds NUMA balancing period. Use the scheduler
+ * stats only if the task is so new there are no NUMA statistics yet.
+ */
+static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
+{
+ u64 runtime, delta, now;
+ /* Use the start of this time slice to avoid calculations. */
+ now = p->se.exec_start;
+ runtime = p->se.sum_exec_runtime;
+
+ if (p->last_task_numa_placement) {
+ delta = runtime - p->last_sum_exec_runtime;
+ *period = now - p->last_task_numa_placement;
+ } else {
+ delta = p->se.avg.runnable_avg_sum;
+ *period = p->se.avg.runnable_avg_period;
+ }
+
+ p->last_sum_exec_runtime = runtime;
+ p->last_task_numa_placement = now;
+
+ return delta;
+}
+
static void task_numa_placement(struct task_struct *p)
{
int seq, nid, max_nid = -1, max_group_nid = -1;
unsigned long max_faults = 0, max_group_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
+ unsigned long total_faults;
+ u64 runtime, period;
spinlock_t *group_lock = NULL;
seq = ACCESS_ONCE(p->mm->numa_scan_seq);
@@ -1368,6 +1490,10 @@
p->numa_scan_seq = seq;
p->numa_scan_period_max = task_scan_max(p);
+ total_faults = p->numa_faults_locality[0] +
+ p->numa_faults_locality[1];
+ runtime = numa_get_avg_runtime(p, &period);
+
/* If the task is part of a group prevent parallel updates to group stats */
if (p->numa_group) {
group_lock = &p->numa_group->lock;
@@ -1379,24 +1505,37 @@
unsigned long faults = 0, group_faults = 0;
int priv, i;
- for (priv = 0; priv < 2; priv++) {
- long diff;
+ for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
+ long diff, f_diff, f_weight;
i = task_faults_idx(nid, priv);
- diff = -p->numa_faults[i];
/* Decay existing window, copy faults since last scan */
- p->numa_faults[i] >>= 1;
- p->numa_faults[i] += p->numa_faults_buffer[i];
- fault_types[priv] += p->numa_faults_buffer[i];
- p->numa_faults_buffer[i] = 0;
+ diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
+ fault_types[priv] += p->numa_faults_buffer_memory[i];
+ p->numa_faults_buffer_memory[i] = 0;
- faults += p->numa_faults[i];
- diff += p->numa_faults[i];
+ /*
+ * Normalize the faults_from, so all tasks in a group
+ * count according to CPU use, instead of by the raw
+ * number of faults. Tasks with little runtime have
+ * little over-all impact on throughput, and thus their
+ * faults are less important.
+ */
+ f_weight = div64_u64(runtime << 16, period + 1);
+ f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
+ (total_faults + 1);
+ f_diff = f_weight - p->numa_faults_cpu[i] / 2;
+ p->numa_faults_buffer_cpu[i] = 0;
+
+ p->numa_faults_memory[i] += diff;
+ p->numa_faults_cpu[i] += f_diff;
+ faults += p->numa_faults_memory[i];
p->total_numa_faults += diff;
if (p->numa_group) {
/* safe because we can only change our own group */
p->numa_group->faults[i] += diff;
+ p->numa_group->faults_cpu[i] += f_diff;
p->numa_group->total_faults += diff;
group_faults += p->numa_group->faults[i];
}
@@ -1416,6 +1555,7 @@
update_task_scan_period(p, fault_types[0], fault_types[1]);
if (p->numa_group) {
+ update_numa_active_node_mask(p->numa_group);
/*
* If the preferred task and group nids are different,
* iterate over the nodes again to find the best place.
@@ -1465,7 +1605,7 @@
if (unlikely(!p->numa_group)) {
unsigned int size = sizeof(struct numa_group) +
- 2*nr_node_ids*sizeof(unsigned long);
+ 4*nr_node_ids*sizeof(unsigned long);
grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!grp)
@@ -1475,9 +1615,14 @@
spin_lock_init(&grp->lock);
INIT_LIST_HEAD(&grp->task_list);
grp->gid = p->pid;
+ /* Second half of the array tracks nids where faults happen */
+ grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
+ nr_node_ids;
- for (i = 0; i < 2*nr_node_ids; i++)
- grp->faults[i] = p->numa_faults[i];
+ node_set(task_node(current), grp->active_nodes);
+
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+ grp->faults[i] = p->numa_faults_memory[i];
grp->total_faults = p->total_numa_faults;
@@ -1534,9 +1679,9 @@
double_lock(&my_grp->lock, &grp->lock);
- for (i = 0; i < 2*nr_node_ids; i++) {
- my_grp->faults[i] -= p->numa_faults[i];
- grp->faults[i] += p->numa_faults[i];
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
+ my_grp->faults[i] -= p->numa_faults_memory[i];
+ grp->faults[i] += p->numa_faults_memory[i];
}
my_grp->total_faults -= p->total_numa_faults;
grp->total_faults += p->total_numa_faults;
@@ -1562,12 +1707,12 @@
{
struct numa_group *grp = p->numa_group;
int i;
- void *numa_faults = p->numa_faults;
+ void *numa_faults = p->numa_faults_memory;
if (grp) {
spin_lock(&grp->lock);
- for (i = 0; i < 2*nr_node_ids; i++)
- grp->faults[i] -= p->numa_faults[i];
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+ grp->faults[i] -= p->numa_faults_memory[i];
grp->total_faults -= p->total_numa_faults;
list_del(&p->numa_entry);
@@ -1577,18 +1722,21 @@
put_numa_group(grp);
}
- p->numa_faults = NULL;
- p->numa_faults_buffer = NULL;
+ p->numa_faults_memory = NULL;
+ p->numa_faults_buffer_memory = NULL;
+ p->numa_faults_cpu = NULL;
+ p->numa_faults_buffer_cpu = NULL;
kfree(numa_faults);
}
/*
* Got a PROT_NONE fault for a page on @node.
*/
-void task_numa_fault(int last_cpupid, int node, int pages, int flags)
+void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
{
struct task_struct *p = current;
bool migrated = flags & TNF_MIGRATED;
+ int cpu_node = task_node(current);
int priv;
if (!numabalancing_enabled)
@@ -1603,16 +1751,24 @@
return;
/* Allocate buffer to track faults on a per-node basis */
- if (unlikely(!p->numa_faults)) {
- int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
+ if (unlikely(!p->numa_faults_memory)) {
+ int size = sizeof(*p->numa_faults_memory) *
+ NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
- /* numa_faults and numa_faults_buffer share the allocation */
- p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
- if (!p->numa_faults)
+ p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
+ if (!p->numa_faults_memory)
return;
- BUG_ON(p->numa_faults_buffer);
- p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
+ BUG_ON(p->numa_faults_buffer_memory);
+ /*
+ * The averaged statistics, shared & private, memory & cpu,
+ * occupy the first half of the array. The second half of the
+ * array is for current counters, which are averaged into the
+ * first set by task_numa_placement.
+ */
+ p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
+ p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
+ p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
p->total_numa_faults = 0;
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
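Putting the allocation comment above in picture form, the single kzalloc() is carved into four equal regions (offsets in units of unsigned long, with nr_node_ids written as N; a layout sketch, not patch code):

    /*
     *   [0,  2N)  numa_faults_memory         decayed averages
     *   [2N, 4N)  numa_faults_cpu            decayed averages
     *   [4N, 6N)  numa_faults_buffer_memory  current scan window
     *   [6N, 8N)  numa_faults_buffer_cpu     current scan window
     *
     * 8N == NR_NUMA_HINT_FAULT_BUCKETS * N, matching the kzalloc() size,
     * and task_numa_placement() folds each buffer half into its average.
     */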
@@ -1641,7 +1797,8 @@
if (migrated)
p->numa_pages_migrated += pages;
- p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
+ p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
+ p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
}
@@ -1757,6 +1914,8 @@
start = end;
if (pages <= 0)
goto out;
+
+ cond_resched();
} while (end != vma->vm_end);
}
@@ -2217,13 +2376,20 @@
se->avg.load_avg_contrib >>= NICE_0_SHIFT;
}
}
-#else
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+ __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+ __update_tg_runnable_avg(&rq->avg, &rq->cfs);
+}
+#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
int force_update) {}
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
struct cfs_rq *cfs_rq) {}
static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_task_entity_contrib(struct sched_entity *se)
{
@@ -2321,12 +2487,6 @@
__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
- __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
- __update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
-
/* Add the load generated by se into cfs_rq's child load-average */
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
struct sched_entity *se,
@@ -2414,7 +2574,10 @@
update_rq_runnable_avg(this_rq, 0);
}
-#else
+static int idle_balance(struct rq *this_rq);
+
+#else /* CONFIG_SMP */
+
static inline void update_entity_load_avg(struct sched_entity *se,
int update_cfs_rq) {}
static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2426,7 +2589,13 @@
int sleep) {}
static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
int force_update) {}
-#endif
+
+static inline int idle_balance(struct rq *rq)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -2576,10 +2745,10 @@
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->last == se)
- cfs_rq->last = NULL;
- else
+ if (cfs_rq->last != se)
break;
+
+ cfs_rq->last = NULL;
}
}
@@ -2587,10 +2756,10 @@
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->next == se)
- cfs_rq->next = NULL;
- else
+ if (cfs_rq->next != se)
break;
+
+ cfs_rq->next = NULL;
}
}
@@ -2598,10 +2767,10 @@
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->skip == se)
- cfs_rq->skip = NULL;
- else
+ if (cfs_rq->skip != se)
break;
+
+ cfs_rq->skip = NULL;
}
}
@@ -2744,17 +2913,36 @@
* 3) pick the "last" process, for cache locality
* 4) do not run the "skip" process, if something else is available
*/
-static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- struct sched_entity *se = __pick_first_entity(cfs_rq);
- struct sched_entity *left = se;
+ struct sched_entity *left = __pick_first_entity(cfs_rq);
+ struct sched_entity *se;
+
+ /*
+ * If curr is set we have to see if its left of the leftmost entity
+ * still in the tree, provided there was anything in the tree at all.
+ */
+ if (!left || (curr && entity_before(curr, left)))
+ left = curr;
+
+ se = left; /* ideally we run the leftmost entity */
/*
* Avoid running the skip buddy, if running something else can
* be done without getting too unfair.
*/
if (cfs_rq->skip == se) {
- struct sched_entity *second = __pick_next_entity(se);
+ struct sched_entity *second;
+
+ if (se == curr) {
+ second = __pick_first_entity(cfs_rq);
+ } else {
+ second = __pick_next_entity(se);
+ if (!second || (curr && entity_before(curr, second)))
+ second = curr;
+ }
+
if (second && wakeup_preempt_entity(second, left) < 1)
se = second;
}
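Because cfs_rq->curr is kept out of the rbtree while it runs, pick_next_entity() must now weigh @curr against the tree's leftmost node explicitly. The comparison is by vruntime; for reference, the ordering predicate entity_before() in fair.c is essentially the following (quoted for context, not part of this patch):

    static inline int entity_before(struct sched_entity *a,
                                    struct sched_entity *b)
    {
        return (s64)(a->vruntime - b->vruntime) < 0;
    }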
@@ -2776,7 +2964,7 @@
return se;
}
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
@@ -3431,22 +3619,23 @@
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
- return;
+ return false;
if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
- return;
+ return false;
/*
* it's possible for a throttled entity to be forced into a running
* state (e.g. set_curr_task), in this case we're finished.
*/
if (cfs_rq_throttled(cfs_rq))
- return;
+ return true;
throttle_cfs_rq(cfs_rq);
+ return true;
}
static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
@@ -3556,7 +3745,7 @@
}
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -4211,13 +4400,14 @@
}
/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
+ * select_task_rq_fair: Select target runqueue for the waking task in domains
+ * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
+ * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
*
- * Balance, ie. select the least loaded group.
+ * Balances load by selecting the idlest cpu in the idlest group, or under
+ * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
*
- * Returns the target CPU number, or the same CPU if no balancing is needed.
+ * Returns the target cpu number.
*
* preempt must be disabled.
*/
@@ -4492,26 +4682,124 @@
set_last_buddy(se);
}
-static struct task_struct *pick_next_task_fair(struct rq *rq)
+static struct task_struct *
+pick_next_task_fair(struct rq *rq, struct task_struct *prev)
{
- struct task_struct *p;
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
+ struct task_struct *p;
+ int new_tasks;
+again:
+#ifdef CONFIG_FAIR_GROUP_SCHED
if (!cfs_rq->nr_running)
- return NULL;
+ goto idle;
+
+ if (prev->sched_class != &fair_sched_class)
+ goto simple;
+
+ /*
+ * Because of the set_next_buddy() in dequeue_task_fair() it is rather
+ * likely that a next task is from the same cgroup as the current.
+ *
+ * Therefore attempt to avoid putting and setting the entire cgroup
+ * hierarchy, only change the part that actually changes.
+ */
do {
- se = pick_next_entity(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+
+ /*
+ * Since we got here without doing put_prev_entity() we also
+ * have to consider cfs_rq->curr. If it is still a runnable
+ * entity, update_curr() will update its vruntime, otherwise
+ * forget we've ever seen it.
+ */
+ if (curr && curr->on_rq)
+ update_curr(cfs_rq);
+ else
+ curr = NULL;
+
+ /*
+ * This call to check_cfs_rq_runtime() will do the throttle and
+ * dequeue its entity in the parent(s). Therefore the 'simple'
+ * nr_running test will indeed be correct.
+ */
+ if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+ goto simple;
+
+ se = pick_next_entity(cfs_rq, curr);
+ cfs_rq = group_cfs_rq(se);
+ } while (cfs_rq);
+
+ p = task_of(se);
+
+ /*
+ * Since we haven't yet done put_prev_entity and if the selected task
+ * is a different task than we started out with, try and touch the
+ * least amount of cfs_rqs.
+ */
+ if (prev != p) {
+ struct sched_entity *pse = &prev->se;
+
+ while (!(cfs_rq = is_same_group(se, pse))) {
+ int se_depth = se->depth;
+ int pse_depth = pse->depth;
+
+ if (se_depth <= pse_depth) {
+ put_prev_entity(cfs_rq_of(pse), pse);
+ pse = parent_entity(pse);
+ }
+ if (se_depth >= pse_depth) {
+ set_next_entity(cfs_rq_of(se), se);
+ se = parent_entity(se);
+ }
+ }
+
+ put_prev_entity(cfs_rq, pse);
+ set_next_entity(cfs_rq, se);
+ }
+
+ if (hrtick_enabled(rq))
+ hrtick_start_fair(rq, p);
+
+ return p;
+simple:
+ cfs_rq = &rq->cfs;
+#endif
+
+ if (!cfs_rq->nr_running)
+ goto idle;
+
+ put_prev_task(rq, prev);
+
+ do {
+ se = pick_next_entity(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
+
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
return p;
+
+idle:
+ new_tasks = idle_balance(rq);
+ /*
+ * Because idle_balance() releases (and re-acquires) rq->lock, it is
+ * possible for any higher priority task to appear. In that case we
+ * must re-start the pick_next_entity() loop.
+ */
+ if (new_tasks < 0)
+ return RETRY_TASK;
+
+ if (new_tasks > 0)
+ goto again;
+
+ return NULL;
}
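The cached se->depth values let the prev != p path above touch only the diverging part of the cgroup hierarchy rather than putting and re-setting every level. An illustrative trace (hypothetical tree, not from the patch):

    /*
     * prev's entity at depth 3, next's at depth 1, common ancestor at 1:
     *
     *   se->depth=1, pse->depth=3: put_prev_entity(pse), pse = parent (2)
     *   se->depth=1, pse->depth=2: put_prev_entity(pse), pse = parent (1)
     *   se->depth=1, pse->depth=1: is_same_group()? if not, both step up
     *
     * The loop exits as soon as is_same_group() returns the shared
     * cfs_rq; levels above it are never touched.
     */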
/*
@@ -4749,7 +5037,7 @@
* Is this task likely cache-hot:
*/
static int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+task_hot(struct task_struct *p, u64 now)
{
s64 delta;
@@ -4783,7 +5071,7 @@
{
int src_nid, dst_nid;
- if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
+ if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
!(env->sd->flags & SD_NUMA)) {
return false;
}
@@ -4814,7 +5102,7 @@
if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
return false;
- if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
+ if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
return false;
src_nid = cpu_to_node(env->src_cpu);
@@ -4910,7 +5198,7 @@
* 2) task is cache cold, or
* 3) too many balance attempts have failed.
*/
- tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
+ tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
if (!tsk_cache_hot)
tsk_cache_hot = migrate_degrades_locality(p, env);
@@ -5773,12 +6061,10 @@
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
- tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
- busiest->group_power;
- if (busiest->avg_load > tmp) {
+ if (busiest->avg_load > scaled_busy_load_per_task) {
pwr_move += busiest->group_power *
min(busiest->load_per_task,
- busiest->avg_load - tmp);
+ busiest->avg_load - scaled_busy_load_per_task);
}
/* Amount of load we'd add */
@@ -6357,17 +6643,23 @@
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*/
-void idle_balance(int this_cpu, struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
{
struct sched_domain *sd;
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
u64 curr_cost = 0;
+ int this_cpu = this_rq->cpu;
+ idle_enter_fair(this_rq);
+ /*
+ * We must set idle_stamp _before_ calling idle_balance(), such that we
+ * measure the duration of idle_balance() as idle time.
+ */
this_rq->idle_stamp = rq_clock(this_rq);
if (this_rq->avg_idle < sysctl_sched_migration_cost)
- return;
+ goto out;
/*
* Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6405,15 +6697,22 @@
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
next_balance = sd->last_balance + interval;
- if (pulled_task) {
- this_rq->idle_stamp = 0;
+ if (pulled_task)
break;
- }
}
rcu_read_unlock();
raw_spin_lock(&this_rq->lock);
+ /*
+ * While browsing the domains, we released the rq lock.
+ * A task could have been enqueued in the meantime.
+ */
+ if (this_rq->cfs.h_nr_running && !pulled_task) {
+ pulled_task = 1;
+ goto out;
+ }
+
if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
/*
* We are going idle. next_balance may be set based on
@@ -6424,6 +6723,20 @@
if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost;
+
+out:
+ /* Is there a task of a high priority class? */
+ if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
+ (this_rq->dl.dl_nr_running ||
+ (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+ pulled_task = -1;
+
+ if (pulled_task) {
+ idle_exit_fair(this_rq);
+ this_rq->idle_stamp = 0;
+ }
+
+ return pulled_task;
}
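idle_balance() now reports three distinct outcomes to pick_next_task_fair(); a compact summary of the contract established above (restating the patch, not new semantics):

    /*
     * Return value of idle_balance() as consumed by pick_next_task_fair():
     *   > 0  pulled (or found) fair tasks; goto again and re-pick
     *     0  nothing pulled; give up on this class and return NULL
     *    -1  a higher-priority (dl/rt) task appeared while rq->lock was
     *        dropped; translated into RETRY_TASK so the core scheduler
     *        walks the classes from the top again
     */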
/*
@@ -6494,6 +6807,11 @@
return 0;
}
+static inline int on_null_domain(struct rq *rq)
+{
+ return unlikely(!rcu_dereference_sched(rq->sd));
+}
+
#ifdef CONFIG_NO_HZ_COMMON
/*
* idle load balancing details
@@ -6548,8 +6866,13 @@
static inline void nohz_balance_exit_idle(int cpu)
{
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
+ /*
+ * Completely isolated CPUs never set this bit, so we must test.
+ */
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
}
@@ -6603,6 +6926,12 @@
if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
return;
+ /*
+ * If we're a completely isolated CPU, we don't play.
+ */
+ if (on_null_domain(cpu_rq(cpu)))
+ return;
+
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus);
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
@@ -6865,11 +7194,6 @@
nohz_idle_balance(this_rq, idle);
}
-static inline int on_null_domain(struct rq *rq)
-{
- return !rcu_dereference_sched(rq->sd);
-}
-
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*/
@@ -6999,15 +7323,15 @@
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/*
- * Ensure the task's vruntime is normalized, so that when its
+ * Ensure the task's vruntime is normalized, so that when it's
* switched back to the fair class the enqueue_entity(.flags=0) will
* do the right thing.
*
- * If it was on_rq, then the dequeue_entity(.flags=0) will already
- * have normalized the vruntime, if it was !on_rq, then only when
+ * If it's on_rq, then the dequeue_entity(.flags=0) will already
+ * have normalized the vruntime, if it's !on_rq, then only when
* the task is sleeping will it still have non-normalized vruntime.
*/
- if (!se->on_rq && p->state != TASK_RUNNING) {
+ if (!p->on_rq && p->state != TASK_RUNNING) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
@@ -7034,7 +7358,15 @@
*/
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
- if (!p->se.on_rq)
+ struct sched_entity *se = &p->se;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /*
+ * Since the real-depth could have been changed (only FAIR
+ * class maintain depth value), reset depth properly.
+ */
+ se->depth = se->parent ? se->parent->depth + 1 : 0;
+#endif
+ if (!se->on_rq)
return;
/*
@@ -7082,7 +7414,9 @@
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
+ struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq;
+
/*
* If the task was not on the rq at the time of this cgroup movement
* it must have been asleep, sleeping tasks keep their ->vruntime
@@ -7108,23 +7442,24 @@
* To prevent boost or penalty in the new cfs_rq caused by delta
* min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
*/
- if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+ if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
on_rq = 1;
if (!on_rq)
- p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ se->vruntime -= cfs_rq_of(se)->min_vruntime;
set_task_rq(p, task_cpu(p));
+ se->depth = se->parent ? se->parent->depth + 1 : 0;
if (!on_rq) {
- cfs_rq = cfs_rq_of(&p->se);
- p->se.vruntime += cfs_rq->min_vruntime;
+ cfs_rq = cfs_rq_of(se);
+ se->vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
/*
* migrate_task_rq_fair() will have removed our previous
* contribution, but we must synchronize for ongoing future
* decay.
*/
- p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
- cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+ se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+ cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
#endif
}
}
@@ -7220,10 +7555,13 @@
if (!se)
return;
- if (!parent)
+ if (!parent) {
se->cfs_rq = &rq->cfs;
- else
+ se->depth = 0;
+ } else {
se->cfs_rq = parent->my_q;
+ se->depth = parent->depth + 1;
+ }
se->my_q = cfs_rq;
/* guarantee group entities always have weight */
diff --git a/kernel/cpu/idle.c b/kernel/sched/idle.c
similarity index 95%
rename from kernel/cpu/idle.c
rename to kernel/sched/idle.c
index 277f494..b7976a1 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/sched/idle.c
@@ -3,6 +3,7 @@
*/
#include <linux/sched.h>
#include <linux/cpu.h>
+#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
@@ -95,8 +96,10 @@
if (!current_clr_polling_and_test()) {
stop_critical_timings();
rcu_idle_enter();
- arch_cpu_idle();
- WARN_ON_ONCE(irqs_disabled());
+ if (cpuidle_idle_call())
+ arch_cpu_idle();
+ if (WARN_ON_ONCE(irqs_disabled()))
+ local_irq_enable();
rcu_idle_exit();
start_critical_timings();
} else {
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 516c3d9..879f2b7 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -13,18 +13,8 @@
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
-
-static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
-{
- idle_exit_fair(rq);
- rq_last_tick_reset(rq);
-}
-
-static void post_schedule_idle(struct rq *rq)
-{
- idle_enter_fair(rq);
-}
#endif /* CONFIG_SMP */
+
/*
* Idle tasks are unconditionally rescheduled:
*/
@@ -33,13 +23,12 @@
resched_task(rq->idle);
}
-static struct task_struct *pick_next_task_idle(struct rq *rq)
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev)
{
+ put_prev_task(rq, prev);
+
schedstat_inc(rq, sched_goidle);
-#ifdef CONFIG_SMP
- /* Trigger the post schedule to do an idle_enter for CFS */
- rq->post_schedule = 1;
-#endif
return rq->idle;
}
@@ -58,6 +47,8 @@
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
+ idle_exit_fair(rq);
+ rq_last_tick_reset(rq);
}
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
@@ -101,8 +92,6 @@
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle,
- .pre_schedule = pre_schedule_idle,
- .post_schedule = post_schedule_idle,
#endif
.set_curr_task = set_curr_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b7..d8cdf16 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -229,6 +229,14 @@
#ifdef CONFIG_SMP
+static int pull_rt_task(struct rq *this_rq);
+
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+ /* Try to pull RT tasks here if we lower this rq's prio */
+ return rq->rt.highest_prio.curr > prev->prio;
+}
+
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
@@ -315,6 +323,15 @@
return !plist_head_empty(&rq->rt.pushable_tasks);
}
+static inline void set_post_schedule(struct rq *rq)
+{
+ /*
+ * We detect this state here so that we can avoid taking the RQ
+ * lock again later if there is no need to push
+ */
+ rq->post_schedule = has_pushable_tasks(rq);
+}
+
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,6 +376,19 @@
{
}
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+ return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+ return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
#endif /* CONFIG_SMP */
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -440,11 +470,6 @@
dequeue_rt_entity(rt_se);
}
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -515,11 +540,6 @@
{
}
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_throttled;
-}
-
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
@@ -538,6 +558,14 @@
#endif /* CONFIG_RT_GROUP_SCHED */
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+ return (hrtimer_active(&rt_b->rt_period_timer) ||
+ rt_rq->rt_time < rt_b->rt_runtime);
+}
+
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
@@ -1310,15 +1338,7 @@
{
struct sched_rt_entity *rt_se;
struct task_struct *p;
- struct rt_rq *rt_rq;
-
- rt_rq = &rq->rt;
-
- if (!rt_rq->rt_nr_running)
- return NULL;
-
- if (rt_rq_throttled(rt_rq))
- return NULL;
+ struct rt_rq *rt_rq = &rq->rt;
do {
rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1332,21 +1352,45 @@
return p;
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
- struct task_struct *p = _pick_next_task_rt(rq);
+ struct task_struct *p;
+ struct rt_rq *rt_rq = &rq->rt;
+
+ if (need_pull_rt_task(rq, prev)) {
+ pull_rt_task(rq);
+ /*
+ * pull_rt_task() can drop (and re-acquire) rq->lock; this
+ * means a dl task can slip in, in which case we need to
+ * re-start task selection.
+ */
+ if (unlikely(rq->dl.dl_nr_running))
+ return RETRY_TASK;
+ }
+
+ /*
+ * We may dequeue prev's rt_rq in put_prev_task().
+ * So, we update the runtime statistics before the rt_nr_running check.
+ */
+ if (prev->sched_class == &rt_sched_class)
+ update_curr_rt(rq);
+
+ if (!rt_rq->rt_nr_running)
+ return NULL;
+
+ if (rt_rq_throttled(rt_rq))
+ return NULL;
+
+ put_prev_task(rq, prev);
+
+ p = _pick_next_task_rt(rq);
/* The running task is never eligible for pushing */
if (p)
dequeue_pushable_task(rq, p);
-#ifdef CONFIG_SMP
- /*
- * We detect this state here so that we can avoid taking the RQ
- * lock again later if there is no need to push
- */
- rq->post_schedule = has_pushable_tasks(rq);
-#endif
+ set_post_schedule(rq);
return p;
}
@@ -1716,13 +1760,6 @@
return ret;
}
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
- /* Try to pull RT tasks here if we lower this rq's prio */
- if (rq->rt.highest_prio.curr > prev->prio)
- pull_rt_task(rq);
-}
-
static void post_schedule_rt(struct rq *rq)
{
push_rt_tasks(rq);
@@ -1825,7 +1862,7 @@
resched_task(rq->curr);
}
-void init_sched_rt_class(void)
+void __init init_sched_rt_class(void)
{
unsigned int i;
@@ -1999,7 +2036,6 @@
.set_cpus_allowed = set_cpus_allowed_rt,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
- .pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd..f2de7a1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -24,24 +24,6 @@
extern void update_cpu_load_active(struct rq *this_rq);
/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
@@ -441,6 +423,18 @@
#endif
};
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return rt_rq->rt_throttled;
+}
+#endif
+
/* Deadline class' related fields in a runqueue */
struct dl_rq {
/* runqueue is an rbtree, ordered by deadline */
@@ -462,7 +456,6 @@
} earliest_dl;
unsigned long dl_nr_migratory;
- unsigned long dl_nr_total;
int overloaded;
/*
@@ -559,11 +552,9 @@
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-#ifdef CONFIG_RT_GROUP_SCHED
- struct list_head leaf_rt_rq_list;
-#endif
+ struct sched_avg avg;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
* This is part of a global counter where only the total sum
@@ -652,8 +643,6 @@
#ifdef CONFIG_SMP
struct llist_head wake_list;
#endif
-
- struct sched_avg avg;
};
static inline int cpu_of(struct rq *rq)
@@ -1113,6 +1102,8 @@
#define DEQUEUE_SLEEP 1
+#define RETRY_TASK ((void *)-1UL)
+
struct sched_class {
const struct sched_class *next;
@@ -1123,14 +1114,22 @@
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
- struct task_struct * (*pick_next_task) (struct rq *rq);
+ /*
+ * It is the responsibility of the pick_next_task() method that will
+ * return the next task to call put_prev_task() on the @prev task or
+ * something equivalent.
+ *
+ * May return RETRY_TASK when it finds a higher prio class has runnable
+ * tasks.
+ */
+ struct task_struct * (*pick_next_task) (struct rq *rq,
+ struct task_struct *prev);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
- void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
@@ -1160,6 +1159,11 @@
#endif
};
+static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+ prev->sched_class->put_prev_task(rq, prev);
+}
+
#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)
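The pick_next_task() comment above places put_prev_task() and the RETRY_TASK protocol on the class methods; the other side of that contract is the core pick loop. A paraphrased, simplified sketch of how a caller is expected to honour RETRY_TASK (not the verbatim core.c body):

    static struct task_struct *
    pick_next_task(struct rq *rq, struct task_struct *prev)
    {
        const struct sched_class *class;
        struct task_struct *p;

    again:
        for_each_class(class) {
            p = class->pick_next_task(rq, prev);
            if (p) {
                if (unlikely(p == RETRY_TASK))
                    goto again;   /* a higher class became runnable */
                return p;
            }
        }

        BUG(); /* the idle class should always have a runnable task */
    }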
@@ -1176,16 +1180,14 @@
extern void update_group_power(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(int this_cpu, struct rq *this_rq);
extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);
-#else /* CONFIG_SMP */
+#else
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
+static inline void idle_enter_fair(struct rq *rq) { }
+static inline void idle_exit_fair(struct rq *rq) { }
#endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index fdb6bb0..d6ce65d 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,16 +23,19 @@
/* we're never preempted */
}
-static struct task_struct *pick_next_task_stop(struct rq *rq)
+static struct task_struct *
+pick_next_task_stop(struct rq *rq, struct task_struct *prev)
{
struct task_struct *stop = rq->stop;
- if (stop && stop->on_rq) {
- stop->se.exec_start = rq_clock_task(rq);
- return stop;
- }
+ if (!stop || !stop->on_rq)
+ return NULL;
- return NULL;
+ put_prev_task(rq, prev);
+
+ stop->se.exec_start = rq_clock_task(rq);
+
+ return stop;
}
static void
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e0..01fbae5 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@
*/
smp_call_function_single(min(cpu1, cpu2),
&irq_cpu_stop_queue_work,
- &call_args, 0);
+ &call_args, 1);
lg_local_unlock(&stop_cpus_lock);
preempt_enable();
diff --git a/kernel/sys.c b/kernel/sys.c
index c0a58be..adaeab6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -174,10 +174,10 @@
/* normalize: avoid signed division (rounding problems) */
error = -ESRCH;
- if (niceval < -20)
- niceval = -20;
- if (niceval > 19)
- niceval = 19;
+ if (niceval < MIN_NICE)
+ niceval = MIN_NICE;
+ if (niceval > MAX_NICE)
+ niceval = MAX_NICE;
rcu_read_lock();
read_lock(&tasklist_lock);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 49e13e1..7754ff1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -386,13 +386,6 @@
.proc_handler = proc_dointvec,
},
{
- .procname = "numa_balancing_migrate_deferred",
- .data = &sysctl_numa_balancing_migrate_deferred,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "numa_balancing",
.data = NULL, /* filled in by handler */
.maxlen = sizeof(unsigned int),
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0abb364..4d23dc4d8 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -116,20 +116,42 @@
void __init sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
+ u64 res, wrap, new_mask, new_epoch, cyc, ns;
+ u32 new_mult, new_shift;
+ ktime_t new_wrap_kt;
unsigned long r;
- u64 res, wrap;
char r_unit;
if (cd.rate > rate)
return;
WARN_ON(!irqs_disabled());
- read_sched_clock = read;
- sched_clock_mask = CLOCKSOURCE_MASK(bits);
- cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
- clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
+ clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+ new_mask = CLOCKSOURCE_MASK(bits);
+
+ /* calculate how many ns until we wrap */
+ wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+ new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+ /* update epoch for new counter and update epoch_ns from old counter */
+ new_epoch = read();
+ cyc = read_sched_clock();
+ ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+ cd.mult, cd.shift);
+
+ raw_write_seqcount_begin(&cd.seq);
+ read_sched_clock = read;
+ sched_clock_mask = new_mask;
+ cd.rate = rate;
+ cd.wrap_kt = new_wrap_kt;
+ cd.mult = new_mult;
+ cd.shift = new_shift;
+ cd.epoch_cyc = new_epoch;
+ cd.epoch_ns = ns;
+ raw_write_seqcount_end(&cd.seq);
r = rate;
if (r >= 4000000) {
@@ -141,22 +163,12 @@
} else
r_unit = ' ';
- /* calculate how many ns until we wrap */
- wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
- cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
-
/* calculate the ns resolution of this counter */
- res = cyc_to_ns(1ULL, cd.mult, cd.shift);
+ res = cyc_to_ns(1ULL, new_mult, new_shift);
+
pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
bits, r, r_unit, res, wrap);
- update_sched_clock();
-
- /*
- * Ensure that sched_clock() starts off at 0ns
- */
- cd.epoch_ns = 0;
-
/* Enable IRQ time accounting if we have a fast enough sched_clock */
if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
enable_sched_clock_irqtime();
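With all clock parameters now published inside one write-side seqcount section, readers observe either the old counter's parameters or the new ones, never a mix. Roughly how a read side pairs with it, using the generic seqcount API (a sketch, not the exact sched_clock() body):

    static u64 sched_clock_read_sketch(void)
    {
        u64 cyc, ns;
        unsigned long seq;

        do {
            seq = read_seqcount_begin(&cd.seq);
            cyc = read_sched_clock();
            ns = cd.epoch_ns +
                 cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
                           cd.mult, cd.shift);
        } while (read_seqcount_retry(&cd.seq, seq));

        return ns;
    }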
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0aa4ce81..5b40279 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1435,7 +1435,8 @@
out:
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
if (clock_set)
- clock_was_set();
+ /* Have to call the _delayed version, since we are in irq context */
+ clock_was_set_delayed();
}
/**
diff --git a/kernel/timer.c b/kernel/timer.c
index accfd24..d78de04 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -52,7 +52,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
-u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
diff --git a/kernel/torture.c b/kernel/torture.c
new file mode 100644
index 0000000..acc9afc
--- /dev/null
+++ b/kernel/torture.c
@@ -0,0 +1,719 @@
+/*
+ * Common functions for in-kernel torture tests.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Based on kernel/rcu/torture.c.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+
+static char *torture_type;
+static bool verbose;
+
+/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
+#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */
+#define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */
+static int fullstop = FULLSTOP_RMMOD;
+static DEFINE_MUTEX(fullstop_mutex);
+static int *torture_runnable;
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Variables for online-offline handling. Only present if CPU hotplug
+ * is enabled; otherwise these do nothing.
+ */
+
+static struct task_struct *onoff_task;
+static long onoff_holdoff;
+static long onoff_interval;
+static long n_offline_attempts;
+static long n_offline_successes;
+static unsigned long sum_offline;
+static int min_offline = -1;
+static int max_offline;
+static long n_online_attempts;
+static long n_online_successes;
+static unsigned long sum_online;
+static int min_online = -1;
+static int max_online;
+
+/*
+ * Execute random CPU-hotplug operations at the interval specified
+ * by the onoff_interval.
+ */
+static int
+torture_onoff(void *arg)
+{
+ int cpu;
+ unsigned long delta;
+ int maxcpu = -1;
+ DEFINE_TORTURE_RANDOM(rand);
+ int ret;
+ unsigned long starttime;
+
+ VERBOSE_TOROUT_STRING("torture_onoff task started");
+ for_each_online_cpu(cpu)
+ maxcpu = cpu;
+ WARN_ON(maxcpu < 0);
+ if (onoff_holdoff > 0) {
+ VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
+ schedule_timeout_interruptible(onoff_holdoff);
+ VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
+ }
+ while (!torture_must_stop()) {
+ cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
+ if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: offlining %d\n",
+ torture_type, cpu);
+ starttime = jiffies;
+ n_offline_attempts++;
+ ret = cpu_down(cpu);
+ if (ret) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: offline %d failed: errno %d\n",
+ torture_type, cpu, ret);
+ } else {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: offlined %d\n",
+ torture_type, cpu);
+ n_offline_successes++;
+ delta = jiffies - starttime;
+ sum_offline += delta;
+ if (min_offline < 0) {
+ min_offline = delta;
+ max_offline = delta;
+ }
+ if (min_offline > delta)
+ min_offline = delta;
+ if (max_offline < delta)
+ max_offline = delta;
+ }
+ } else if (cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: onlining %d\n",
+ torture_type, cpu);
+ starttime = jiffies;
+ n_online_attempts++;
+ ret = cpu_up(cpu);
+ if (ret) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: online %d failed: errno %d\n",
+ torture_type, cpu, ret);
+ } else {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_onoff task: onlined %d\n",
+ torture_type, cpu);
+ n_online_successes++;
+ delta = jiffies - starttime;
+ sum_online += delta;
+ if (min_online < 0) {
+ min_online = delta;
+ max_online = delta;
+ }
+ if (min_online > delta)
+ min_online = delta;
+ if (max_online < delta)
+ max_online = delta;
+ }
+ }
+ schedule_timeout_interruptible(onoff_interval);
+ }
+ torture_kthread_stopping("torture_onoff");
+ return 0;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Initiate online-offline handling.
+ */
+int torture_onoff_init(long ooholdoff, long oointerval)
+{
+ int ret = 0;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ onoff_holdoff = ooholdoff;
+ onoff_interval = oointerval;
+ if (onoff_interval <= 0)
+ return 0;
+ ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+ return ret;
+}
+EXPORT_SYMBOL_GPL(torture_onoff_init);
+
+/*
+ * Clean up after online/offline testing.
+ */
+static void torture_onoff_cleanup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (onoff_task == NULL)
+ return;
+ VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
+ kthread_stop(onoff_task);
+ onoff_task = NULL;
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+}
+EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
+
+/*
+ * Print online/offline testing statistics.
+ */
+char *torture_onoff_stats(char *page)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ page += sprintf(page,
+ "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
+ n_online_successes, n_online_attempts,
+ n_offline_successes, n_offline_attempts,
+ min_online, max_online,
+ min_offline, max_offline,
+ sum_online, sum_offline, HZ);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+ return page;
+}
+EXPORT_SYMBOL_GPL(torture_onoff_stats);
+
+/*
+ * Were all the online/offline operations successful?
+ */
+bool torture_onoff_failures(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ return n_online_successes != n_online_attempts ||
+ n_offline_successes != n_offline_attempts;
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+ return false;
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+}
+EXPORT_SYMBOL_GPL(torture_onoff_failures);
+
+#define TORTURE_RANDOM_MULT 39916801 /* prime */
+#define TORTURE_RANDOM_ADD 479001701 /* prime */
+#define TORTURE_RANDOM_REFRESH 10000
+
+/*
+ * Crude but fast random-number generator. Uses a linear congruential
+ * generator, with occasional help from cpu_clock().
+ */
+unsigned long
+torture_random(struct torture_random_state *trsp)
+{
+ if (--trsp->trs_count < 0) {
+ trsp->trs_state += (unsigned long)local_clock();
+ trsp->trs_count = TORTURE_RANDOM_REFRESH;
+ }
+ trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
+ TORTURE_RANDOM_ADD;
+ return swahw32(trsp->trs_state);
+}
+EXPORT_SYMBOL_GPL(torture_random);
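
For illustration, the bare multiply-add step by itself looks like the sketch
below (constants as defined above; the kernel version additionally mixes in
local_clock() every TORTURE_RANDOM_REFRESH calls and returns the result with
its 16-bit half-words swapped via swahw32()):

/* Sketch only: the linear-congruential step in isolation, using the
 * prime constants defined above. */
static unsigned long torture_lcg_step(unsigned long state)
{
	return state * 39916801UL + 479001701UL;
}
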
+
+/*
+ * Variables for shuffling. The idea is to ensure that each CPU stays
+ * idle for an extended period to test interactions with dyntick idle,
+ * as well as interactions with any per-CPU variables.
+ */
+struct shuffle_task {
+ struct list_head st_l;
+ struct task_struct *st_t;
+};
+
+static long shuffle_interval; /* In jiffies. */
+static struct task_struct *shuffler_task;
+static cpumask_var_t shuffle_tmp_mask;
+static int shuffle_idle_cpu; /* Force all torture tasks off this CPU */
+static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
+static DEFINE_MUTEX(shuffle_task_mutex);
+
+/*
+ * Register a task to be shuffled. If there is no memory, just splat
+ * and don't bother registering.
+ */
+void torture_shuffle_task_register(struct task_struct *tp)
+{
+ struct shuffle_task *stp;
+
+ if (WARN_ON_ONCE(tp == NULL))
+ return;
+ stp = kmalloc(sizeof(*stp), GFP_KERNEL);
+ if (WARN_ON_ONCE(stp == NULL))
+ return;
+ stp->st_t = tp;
+ mutex_lock(&shuffle_task_mutex);
+ list_add(&stp->st_l, &shuffle_task_list);
+ mutex_unlock(&shuffle_task_mutex);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
+
+/*
+ * Unregister all tasks, for example, at the end of the torture run.
+ */
+static void torture_shuffle_task_unregister_all(void)
+{
+ struct shuffle_task *stp;
+ struct shuffle_task *p;
+
+ mutex_lock(&shuffle_task_mutex);
+ list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
+ list_del(&stp->st_l);
+ kfree(stp);
+ }
+ mutex_unlock(&shuffle_task_mutex);
+}
+
+/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
+ * A special case is when shuffle_idle_cpu = -1, in which case we allow
+ * the tasks to run on all CPUs.
+ */
+static void torture_shuffle_tasks(void)
+{
+ struct shuffle_task *stp;
+
+ cpumask_setall(shuffle_tmp_mask);
+ get_online_cpus();
+
+ /* No point in shuffling if there is only one online CPU (ex: UP) */
+ if (num_online_cpus() == 1) {
+ put_online_cpus();
+ return;
+ }
+
+ /* Advance to the next CPU. Upon overflow, don't idle any CPUs. */
+ shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
+ if (shuffle_idle_cpu >= nr_cpu_ids)
+ shuffle_idle_cpu = -1;
+ if (shuffle_idle_cpu != -1) {
+ cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
+ if (cpumask_empty(shuffle_tmp_mask)) {
+ put_online_cpus();
+ return;
+ }
+ }
+
+ mutex_lock(&shuffle_task_mutex);
+ list_for_each_entry(stp, &shuffle_task_list, st_l)
+ set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+ mutex_unlock(&shuffle_task_mutex);
+
+ put_online_cpus();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks. This is meant
+ * to test the support for such tickless idle CPU in RCU.
+ */
+static int torture_shuffle(void *arg)
+{
+ VERBOSE_TOROUT_STRING("torture_shuffle task started");
+ do {
+ schedule_timeout_interruptible(shuffle_interval);
+ torture_shuffle_tasks();
+ torture_shutdown_absorb("torture_shuffle");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("torture_shuffle");
+ return 0;
+}
+
+/*
+ * Start the shuffler, with shuffint in jiffies.
+ */
+int torture_shuffle_init(long shuffint)
+{
+ shuffle_interval = shuffint;
+
+ shuffle_idle_cpu = -1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
+ return -ENOMEM;
+ }
+
+ /* Create the shuffler thread */
+ return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_init);
+
+/*
+ * Stop the shuffling.
+ */
+static void torture_shuffle_cleanup(void)
+{
+ torture_shuffle_task_unregister_all();
+ if (shuffler_task) {
+ VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
+ kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
+ }
+ shuffler_task = NULL;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
+
+/*
+ * Variables for auto-shutdown. This allows "lights out" torture runs
+ * to be fully scripted.
+ */
+static int shutdown_secs; /* desired test duration in seconds. */
+static struct task_struct *shutdown_task;
+static unsigned long shutdown_time; /* jiffies to system shutdown. */
+static void (*torture_shutdown_hook)(void);
+
+/*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+void torture_shutdown_absorb(const char *title)
+{
+ while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+ pr_notice("torture thread %s parking due to system shutdown\n",
+ title);
+ schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+ }
+}
+EXPORT_SYMBOL_GPL(torture_shutdown_absorb);
+
+/*
+ * Cause the torture test to shutdown the system after the test has
+ * run for the time specified by the shutdown_secs parameter.
+ */
+static int torture_shutdown(void *arg)
+{
+ long delta;
+ unsigned long jiffies_snap;
+
+ VERBOSE_TOROUT_STRING("torture_shutdown task started");
+ jiffies_snap = jiffies;
+ while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
+ !torture_must_stop()) {
+ delta = shutdown_time - jiffies_snap;
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "torture_shutdown task: %lu jiffies remaining\n",
+ torture_type, delta);
+ schedule_timeout_interruptible(delta);
+ jiffies_snap = jiffies;
+ }
+ if (torture_must_stop()) {
+ torture_kthread_stopping("torture_shutdown");
+ return 0;
+ }
+
+ /* OK, shut down the system. */
+
+ VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
+ shutdown_task = NULL; /* Avoid self-kill deadlock. */
+ if (torture_shutdown_hook)
+ torture_shutdown_hook();
+ else
+ VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
+ kernel_power_off(); /* Shut down the system. */
+ return 0;
+}
+
+/*
+ * Start up the shutdown task.
+ */
+int torture_shutdown_init(int ssecs, void (*cleanup)(void))
+{
+ int ret = 0;
+
+ shutdown_secs = ssecs;
+ torture_shutdown_hook = cleanup;
+ if (shutdown_secs > 0) {
+ shutdown_time = jiffies + shutdown_secs * HZ;
+ ret = torture_create_kthread(torture_shutdown, NULL,
+ shutdown_task);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(torture_shutdown_init);
+
+/*
+ * Detect and respond to a system shutdown.
+ */
+static int torture_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+ mutex_lock(&fullstop_mutex);
+ if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
+ VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
+ ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+ } else {
+ pr_warn("Concurrent rmmod and shutdown illegal!\n");
+ }
+ mutex_unlock(&fullstop_mutex);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block torture_shutdown_nb = {
+ .notifier_call = torture_shutdown_notify,
+};
+
+/*
+ * Shut down the shutdown task. Say what??? Heh! This can happen if
+ * the torture module gets an rmmod before the shutdown time arrives. ;-)
+ */
+static void torture_shutdown_cleanup(void)
+{
+ unregister_reboot_notifier(&torture_shutdown_nb);
+ if (shutdown_task != NULL) {
+ VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
+ kthread_stop(shutdown_task);
+ }
+ shutdown_task = NULL;
+}
+
+/*
+ * Variables for stuttering, which means to periodically pause and
+ * restart testing in order to catch bugs that appear when load is
+ * suddenly applied to or removed from the system.
+ */
+static struct task_struct *stutter_task;
+static int stutter_pause_test;
+static int stutter;
+
+/*
+ * Block until the stutter interval ends. This must be called periodically
+ * by all running kthreads that need to be subject to stuttering.
+ */
+void stutter_wait(const char *title)
+{
+ while (ACCESS_ONCE(stutter_pause_test) ||
+ (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
+ if (stutter_pause_test)
+ schedule_timeout_interruptible(1);
+ else
+ schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ torture_shutdown_absorb(title);
+ }
+}
+EXPORT_SYMBOL_GPL(stutter_wait);
+
+/*
+ * Cause the torture test to "stutter", starting and stopping all
+ * threads periodically.
+ */
+static int torture_stutter(void *arg)
+{
+ VERBOSE_TOROUT_STRING("torture_stutter task started");
+ do {
+ if (!torture_must_stop()) {
+ schedule_timeout_interruptible(stutter);
+ ACCESS_ONCE(stutter_pause_test) = 1;
+ }
+ if (!torture_must_stop())
+ schedule_timeout_interruptible(stutter);
+ ACCESS_ONCE(stutter_pause_test) = 0;
+ torture_shutdown_absorb("torture_stutter");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("torture_stutter");
+ return 0;
+}
+
+/*
+ * Initialize and kick off the torture_stutter kthread.
+ */
+int torture_stutter_init(int s)
+{
+ int ret;
+
+ stutter = s;
+ ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(torture_stutter_init);
+
+/*
+ * Cleanup after the torture_stutter kthread.
+ */
+static void torture_stutter_cleanup(void)
+{
+ if (!stutter_task)
+ return;
+ VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
+ kthread_stop(stutter_task);
+ stutter_task = NULL;
+}
+
+/*
+ * Initialize torture module. Please note that this is -not- invoked via
+ * the usual module_init() mechanism, but rather by an explicit call from
+ * the client torture module. This call must be paired with a later
+ * torture_init_end().
+ *
+ * The runnable parameter points to a flag that controls whether or not
+ * the test is currently runnable. If there is no such flag, pass in NULL.
+ */
+void __init torture_init_begin(char *ttype, bool v, int *runnable)
+{
+ mutex_lock(&fullstop_mutex);
+ torture_type = ttype;
+ verbose = v;
+ torture_runnable = runnable;
+ fullstop = FULLSTOP_DONTSTOP;
+
+}
+EXPORT_SYMBOL_GPL(torture_init_begin);
+
+/*
+ * Tell the torture module that initialization is complete.
+ */
+void __init torture_init_end(void)
+{
+ mutex_unlock(&fullstop_mutex);
+ register_reboot_notifier(&torture_shutdown_nb);
+}
+EXPORT_SYMBOL_GPL(torture_init_end);
+
+/*
+ * Clean up torture module. Please note that this is -not- invoked via
+ * the usual module_exit() mechanism, but rather by an explicit call from
+ * the client torture module. Returns true if a race with system shutdown
+ * is detected, otherwise, all kthreads started by functions in this file
+ * will be shut down.
+ *
+ * This must be called before the caller starts shutting down its own
+ * kthreads.
+ */
+bool torture_cleanup(void)
+{
+ mutex_lock(&fullstop_mutex);
+ if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+ pr_warn("Concurrent rmmod and shutdown illegal!\n");
+ mutex_unlock(&fullstop_mutex);
+ schedule_timeout_uninterruptible(10);
+ return true;
+ }
+ ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+ mutex_unlock(&fullstop_mutex);
+ torture_shutdown_cleanup();
+ torture_shuffle_cleanup();
+ torture_stutter_cleanup();
+ torture_onoff_cleanup();
+ return false;
+}
+EXPORT_SYMBOL_GPL(torture_cleanup);
+
+/*
+ * Is it time for the current torture test to stop?
+ */
+bool torture_must_stop(void)
+{
+ return torture_must_stop_irq() || kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(torture_must_stop);
+
+/*
+ * Is it time for the current torture test to stop? This is the irq-safe
+ * version, hence no check for kthread_should_stop().
+ */
+bool torture_must_stop_irq(void)
+{
+ return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
+}
+EXPORT_SYMBOL_GPL(torture_must_stop_irq);
+
+/*
+ * Each kthread must wait for kthread_should_stop() before returning from
+ * its top-level function, otherwise segfaults ensue. This function
+ * prints a "stopping" message and waits for kthread_should_stop(), and
+ * should be called from all torture kthreads immediately prior to
+ * returning.
+ */
+void torture_kthread_stopping(char *title)
+{
+ if (verbose)
+ VERBOSE_TOROUT_STRING(title);
+ while (!kthread_should_stop()) {
+ torture_shutdown_absorb(title);
+ schedule_timeout_uninterruptible(1);
+ }
+}
+EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+
+/*
+ * Create a generic torture kthread that is immediately runnable. If you
+ * need the kthread to be stopped so that you can do something to it before
+ * it starts, you will need to open-code your own.
+ */
+int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+ char *f, struct task_struct **tp)
+{
+ int ret = 0;
+
+ VERBOSE_TOROUT_STRING(m);
+ *tp = kthread_run(fn, arg, s);
+ if (IS_ERR(*tp)) {
+ ret = PTR_ERR(*tp);
+ VERBOSE_TOROUT_ERRSTRING(f);
+ *tp = NULL;
+ }
+ torture_shuffle_task_register(*tp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(_torture_create_kthread);
+
+/*
+ * Stop a generic kthread, emitting a message.
+ */
+void _torture_stop_kthread(char *m, struct task_struct **tp)
+{
+ if (*tp == NULL)
+ return;
+ VERBOSE_TOROUT_STRING(m);
+ kthread_stop(*tp);
+ *tp = NULL;
+}
+EXPORT_SYMBOL_GPL(_torture_stop_kthread);
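
Taken together, the functions above form the client-facing API of the new
torture infrastructure. As a hedged sketch (the "foo" module and its loop
are hypothetical; torture_create_kthread() and stutter_wait() are assumed to
come from the linux/torture.h header added in this series), a client torture
module might drive it like this:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/torture.h>

static struct task_struct *foo_task;
static int foo_runnable = 1;

static int foo_torture_loop(void *arg)
{
	VERBOSE_TOROUT_STRING("foo_torture_loop task started");
	do {
		/* ... exercise the primitive under test here ... */
		stutter_wait("foo_torture_loop");
	} while (!torture_must_stop());
	torture_kthread_stopping("foo_torture_loop");
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	torture_init_begin("foo", true, &foo_runnable);
	ret = torture_create_kthread(foo_torture_loop, NULL, foo_task);
	torture_init_end();
	return ret;
}

static void __exit foo_exit(void)
{
	if (torture_cleanup())
		return;		/* lost a race with system shutdown */
	_torture_stop_kthread("Stopping foo_torture_loop task", &foo_task);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
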
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index a5457d5..0434ff1 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -40,8 +40,8 @@
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
-static int producer_nice = 19;
-static int consumer_nice = 19;
+static int producer_nice = MAX_NICE;
+static int consumer_nice = MAX_NICE;
static int producer_fifo = -1;
static int consumer_fifo = -1;
@@ -308,7 +308,7 @@
/* Let the user know that the test is running at low priority */
if (producer_fifo < 0 && consumer_fifo < 0 &&
- producer_nice == 19 && consumer_nice == 19)
+ producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
trace_printk("WARNING!!! This test is running at lowest priority.\n");
trace_printk("Time: %lld (usecs)\n", time);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 815c878..24c1f23 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1600,15 +1600,31 @@
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
+static struct ring_buffer *temp_buffer;
+
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
struct ftrace_event_file *ftrace_file,
int type, unsigned long len,
unsigned long flags, int pc)
{
+ struct ring_buffer_event *entry;
+
*current_rb = ftrace_file->tr->trace_buffer.buffer;
- return trace_buffer_lock_reserve(*current_rb,
+ entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
+ /*
+ * If tracing is off, but we have triggers enabled,
+ * we still need to look at the event data. Use the temp_buffer
+ * to store the trace event for the trigger to use. It's recursion
+ * safe and will not be recorded anywhere.
+ */
+ if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+ *current_rb = temp_buffer;
+ entry = trace_buffer_lock_reserve(*current_rb,
+ type, len, flags, pc);
+ }
+ return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
@@ -6494,11 +6510,16 @@
raw_spin_lock_init(&global_trace.start_lock);
+ /* Used for event triggers */
+ temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+ if (!temp_buffer)
+ goto out_free_cpumask;
+
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
- goto out_free_cpumask;
+ goto out_free_temp_buffer;
}
if (global_trace.buffer_disabled)
@@ -6540,6 +6561,8 @@
return 0;
+out_free_temp_buffer:
+ ring_buffer_free(temp_buffer);
out_free_cpumask:
free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index e854f42..c894614 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -31,9 +31,25 @@
}
/* The ftrace function trace is allowed only for root. */
- if (ftrace_event_is_function(tp_event) &&
- perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (ftrace_event_is_function(tp_event)) {
+ if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * We don't allow user space callchains for function trace
+ * events, due to issues with page faults while tracing the
+ * page fault handler, and its overall tricky nature.
+ */
+ if (!p_event->attr.exclude_callchain_user)
+ return -EINVAL;
+
+ /*
+ * Same reason to disable user stack dump as for user space
+ * callchains above.
+ */
+ if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
+ return -EINVAL;
+ }
/* No tracing, just counting, so no obvious leak */
if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
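
From user space, the new checks mean that a perf_event_attr opening a
function-trace event has to exclude user callchains and user stack dumps. A
hedged sketch (the config value, i.e. the event id, is hypothetical):

#include <linux/perf_event.h>
#include <string.h>

/* Sketch: attributes that satisfy the checks above. */
void foo_setup_func_trace_attr(struct perf_event_attr *attr, __u64 config)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_TRACEPOINT;
	attr->config = config;			/* event id (hypothetical) */
	attr->sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr->exclude_callchain_user = 1;	/* else -EINVAL */
	/* and do not set PERF_SAMPLE_STACK_USER in sample_type */
}
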
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4..7b16d40 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
DEFINE_MUTEX(event_mutex);
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);
@@ -1777,6 +1771,16 @@
{
struct ftrace_event_call **call, **start, **end;
+ if (!mod->num_trace_events)
+ return;
+
+ /* Don't add infrastructure for mods without tracepoints */
+ if (trace_module_has_bad_taint(mod)) {
+ pr_err("%s: module has bad taint, not creating trace events\n",
+ mod->name);
+ return;
+ }
+
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e7..ee0a509 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@
#undef __array
#define __array(type, item, len) \
do { \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- mutex_lock(&event_storage_mutex); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), filter_type); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
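
For a concrete instance of the compile-time string pasting above:

/* Example: __array(int, counts, 4) now builds the type string as
 * "int" "[" "4" "]" == "int[4]" at compile time, which is what lets
 * the shared event_storage buffer and event_storage_mutex (removed in
 * kernel/trace/trace_events.c above) go away. */
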
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2aefbee..887ef88 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -498,14 +498,14 @@
}
EXPORT_SYMBOL(trace_hardirqs_off);
-void trace_hardirqs_on_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
-void trace_hardirqs_off_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f2654..031cc56 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
#ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
static int tracepoint_module_coming(struct module *mod)
{
struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@
* module headers (for forced load), to make sure we don't cause a crash.
* Staging and out-of-tree GPL modules are fine.
*/
- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+ if (trace_module_has_bad_taint(mod))
return 0;
mutex_lock(&tracepoints_mutex);
tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
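
In other words, the helper masks off the two benign taints and reports
anything left over. A small illustration (the TAINT_* bit values are assumed
to come from include/linux/kernel.h):

/* Illustration only: a module tainted solely TAINT_OOT_MODULE and/or
 * TAINT_CRAP yields 0 here and keeps its trace events; any other
 * taint bit survives the mask and the module is skipped. */
static bool foo_has_bad_taint(unsigned long taints)
{
	unsigned long benign = (1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP);

	return (taints & ~benign) != 0;
}
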
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62..dd06439 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@
*
* When there is no mapping defined for the user-namespace uid
* pair INVALID_UID is returned. Callers are expected to test
- * for and handle handle INVALID_UID being returned. INVALID_UID
+ * for and handle INVALID_UID being returned. INVALID_UID
* may be tested for using uid_valid().
*/
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3..3fa5b8f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@
if (worker->flags & WORKER_IDLE)
pool->nr_idle--;
+ /*
+ * Once WORKER_DIE is set, the kworker may destroy itself at any
+ * point. Pin to ensure the task stays until we're done with it.
+ */
+ get_task_struct(worker->task);
+
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
@@ -1859,6 +1865,7 @@
spin_unlock_irq(&pool->lock);
kthread_stop(worker->task);
+ put_task_struct(worker->task);
kfree(worker);
spin_lock_irq(&pool->lock);
@@ -3218,7 +3225,7 @@
return -ENOMEM;
if (sscanf(buf, "%d", &attrs->nice) == 1 &&
- attrs->nice >= -20 && attrs->nice <= 19)
+ attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
ret = apply_workqueue_attrs(wq, attrs);
else
ret = -EINVAL;
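
The symbolic nice limits substituted here and in the ring-buffer benchmark
above replace their literal values one-for-one, so they are assumed to be:

/* Assumed definitions (include/linux/sched/prio.h), matching the
 * literals they replace. */
#define MIN_NICE	(-20)
#define MAX_NICE	19
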
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a48abea..dd7f885 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -980,6 +980,21 @@
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
+config LOCK_TORTURE_TEST
+ tristate "torture tests for locking"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ default n
+ help
+ This option provides a kernel module that runs torture tests
+ on kernel locking primitives. The kernel module may be built
+ after the fact on the running kernel to be tested, if desired.
+
+ Say Y here if you want kernel locking-primitive torture tests
+ to be built into the kernel.
+ Say M if you want these torture tests to build as a module.
+ Say N if you are unsure.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1141,9 +1156,14 @@
Say N if you are unsure.
+config TORTURE_TEST
+ tristate
+ default n
+
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
+ select TORTURE_TEST
default n
help
This option provides a kernel module that runs torture tests
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 2defd13..98f2d7e 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -424,111 +424,134 @@
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key. At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
* dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
* count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
*
* Memory usage is mostly constrained by the maximum number of available
* dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event. dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
*
* At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
*/
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+ return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+ (entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
{
int overlap = 0, i;
for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+ if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
overlap |= 1 << i;
return overlap;
}
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
int i;
- if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+ if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
return overlap;
for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
if (overlap & 1 << i)
- radix_tree_tag_set(&dma_active_pfn, pfn, i);
+ radix_tree_tag_set(&dma_active_cacheline, cln, i);
else
- radix_tree_tag_clear(&dma_active_pfn, pfn, i);
+ radix_tree_tag_clear(&dma_active_cacheline, cln, i);
return overlap;
}
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
{
- int overlap = active_pfn_read_overlap(pfn);
+ int overlap = active_cacheline_read_overlap(cln);
- overlap = active_pfn_set_overlap(pfn, ++overlap);
+ overlap = active_cacheline_set_overlap(cln, ++overlap);
/* If we overflowed the overlap counter then we're potentially
* leaking dma-mappings. Otherwise, if maps and unmaps are
* balanced then this overflow may cause false negatives in
- * debug_dma_assert_idle() as the pfn may be marked idle
+ * debug_dma_assert_idle() as the cacheline may be marked idle
* prematurely.
*/
- WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
- "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
- ACTIVE_PFN_MAX_OVERLAP, pfn);
+ WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+ "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+ ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
{
- int overlap = active_pfn_read_overlap(pfn);
+ int overlap = active_cacheline_read_overlap(cln);
- return active_pfn_set_overlap(pfn, --overlap);
+ return active_cacheline_set_overlap(cln, --overlap);
}
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
{
+ phys_addr_t cln = to_cacheline_number(entry);
unsigned long flags;
int rc;
+ /* If the device is not writing memory then we don't have any
+ * concerns about the cpu consuming stale data. This mitigates
+ * legitimate usages of overlapping mappings.
+ */
+ if (entry->direction == DMA_TO_DEVICE)
+ return 0;
+
spin_lock_irqsave(&radix_lock, flags);
- rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+ rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
if (rc == -EEXIST)
- active_pfn_inc_overlap(entry->pfn);
+ active_cacheline_inc_overlap(cln);
spin_unlock_irqrestore(&radix_lock, flags);
return rc;
}
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
{
+ phys_addr_t cln = to_cacheline_number(entry);
unsigned long flags;
+ /* ...mirror the insert case */
+ if (entry->direction == DMA_TO_DEVICE)
+ return;
+
spin_lock_irqsave(&radix_lock, flags);
/* since we are counting overlaps the final put of the
- * entry->pfn will occur when the overlap count is 0.
- * active_pfn_dec_overlap() returns -1 in that case
+ * cacheline will occur when the overlap count is 0.
+ * active_cacheline_dec_overlap() returns -1 in that case
*/
- if (active_pfn_dec_overlap(entry->pfn) < 0)
- radix_tree_delete(&dma_active_pfn, entry->pfn);
+ if (active_cacheline_dec_overlap(cln) < 0)
+ radix_tree_delete(&dma_active_cacheline, cln);
spin_unlock_irqrestore(&radix_lock, flags);
}
/**
* debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
*
* Place a call to this routine in cases where the cpu touching the page
* before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@
*/
void debug_dma_assert_idle(struct page *page)
{
+ static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+ struct dma_debug_entry *entry = NULL;
+ void **results = (void **) &ents;
+ unsigned int nents, i;
unsigned long flags;
- struct dma_debug_entry *entry;
+ phys_addr_t cln;
if (!page)
return;
+ cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
spin_lock_irqsave(&radix_lock, flags);
- entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+ nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+ CACHELINES_PER_PAGE);
+ for (i = 0; i < nents; i++) {
+ phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+ if (ent_cln == cln) {
+ entry = ents[i];
+ break;
+ } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+ break;
+ }
spin_unlock_irqrestore(&radix_lock, flags);
if (!entry)
return;
+ cln = to_cacheline_number(entry);
err_printk(entry->dev, entry,
- "DMA-API: cpu touching an active dma mapped page "
- "[pfn=0x%lx]\n", entry->pfn);
+ "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+ &cln);
}
/*
@@ -568,9 +607,9 @@
hash_bucket_add(bucket, entry);
put_hash_bucket(bucket, &flags);
- rc = active_pfn_insert(entry);
+ rc = active_cacheline_insert(entry);
if (rc == -ENOMEM) {
- pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+ pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
}
@@ -631,7 +670,7 @@
{
unsigned long flags;
- active_pfn_remove(entry);
+ active_cacheline_remove(entry);
/*
* add to beginning of the list - this way the entries are
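
To make the new cacheline keying concrete, a worked example of
to_cacheline_number() under assumed x86-like constants:

/* Assuming PAGE_SHIFT = 12 and L1_CACHE_SHIFT = 6, we get
 * CACHELINE_PER_PAGE_SHIFT = 6 and CACHELINES_PER_PAGE = 64.
 * An entry with pfn = 0x1234 and offset = 0x80 then maps to
 *   (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 0x2 = 0x48d02,
 * i.e. the third cacheline of that page. */
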
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 4dc1b99..34fd931 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -9,7 +9,7 @@
config FONTS
bool "Select compiled-in fonts"
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
help
Say Y here if you would like to use fonts other than the default
your frame buffer console usually uses.
@@ -22,7 +22,7 @@
config FONT_8x8
bool "VGA 8x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
@@ -45,7 +45,7 @@
config FONT_6x11
bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS && MAC
help
Small console font with Macintosh-style high-half glyphs. Some Mac
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7811ed3..bd4a8dfd 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1253,8 +1253,10 @@
node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
- if (cur_index > max_index)
+ if (cur_index > max_index) {
+ rcu_read_unlock();
break;
+ }
cur_index = __locate(node, item, cur_index, &found_index);
rcu_read_unlock();
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df..6148967 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -244,8 +244,19 @@
static bool latch = false;
static DEFINE_SPINLOCK(lock);
+ /* Asking for random bytes might result in bytes getting
+ * moved into the nonblocking pool and thus marking it
+ * as initialized. In this case we would double back into
+ * this function and attempt to do a late reseed.
+ * Ignore the pointless attempt to reseed again if we're
+ * already waiting for bytes when the nonblocking pool
+ * got initialized.
+ */
+
/* only allow initial seeding (late == false) once */
- spin_lock_irqsave(&lock, flags);
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
if (latch && !late)
goto out;
latch = true;
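
The guard pattern above, restated as a stand-alone sketch (not the actual
prandom code): if the lock is already held, this invocation can only be the
recursive one triggered by the entropy request itself, so it is safe to drop.

static DEFINE_SPINLOCK(foo_seed_lock);

static void foo_reseed_once(void)
{
	unsigned long flags;

	/* Re-entered from the entropy request? Just skip this attempt. */
	if (!spin_trylock_irqsave(&foo_seed_lock, flags))
		return;
	/* ... fetch random bytes and reseed here ... */
	spin_unlock_irqrestore(&foo_seed_lock, flags);
}
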
diff --git a/lib/string.c b/lib/string.c
index e5878de..9b1f906 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -648,7 +648,7 @@
* @count: The size of the area.
*/
#undef memcmp
-int memcmp(const void *cs, const void *ct, size_t count)
+__visible int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
diff --git a/mm/Kconfig b/mm/Kconfig
index 2d9f150..2888024 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -575,5 +575,5 @@
then you should select this. This causes zsmalloc to use page table
mapping rather than copying for object mapping.
- You can check speed with zsmalloc benchmark[1].
- [1] https://github.com/spartacus06/zsmalloc
+ You can check speed with zsmalloc benchmark:
+ https://github.com/spartacus06/zsmapbench
diff --git a/mm/compaction.c b/mm/compaction.c
index b48c525..9185775 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@
{
int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL;
- unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags;
bool locked = false;
@@ -264,11 +263,12 @@
nr_scanned++;
if (!pfn_valid_within(blockpfn))
- continue;
+ goto isolate_fail;
+
if (!valid_page)
valid_page = page;
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/*
* The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
- if (!isolated && strict)
- break;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@
if (isolated) {
blockpfn += isolated - 1;
cursor += isolated - 1;
+ continue;
}
+
+isolate_fail:
+ if (strict)
+ break;
+ else
+ continue;
+
}
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@
* pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail.
*/
- if (strict && nr_strict_required > total_isolated)
+ if (strict && blockpfn < end_pfn)
total_isolated = 0;
if (locked)
diff --git a/mm/fremap.c b/mm/fremap.c
index bbc4d66..34feba6 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -23,28 +23,44 @@
#include "internal.h"
+static int mm_counter(struct page *page)
+{
+ return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
+}
+
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
+ struct page *page;
+ swp_entry_t entry;
if (pte_present(pte)) {
- struct page *page;
-
flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
set_page_dirty(page);
+ update_hiwater_rss(mm);
+ dec_mm_counter(mm, mm_counter(page));
page_remove_rmap(page);
page_cache_release(page);
- update_hiwater_rss(mm);
- dec_mm_counter(mm, MM_FILEPAGES);
}
- } else {
- if (!pte_file(pte))
- free_swap_and_cache(pte_to_swp_entry(pte));
+ } else { /* zap_pte() is not called when pte_none() */
+ if (!pte_file(pte)) {
+ update_hiwater_rss(mm);
+ entry = pte_to_swp_entry(pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+ dec_mm_counter(mm, mm_counter(page));
+ }
+ } else {
+ free_swap_and_cache(entry);
+ dec_mm_counter(mm, MM_SWAPENTS);
+ }
+ }
pte_clear_not_present_full(mm, addr, ptep, 0);
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index da23eb9..1546655 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1166,8 +1166,10 @@
} else {
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
- if (ret & VM_FAULT_OOM)
+ if (ret & VM_FAULT_OOM) {
split_huge_page(page);
+ ret |= VM_FAULT_FALLBACK;
+ }
put_page(page);
}
count_vm_event(THP_FAULT_FALLBACK);
@@ -1179,9 +1181,10 @@
if (page) {
split_huge_page(page);
put_page(page);
- }
+ } else
+ split_huge_page_pmd(vma, address, pmd);
+ ret |= VM_FAULT_FALLBACK;
count_vm_event(THP_FAULT_FALLBACK);
- ret |= VM_FAULT_OOM;
goto out;
}
@@ -1958,7 +1961,7 @@
return ret;
}
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
diff --git a/mm/ksm.c b/mm/ksm.c
index aa4c7c7..68710e8 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@
static struct page *page_trans_compound_anon(struct page *page)
{
if (PageTransCompound(page)) {
- struct page *head = compound_trans_head(page);
+ struct page *head = compound_head(page);
/*
* head may actually be split and freed from under
* us but it's ok here.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53385cd..5b6b003 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1127,8 +1127,8 @@
* skipping css reference should be safe.
*/
if (next_css) {
- if ((next_css->flags & CSS_ONLINE) &&
- (next_css == &root->css || css_tryget(next_css)))
+ if ((next_css == &root->css) ||
+ ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
return mem_cgroup_from_css(next_css);
prev_css = next_css;
@@ -1687,7 +1687,7 @@
* protects memcg_name and makes sure that parallel ooms do not
* interleave
*/
- static DEFINE_SPINLOCK(oom_info_lock);
+ static DEFINE_MUTEX(oom_info_lock);
struct cgroup *task_cgrp;
struct cgroup *mem_cgrp;
static char memcg_name[PATH_MAX];
@@ -1698,7 +1698,7 @@
if (!p)
return;
- spin_lock(&oom_info_lock);
+ mutex_lock(&oom_info_lock);
rcu_read_lock();
mem_cgrp = memcg->css.cgroup;
@@ -1767,7 +1767,7 @@
pr_cont("\n");
}
- spin_unlock(&oom_info_lock);
+ mutex_unlock(&oom_info_lock);
}
/*
@@ -6595,6 +6595,7 @@
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_event *event, *tmp;
+ struct cgroup_subsys_state *iter;
/*
* Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@
kmem_cgroup_css_offline(memcg);
mem_cgroup_invalidate_reclaim_iterators(memcg);
- mem_cgroup_reparent_charges(memcg);
+
+ /*
+ * This requires that offlining is serialized. Right now that is
+ * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+ */
+ css_for_each_descendant_post(iter, css)
+ mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
mem_cgroup_destroy_all_caches(memcg);
vmpressure_cleanup(&memcg->vmpressure);
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2f2f34a..90002ea 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1651,7 +1651,7 @@
{
int ret;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_trans_head(page);
+ struct page *hpage = compound_head(page);
if (PageHWPoison(page)) {
pr_info("soft offline: %#lx page already poisoned\n", pfn);
diff --git a/mm/memory.c b/mm/memory.c
index be6a0c0..22dfa61 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3348,6 +3348,7 @@
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
ret = VM_FAULT_HWPOISON;
+ page_cache_release(vmf.page);
goto uncharge_out;
}
@@ -3703,7 +3704,6 @@
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
-retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
@@ -3741,20 +3741,13 @@
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
- /*
- * If COW results in an oom, the huge pmd will
- * have been split, so retry the fault on the
- * pte for a smaller charge.
- */
- if (unlikely(ret & VM_FAULT_OOM))
- goto retry;
- return ret;
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
+ return 0;
}
-
- return 0;
}
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ae3c8f3..4755c85 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1556,10 +1556,10 @@
#ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode,
+ compat_ulong_t, addr, compat_ulong_t, flags)
{
long err;
unsigned long __user *nm = NULL;
@@ -1586,8 +1586,8 @@
return err;
}
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode)
+COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode)
{
long err = 0;
unsigned long __user *nm = NULL;
@@ -1609,9 +1609,9 @@
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+ compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode, compat_ulong_t, flags)
{
long err = 0;
unsigned long __user *nm = NULL;
@@ -2301,35 +2301,6 @@
kmem_cache_free(sn_cache, n);
}
-#ifdef CONFIG_NUMA_BALANCING
-static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
- /* Never defer a private fault */
- if (cpupid_match_pid(p, last_cpupid))
- return false;
-
- if (p->numa_migrate_deferred) {
- p->numa_migrate_deferred--;
- return true;
- }
- return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
- p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
-}
-#else
-static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
- return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
/**
* mpol_misplaced - check whether current page node is valid in policy
*
@@ -2403,52 +2374,9 @@
/* Migrate the page towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
- int last_cpupid;
- int this_cpupid;
-
polnid = thisnid;
- this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
- /*
- * Multi-stage node selection is used in conjunction
- * with a periodic migration fault to build a temporal
- * task<->page relation. By using a two-stage filter we
- * remove short/unlikely relations.
- *
- * Using P(p) ~ n_p / n_t as per frequentist
- * probability, we can equate a task's usage of a
- * particular page (n_p) per total usage of this
- * page (n_t) (in a given time-span) to a probability.
- *
- * Our periodic faults will sample this probability and
- * getting the same result twice in a row, given these
- * samples are fully independent, is then given by
- * P(n)^2, provided our sample period is sufficiently
- * short compared to the usage pattern.
- *
- * This quadric squishes small probabilities, making
- * it less likely we act on an unlikely task<->page
- * relation.
- */
- last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
- if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
-
- /* See sysctl_numa_balancing_migrate_deferred comment */
- if (!cpupid_match_pid(current, last_cpupid))
- defer_numa_migrate(current);
-
- goto out;
- }
-
- /*
- * The quadratic filter above reduces extraneous migration
- * of shared pages somewhat. This code reduces it even more,
- * reducing the overhead of page migrations of shared pages.
- * This makes workloads with shared pages rely more on
- * "move task near its memory", and less on "move memory
- * towards its task", which is exactly what we want.
- */
- if (numa_migrate_deferred(current, last_cpupid))
+ if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
goto out;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 482a33d..bed4880 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,37 @@
}
/*
+ * Congratulations to trinity for discovering this bug.
+ * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+ * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+ * replace the specified range by file ptes throughout (maybe populated after).
+ * If page migration finds a page within that range, while it's still located
+ * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+ * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+ * But if the migrating page is in a part of the vma outside the range to be
+ * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+ * deal with it. Fortunately, this part of the vma is of course still linear,
+ * so we just need to use linear location on the nonlinear list.
+ */
+static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+ struct address_space *mapping, void *arg)
+{
+ struct vm_area_struct *vma;
+ /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ unsigned long addr;
+
+ list_for_each_entry(vma,
+ &mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+ addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ if (addr >= vma->vm_start && addr < vma->vm_end)
+ remove_migration_pte(page, vma, addr, arg);
+ }
+ return SWAP_AGAIN;
+}
+
+/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
@@ -186,6 +217,7 @@
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
.arg = old,
+ .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
};
rmap_walk(new, &rwc);
@@ -1158,7 +1190,7 @@
pm->node);
else
return alloc_pages_exact_node(pm->node,
- GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+ GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}
/*
@@ -1544,9 +1576,9 @@
struct page *newpage;
newpage = alloc_pages_exact_node(nid,
- (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
- __GFP_NOMEMALLOC | __GFP_NORETRY |
- __GFP_NOWARN) &
+ (GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_NOWARN) &
~GFP_IOFS, 0);
return newpage;
@@ -1747,7 +1779,8 @@
goto out_dropref;
new_page = alloc_pages_node(node,
- (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+ (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+ HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
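
To make the address recovery in
remove_linear_migration_ptes_from_nonlinear() concrete, a worked example
with assumed values:

/* Assume PAGE_SHIFT = 12 (so PAGE_CACHE_SHIFT - PAGE_SHIFT = 0 and
 * pgoff == page->index), vma->vm_start = 0x400000 and
 * vma->vm_pgoff = 0x10. A page with index 0x18 then maps to
 *   0x400000 + ((0x18 - 0x10) << 12) = 0x408000,
 * which passes the vm_start/vm_end range check before
 * remove_migration_pte() is called on it. */
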
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3758a0..3bac76a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -369,9 +369,11 @@
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- __SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
@@ -1236,6 +1238,15 @@
}
local_irq_restore(flags);
}
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+ return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+ return false;
+}
#endif
/*
@@ -1572,7 +1583,13 @@
get_pageblock_migratetype(page));
}
- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+ /*
+ * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+ * aging protocol, so they can't be fair.
+ */
+ if (!gfp_thisnode_allocation(gfp_flags))
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
@@ -1944,8 +1961,12 @@
* ultimately fall back to remote zones that do not
* partake in the fairness round-robin cycle of this
* zonelist.
+ *
+ * NOTE: GFP_THISNODE allocations do not partake in
+ * the kswapd aging protocol, so they can't be fair.
*/
- if (alloc_flags & ALLOC_WMARK_LOW) {
+ if ((alloc_flags & ALLOC_WMARK_LOW) &&
+ !gfp_thisnode_allocation(gfp_mask)) {
if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
continue;
if (!zone_local(preferred_zone, zone))
@@ -2501,8 +2522,7 @@
* allowed per node queues are empty and that nodes are
* over allocated.
*/
- if (IS_ENABLED(CONFIG_NUMA) &&
- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ if (gfp_thisnode_allocation(gfp_mask))
goto nopage;
restart:
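
The reason gfp_thisnode_allocation() tests for the full mask is that
GFP_THISNODE is assumed to be a composite in this era's include/linux/gfp.h:

/* Assumed NUMA definition the helper compares against:
 *   #define GFP_THISNODE (__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)
 * Only an allocation carrying all three bits is the opportunistic
 * "this node or fail fast" kind exempted from the fairness batching;
 * a bare __GFP_THISNODE (as in the mm/migrate.c fixes above) does not
 * match. */
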
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d04..3c5cf68 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -456,25 +456,23 @@
return rc;
}
-asmlinkage ssize_t
-compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- unsigned long liovcnt,
- const struct compat_iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
+ const struct compat_iovec __user *, lvec,
+ compat_ulong_t, liovcnt,
+ const struct compat_iovec __user *, rvec,
+ compat_ulong_t, riovcnt,
+ compat_ulong_t, flags)
{
return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
riovcnt, flags, 0);
}
-asmlinkage ssize_t
-compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- unsigned long liovcnt,
- const struct compat_iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
+ const struct compat_iovec __user *, lvec,
+ compat_ulong_t, liovcnt,
+ const struct compat_iovec __user *, rvec,
+ compat_ulong_t, riovcnt,
+ compat_ulong_t, flags)
{
return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
riovcnt, flags, 1);
diff --git a/mm/rmap.c b/mm/rmap.c
index 9056a1f..11cf322 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1370,8 +1370,9 @@
}
static int try_to_unmap_nonlinear(struct page *page,
- struct address_space *mapping, struct vm_area_struct *vma)
+ struct address_space *mapping, void *arg)
{
+ struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
unsigned long cursor;
unsigned long max_nl_cursor = 0;
@@ -1673,7 +1674,7 @@
if (list_empty(&mapping->i_mmap_nonlinear))
goto done;
- ret = rwc->file_nonlinear(page, mapping, vma);
+ ret = rwc->file_nonlinear(page, mapping, rwc->arg);
done:
mutex_unlock(&mapping->i_mmap_mutex);
diff --git a/mm/swap.c b/mm/swap.c
index b31ba67..0092097 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -98,7 +98,7 @@
}
/* __split_huge_page_refcount can run under us */
- page_head = compound_trans_head(page);
+ page_head = compound_head(page);
/*
* THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@
*/
unsigned long flags;
bool got;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
/* Ref to put_compound_page() comment. */
if (!__compound_tail_refcounted(page_head)) {
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a..d4042e7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ec99099..175273f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -307,9 +307,11 @@
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
vlandev->gso_max_size = dev->gso_max_size;
- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de51c48..27bfe2f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -538,6 +538,9 @@
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
@@ -575,6 +578,9 @@
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
+ if (dev->features & NETIF_F_VLAN_FEATURES)
+ netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
+
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
@@ -589,7 +595,8 @@
#endif
dev->needed_headroom = real_dev->needed_headroom;
- if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+ if (vlan_hw_offload_capable(real_dev->features,
+ vlan_dev_priv(dev)->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 63f0455..8fe8b71 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,14 +49,14 @@
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
- if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
- goto out;
-
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
+ if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+ goto out;
+
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb, false);
else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 28d5446..d0cca3c 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
+ struct net_port_vlans *pv;
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
@@ -39,18 +40,18 @@
* packet is allowed except in promisc mode when someone
* may be running packet capture.
*/
+ pv = br_get_vlan_info(br);
if (!(brdev->flags & IFF_PROMISC) &&
- !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
+ !br_allowed_egress(br, pv, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
- skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
- if (!skb)
- return NET_RX_DROP;
-
indev = skb->dev;
skb->dev = brdev;
+ skb = br_handle_vlan(br, pv, skb);
+ if (!skb)
+ return NET_RX_DROP;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ef66365..93067ec 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1127,9 +1127,10 @@
struct net_bridge_port *port,
struct bridge_mcast_querier *querier,
int saddr,
+ bool is_general_query,
unsigned long max_delay)
{
- if (saddr)
+ if (saddr && is_general_query)
br_multicast_update_querier_timer(br, querier, max_delay);
else if (timer_pending(&querier->timer))
return;
@@ -1181,8 +1182,16 @@
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
+ /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
+ * all-systems destination address (224.0.0.1) for general queries
+ */
+ if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
- max_delay);
+ !group, max_delay);
if (!group)
goto out;
@@ -1228,6 +1237,7 @@
unsigned long max_delay;
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
+ bool is_general_query;
int err = 0;
spin_lock(&br->multicast_lock);
@@ -1235,6 +1245,12 @@
(port && port->state == BR_STATE_DISABLED))
goto out;
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1256,8 +1272,19 @@
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
+ is_general_query = group && ipv6_addr_any(group);
+
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
+ * all-nodes destination address (ff02::1) for general queries
+ */
+ if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip6_querier,
- !ipv6_addr_any(&ip6h->saddr), max_delay);
+ !ipv6_addr_any(&ip6h->saddr),
+ is_general_query, max_delay);
if (!group)
goto out;
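
The validation added above reduces to one rule: a query that carries no group address is a general query, and general queries must be addressed to the well-known all-hosts group. A minimal standalone sketch of the IGMP side (a hypothetical helper, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define ALLHOSTS_GROUP 0xe0000001U      /* 224.0.0.1 */

    /* Sketch: RFC 2236/3376 destination check for IGMP queries.
     * 'group' is the queried group (0 for a general query) and
     * 'daddr' the IPv4 destination, both in network byte order.
     */
    static bool igmp_query_dst_valid(uint32_t group, uint32_t daddr)
    {
            bool is_general_query = (group == 0);

            if (is_general_query && daddr != htonl(ALLHOSTS_GROUP))
                    return false;   /* general query not sent to 224.0.0.1 */
            return true;
    }
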
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8249ca76..f23c74b 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -119,22 +119,6 @@
kfree_rcu(v, rcu);
}
-/* Strip the tag from the packet. Will return skb with tci set 0. */
-static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
-{
- if (skb->protocol != htons(ETH_P_8021Q)) {
- skb->vlan_tci = 0;
- return skb;
- }
-
- skb->vlan_tci = 0;
- skb = vlan_untag(skb);
- if (skb)
- skb->vlan_tci = 0;
-
- return skb;
-}
-
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *pv,
struct sk_buff *skb)
@@ -144,13 +128,27 @@
if (!br->vlan_enabled)
goto out;
+ /* The VLAN filter table must be configured at this point. The
+ * only exception is when the bridge is set in promisc mode and the
+ * packet is destined for the bridge device. In this case
+ * pass the packet as is.
+ */
+ if (!pv) {
+ if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
+ goto out;
+ } else {
+ kfree_skb(skb);
+ return NULL;
+ }
+ }
+
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id is set in the untagged bitmap,
* send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, pv->untagged_bitmap))
- skb = br_vlan_untag(skb);
+ skb->vlan_tci = 0;
out:
return skb;
@@ -174,6 +172,18 @@
if (!v)
return false;
+ /* If vlan tx offload is disabled on the bridge device and the frame was
+ * sent from a vlan device on the bridge device, it does not have an
+ * HW-accelerated vlan tag.
+ */
+ if (unlikely(!vlan_tx_tag_present(skb) &&
+ (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)))) {
+ skb = vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
err = br_vlan_get_tag(skb, vid);
if (!*vid) {
u16 pvid = br_get_pvid(v);
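
The ingress fix above hinges on where a VLAN tag can live: in skb metadata when hardware (or the VLAN code) accelerated it, or still inline in the payload when the bridge device has TX VLAN offload disabled. A compressed sketch of that decision, assuming the vlan_tx_tag_present()/vlan_untag() helpers of this kernel generation:

    /* Sketch only: normalize a frame so the tag is always in skb
     * metadata before VLAN filtering looks at it.
     */
    static struct sk_buff *normalize_vlan_tag(struct sk_buff *skb)
    {
            if (vlan_tx_tag_present(skb))
                    return skb;             /* tag already in metadata */

            if (skb->protocol == htons(ETH_P_8021Q) ||
                skb->protocol == htons(ETH_P_8021AD))
                    return vlan_untag(skb); /* pull inline tag into metadata;
                                             * may free the skb, return NULL */

            return skb;                     /* genuinely untagged */
    }
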
diff --git a/net/can/raw.c b/net/can/raw.c
index 8be757c..081e81f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -121,13 +121,9 @@
if (!ro->recv_own_msgs && oskb->sk == sk)
return;
- /* do not pass frames with DLC > 8 to a legacy socket */
- if (!ro->fd_frames) {
- struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
-
- if (unlikely(cfd->len > CAN_MAX_DLEN))
- return;
- }
+ /* do not pass non-CAN2.0 frames to a legacy socket */
+ if (!ro->fd_frames && oskb->len != CAN_MTU)
+ return;
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
@@ -738,9 +734,7 @@
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
- struct raw_sock *ro = raw_sk(sk);
struct sk_buff *skb;
- int rxmtu;
int err = 0;
int noblock;
@@ -751,20 +745,10 @@
if (!skb)
return err;
- /*
- * when serving a legacy socket the DLC <= 8 is already checked inside
- * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
- * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
- */
- if (!ro->fd_frames)
- rxmtu = CAN_MTU;
- else
- rxmtu = skb->len;
-
- if (size < rxmtu)
+ if (size < skb->len)
msg->msg_flags |= MSG_TRUNC;
else
- size = rxmtu;
+ size = skb->len;
err = memcpy_toiovec(msg->msg_iov, skb->data, size);
if (err < 0) {
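
The simplified check works because frame length, not the DLC field, distinguishes the two frame types on a raw socket: a classic CAN skb is exactly CAN_MTU bytes (sizeof(struct can_frame)), a CAN FD skb is CANFD_MTU. The same distinction from userspace, as a sketch:

    #include <sys/types.h>
    #include <unistd.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    /* Sketch: a reader that accepts both frame types must size the
     * buffer for CAN FD and inspect how many bytes arrived.
     */
    static void read_one_frame(int s)
    {
            struct canfd_frame fd_frame;
            ssize_t n = read(s, &fd_frame, sizeof(fd_frame));

            if (n == CAN_MTU) {
                    /* classic CAN 2.0 frame, at most 8 data bytes */
            } else if (n == CANFD_MTU) {
                    /* CAN FD frame, up to 64 data bytes; only delivered
                     * if CAN_RAW_FD_FRAMES was enabled via setsockopt()
                     */
            }
    }
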
diff --git a/net/compat.c b/net/compat.c
index f50161f..9a76eaf 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -384,8 +384,8 @@
return sock_setsockopt(sock, level, optname, optval, optlen);
}
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen)
+COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, unsigned int, optlen)
{
int err;
struct socket *sock = sockfd_lookup(fd, &err);
@@ -504,8 +504,8 @@
}
EXPORT_SYMBOL(compat_sock_get_timestampns);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen)
+COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int __user *, optlen)
{
int err;
struct socket *sock = sockfd_lookup(fd, &err);
@@ -735,15 +735,15 @@
};
#undef AL
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
-asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned int vlen, unsigned int flags)
+COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
@@ -751,28 +751,28 @@
flags | MSG_CMSG_COMPAT);
}
-asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
+COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags)
{
return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
}
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
- unsigned int flags, struct sockaddr __user *addr,
- int __user *addrlen)
+COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len,
+ unsigned int, flags, struct sockaddr __user *, addr,
+ int __user *, addrlen)
{
return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
}
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned int vlen, unsigned int flags,
- struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags,
+ struct compat_timespec __user *, timeout)
{
int datagrams;
struct timespec ktspec;
@@ -795,7 +795,7 @@
return datagrams;
}
-asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
+COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
{
int ret;
u32 a[6];
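
COMPAT_SYSCALL_DEFINEn is the compat twin of SYSCALL_DEFINEn: it generates the compat_sys_* entry point and, on architectures like s390 where 32-bit arguments arrive in 64-bit registers, forces each argument to its declared compat type before the body runs; the raw asmlinkage definitions being removed here did not guarantee that. Roughly (a simplified sketch, not the actual macro expansion):

    /* Simplified sketch of what COMPAT_SYSCALL_DEFINE3(recvmsg, ...)
     * provides; the real expansion goes through per-arch glue and
     * C_SYSC_recvmsg is a stand-in for the generated body.
     */
    asmlinkage long compat_sys_recvmsg(long fd, long msg, long flags)
    {
            /* narrow each full register to the declared 32-bit type */
            return C_SYSC_recvmsg((int)fd,
                                  (struct compat_msghdr __user *)(unsigned long)msg,
                                  (unsigned int)flags);
    }
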
diff --git a/net/core/dev.c b/net/core/dev.c
index b1b0c8d..45fa2f1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2286,7 +2286,7 @@
}
EXPORT_SYMBOL(skb_checksum_help);
-__be16 skb_network_protocol(struct sk_buff *skb)
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
int vlan_depth = ETH_HLEN;
@@ -2313,6 +2313,8 @@
vlan_depth += VLAN_HLEN;
}
+ *depth = vlan_depth;
+
return type;
}
@@ -2326,12 +2328,13 @@
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_offload *ptype;
- __be16 type = skb_network_protocol(skb);
+ int vlan_depth = skb->mac_len;
+ __be16 type = skb_network_protocol(skb, &vlan_depth);
if (unlikely(!type))
return ERR_PTR(-EINVAL);
- __skb_pull(skb, skb->mac_len);
+ __skb_pull(skb, vlan_depth);
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
@@ -2498,8 +2501,10 @@
const struct net_device *dev,
netdev_features_t features)
{
+ int tmp;
+
if (skb->ip_summed != CHECKSUM_NONE &&
- !can_checksum_protocol(features, skb_network_protocol(skb))) {
+ !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(dev, skb)) {
features &= ~NETIF_F_SG;
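
Returning the VLAN depth lets the GSO path pull past stacked 802.1Q/802.1ad (Q-in-Q) headers instead of assuming the fixed mac_len. A standalone sketch of the same walk over a raw Ethernet frame (illustrative, not the kernel helper):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN  4

    /* Sketch: find the first non-VLAN ethertype and report how many
     * header bytes precede the network header.
     */
    static uint16_t network_protocol(const uint8_t *frame, size_t len,
                                     int *depth)
    {
            uint16_t type;
            int off = ETH_HLEN;

            if (len < ETH_HLEN)
                    return 0;
            memcpy(&type, frame + 12, sizeof(type)); /* outer ethertype */
            while (type == htons(0x8100) || type == htons(0x88A8)) {
                    if ((size_t)(off + VLAN_HLEN) > len)
                            return 0;
                    /* inner ethertype: last 2 bytes of each 4-byte tag */
                    memcpy(&type, frame + off + 2, sizeof(type));
                    off += VLAN_HLEN;
            }
            *depth = off;
            return type;
    }
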
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b9e9e0d..e161290 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -766,9 +766,6 @@
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
- goto out;
-
/*
* periodically recompute ReachableTime from random function
*/
@@ -781,6 +778,9 @@
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
+ if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ goto out;
+
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
np = &nht->hash_buckets[i];
@@ -3046,7 +3046,7 @@
if (!t)
goto err;
- for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+ for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
t->neigh_vars[i].data += (long) p;
t->neigh_vars[i].extra1 = dev;
t->neigh_vars[i].extra2 = p;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a664f78..df9e6b1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -742,7 +742,7 @@
struct nd_msg *msg;
struct ipv6hdr *hdr;
- if (skb->protocol != htons(ETH_P_ARP))
+ if (skb->protocol != htons(ETH_P_IPV6))
return false;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
return false;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a0dac2..120eecc 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2121,12 +2121,13 @@
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
u8 *addr, u32 pid, u32 seq,
- int type, unsigned int flags)
+ int type, unsigned int flags,
+ int nlflags)
{
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
if (!nlh)
return -EMSGSIZE;
@@ -2164,7 +2165,7 @@
if (!skb)
goto errout;
- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -2389,7 +2390,8 @@
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
portid, seq,
- RTM_NEWNEIGH, NTF_SELF);
+ RTM_NEWNEIGH, NTF_SELF,
+ NLM_F_MULTI);
if (err < 0)
return err;
skip:
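
The distinction matters to userspace: NLM_F_MULTI marks a message as part of a multi-part dump terminated by NLMSG_DONE, so it is correct for the fdb dump path but wrong for one-shot notifications. After this change the two call sites read:

    /* dump path: part of a multi-part reply */
    err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, portid, seq,
                                  RTM_NEWNEIGH, NTF_SELF, NLM_F_MULTI);

    /* notification path: standalone message, no NLM_F_MULTI */
    err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
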
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5976ef0..90b96a1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -707,9 +707,6 @@
new->mark = old->mark;
new->skb_iif = old->skb_iif;
__nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- new->nf_trace = old->nf_trace;
-#endif
#ifdef CONFIG_NET_SCHED
new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
@@ -2130,25 +2127,31 @@
*
* The `hlen` as calculated by skb_zerocopy_headlen() specifies the
* headroom in the `to` buffer.
+ *
+ * Return value:
+ * 0: everything is OK
+ * -ENOMEM: couldn't orphan frags of @from due to lack of memory
+ * -EFAULT: skb_copy_bits() found some problem with skb geometry
*/
-void
-skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+int
+skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
+ int ret;
struct page *page;
unsigned int offset;
BUG_ON(!from->head_frag && !hlen);
/* don't bother with small payloads */
- if (len <= skb_tailroom(to)) {
- skb_copy_bits(from, 0, skb_put(to, len), len);
- return;
- }
+ if (len <= skb_tailroom(to))
+ return skb_copy_bits(from, 0, skb_put(to, len), len);
if (hlen) {
- skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ if (unlikely(ret))
+ return ret;
len -= hlen;
} else {
plen = min_t(int, skb_headlen(from), len);
@@ -2166,6 +2169,11 @@
to->len += len + plen;
to->data_len += len + plen;
+ if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
+ skb_tx_error(from);
+ return -ENOMEM;
+ }
+
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
if (!len)
break;
@@ -2176,6 +2184,8 @@
j++;
}
skb_shinfo(to)->nr_frags = j;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
@@ -2841,81 +2851,85 @@
/**
* skb_segment - Perform protocol segmentation on skb.
- * @skb: buffer to segment
+ * @head_skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function performs segmentation on the given skb. It returns
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
- struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
- skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int doffset = skb->data - skb_mac_header(skb);
+ struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+ skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+ unsigned int mss = skb_shinfo(head_skb)->gso_size;
+ unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+ struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
- unsigned int tnl_hlen = skb_tnl_header_len(skb);
+ unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int headroom;
unsigned int len;
__be16 proto;
bool csum;
int sg = !!(features & NETIF_F_SG);
- int nfrags = skb_shinfo(skb)->nr_frags;
+ int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
int pos;
+ int dummy;
- proto = skb_network_protocol(skb);
+ proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
- __skb_push(skb, doffset);
- headroom = skb_headroom(skb);
- pos = skb_headlen(skb);
+ __skb_push(head_skb, doffset);
+ headroom = skb_headroom(head_skb);
+ pos = skb_headlen(head_skb);
do {
struct sk_buff *nskb;
- skb_frag_t *frag;
+ skb_frag_t *nskb_frag;
int hsize;
int size;
- len = skb->len - offset;
+ len = head_skb->len - offset;
if (len > mss)
len = mss;
- hsize = skb_headlen(skb) - offset;
+ hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
- if (!hsize && i >= nfrags && skb_headlen(fskb) &&
- (skb_headlen(fskb) == len || sg)) {
- BUG_ON(skb_headlen(fskb) > len);
+ if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
+ (skb_headlen(list_skb) == len || sg)) {
+ BUG_ON(skb_headlen(list_skb) > len);
i = 0;
- nfrags = skb_shinfo(fskb)->nr_frags;
- skb_frag = skb_shinfo(fskb)->frags;
- pos += skb_headlen(fskb);
+ nfrags = skb_shinfo(list_skb)->nr_frags;
+ frag = skb_shinfo(list_skb)->frags;
+ frag_skb = list_skb;
+ pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
- size = skb_frag_size(skb_frag);
+ size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
- skb_frag++;
+ frag++;
}
- nskb = skb_clone(fskb, GFP_ATOMIC);
- fskb = fskb->next;
+ nskb = skb_clone(list_skb, GFP_ATOMIC);
+ list_skb = list_skb->next;
if (unlikely(!nskb))
goto err;
@@ -2936,7 +2950,7 @@
__skb_push(nskb, doffset);
} else {
nskb = __alloc_skb(hsize + doffset + headroom,
- GFP_ATOMIC, skb_alloc_rx_flag(skb),
+ GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
@@ -2952,12 +2966,12 @@
segs = nskb;
tail = nskb;
- __copy_skb_header(nskb, skb);
- nskb->mac_len = skb->mac_len;
+ __copy_skb_header(nskb, head_skb);
+ nskb->mac_len = head_skb->mac_len;
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
- skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+ skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
@@ -2966,30 +2980,32 @@
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
- nskb->csum = skb_copy_and_csum_bits(skb, offset,
+ nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
continue;
}
- frag = skb_shinfo(nskb)->frags;
+ nskb_frag = skb_shinfo(nskb)->frags;
- skb_copy_from_linear_data_offset(skb, offset,
+ skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
- skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
- BUG_ON(skb_headlen(fskb));
+ BUG_ON(skb_headlen(list_skb));
i = 0;
- nfrags = skb_shinfo(fskb)->nr_frags;
- skb_frag = skb_shinfo(fskb)->frags;
+ nfrags = skb_shinfo(list_skb)->nr_frags;
+ frag = skb_shinfo(list_skb)->frags;
+ frag_skb = list_skb;
BUG_ON(!nfrags);
- fskb = fskb->next;
+ list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
@@ -3000,27 +3016,30 @@
goto err;
}
- *frag = *skb_frag;
- __skb_frag_ref(frag);
- size = skb_frag_size(frag);
+ if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
+ goto err;
+
+ *nskb_frag = *frag;
+ __skb_frag_ref(nskb_frag);
+ size = skb_frag_size(nskb_frag);
if (pos < offset) {
- frag->page_offset += offset - pos;
- skb_frag_size_sub(frag, offset - pos);
+ nskb_frag->page_offset += offset - pos;
+ skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
- skb_frag++;
+ frag++;
pos += size;
} else {
- skb_frag_size_sub(frag, pos + size - (offset + len));
+ skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
- frag++;
+ nskb_frag++;
}
skip_fraglist:
@@ -3034,7 +3053,7 @@
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
}
- } while ((offset += len) < skb->len);
+ } while ((offset += len) < head_skb->len);
return segs;
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6a943..c0fc6bd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2357,10 +2357,13 @@
if (sk->sk_backlog.tail)
__release_sock(sk);
+ /* Warning: release_cb() might need to release sk ownership,
+ * i.e. call sock_release_ownership(sk) before us.
+ */
if (sk->sk_prot->release_cb)
sk->sk_prot->release_cb(sk);
- sk->sk_lock.owned = 0;
+ sock_release_ownership(sk);
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
spin_unlock_bh(&sk->sk_lock.slock);
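
sock_release_ownership() replaces the open-coded store so that a release_cb handler (see the tcp_output.c hunk further down) can drop ownership itself before doing BH-style work. Presumably it is just a named wrapper, something like:

    /* Sketch of the helper this hunk calls; assumed to live in
     * include/net/sock.h and to wrap the store it replaces.
     */
    static inline void sock_release_ownership(struct sock *sk)
    {
            sk->sk_lock.owned = 0;
    }
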
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 327060c..7ae0d7f6 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -297,7 +297,7 @@
void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
{
- if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+ if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) {
WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
return;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ecd2c3f..19ab78a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1296,8 +1296,11 @@
segs = ERR_PTR(-EPROTONOSUPPORT);
- /* Note : following gso_segment() might change skb->encapsulation */
- udpfrag = !skb->encapsulation && proto == IPPROTO_UDP;
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+ udpfrag = proto == IPPROTO_UDP && encap;
+ else
+ udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment))
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 1863422f..250be74 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -182,6 +182,14 @@
int i;
bool csum_err = false;
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+ /* Looped back packet, drop it! */
+ if (rt_is_output_route(skb_rtable(skb)))
+ goto drop;
+ }
+#endif
+
if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bb075fc..3b01959 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -208,7 +208,7 @@
}
work = frag_mem_limit(nf) - nf->low_thresh;
- while (work > 0) {
+ while (work > 0 || force) {
spin_lock(&nf->lru_lock);
if (list_empty(&nf->lru_list)) {
@@ -278,9 +278,10 @@
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
+ inet_frag_lru_add(nf, qp);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
- inet_frag_lru_add(nf, qp);
+
return qp;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8971780..73c6b63 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -422,9 +422,6 @@
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- to->nf_trace = from->nf_trace;
-#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 50228be..a82a22d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -93,13 +93,14 @@
tunnel_dst_set(t, NULL);
}
-static void tunnel_dst_reset_all(struct ip_tunnel *t)
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
{
int i;
for_each_possible_cpu(i)
__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
+EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
@@ -119,52 +120,6 @@
return (struct rtable *)dst;
}
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
-{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *tstats =
- per_cpu_ptr(dev->tstats, i);
- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_bh(&tstats->syncp);
- rx_packets = tstats->rx_packets;
- tx_packets = tstats->tx_packets;
- rx_bytes = tstats->rx_bytes;
- tx_bytes = tstats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
-
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
- tot->rx_bytes += rx_bytes;
- tot->tx_bytes += tx_bytes;
- }
-
- tot->multicast = dev->stats.multicast;
-
- tot->rx_crc_errors = dev->stats.rx_crc_errors;
- tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
- tot->rx_length_errors = dev->stats.rx_length_errors;
- tot->rx_frame_errors = dev->stats.rx_frame_errors;
- tot->rx_errors = dev->stats.rx_errors;
-
- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
- tot->tx_dropped = dev->stats.tx_dropped;
- tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
- tot->tx_errors = dev->stats.tx_errors;
-
- tot->collisions = dev->stats.collisions;
-
- return tot;
-}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
-
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
__be16 flags, __be32 key)
{
@@ -461,9 +416,6 @@
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
- /* Looped back packet, drop it! */
- if (rt_is_output_route(skb_rtable(skb)))
- goto drop;
tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
@@ -759,7 +711,7 @@
if (set_mtu)
dev->mtu = mtu;
}
- tunnel_dst_reset_all(t);
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(dev);
}
@@ -1088,7 +1040,7 @@
if (itn->fb_tunnel_dev != dev)
ip_tunnel_del(netdev_priv(dev));
- tunnel_dst_reset_all(tunnel);
+ ip_tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6156f4e..8d69626 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -148,3 +148,49 @@
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
+
+/* Often modified stats are per cpu, others are shared (netdev->stats) */
+struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *tstats =
+ per_cpu_ptr(dev->tstats, i);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+
+ tot->multicast = dev->stats.multicast;
+
+ tot->rx_crc_errors = dev->stats.rx_crc_errors;
+ tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_frame_errors = dev->stats.rx_frame_errors;
+ tot->rx_errors = dev->stats.rx_errors;
+
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ tot->collisions = dev->stats.collisions;
+
+ return tot;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b9b3472..28863570 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2255,13 +2255,14 @@
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2329,7 +2330,7 @@
if (skb == NULL)
goto errout;
- err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2368,7 +2369,8 @@
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2382,7 +2384,8 @@
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d551e31..7c67667 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1198,8 +1198,8 @@
map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
} else {
/* DNAT replies */
- map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
- map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+ map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
+ map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
}
if (map.from == map.to)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9f3a2db..97c8f56 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1044,7 +1044,8 @@
}
}
-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+ int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
int err, flags;
@@ -1059,11 +1060,12 @@
if (unlikely(tp->fastopen_req == NULL))
return -ENOBUFS;
tp->fastopen_req->data = msg;
+ tp->fastopen_req->size = size;
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
msg->msg_namelen, flags);
- *size = tp->fastopen_req->copied;
+ *copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
return err;
}
@@ -1083,7 +1085,7 @@
flags = msg->msg_flags;
if (flags & MSG_FASTOPEN) {
- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
else if (err)
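
Passing `size` through means the SYN carries at most what the caller actually handed to sendmsg(). For reference, this path is driven from userspace by sending data together with the connect via MSG_FASTOPEN; a hedged sketch:

    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000
    #endif

    /* Sketch: send request data in the SYN of a TCP Fast Open
     * connect. 'addr' is a prepared peer address; error handling
     * elided.
     */
    static ssize_t tfo_request(int s, const struct sockaddr_in *addr,
                               const char *req, size_t len)
    {
            /* no connect() first: MSG_FASTOPEN implies the connect and
             * queues up to 'len' bytes (the 'size' now honored above)
             * into the SYN, or falls back to a regular handshake
             */
            return sendto(s, req, len, MSG_FASTOPEN,
                          (const struct sockaddr *)addr, sizeof(*addr));
    }
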
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index ad37bf1..2388275 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -290,8 +290,7 @@
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size &&
- left < sk->sk_gso_max_segs)
+ left < tp->xmit_size_goal_segs)
return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 227cba7..eeaac39 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1945,8 +1945,9 @@
if (skb == tcp_send_head(sk))
break;
- if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->undo_marker = 0;
+
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3cf9765..1e4eac7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2628,7 +2628,7 @@
{
__be32 dest, src;
__u16 destp, srcp;
- long delta = tw->tw_ttd - jiffies;
+ s32 delta = tw->tw_ttd - inet_tw_time_stamp();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3be1672..17a11e6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -767,6 +767,17 @@
if (flags & (1UL << TCP_TSQ_DEFERRED))
tcp_tsq_handler(sk);
+ /* Here begins the tricky part:
+ * We are called from release_sock() with:
+ * 1) BH disabled
+ * 2) sk_lock.slock spinlock held
+ * 3) socket owned by us (sk->sk_lock.owned == 1)
+ *
+ * But the following code is meant to be called from BH handlers,
+ * so we should keep BH disabled, but release socket ownership early
+ */
+ sock_release_ownership(sk);
+
if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
tcp_write_timer_handler(sk);
__sock_put(sk);
@@ -864,8 +875,8 @@
if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
fclone->fclone == SKB_FCLONE_CLONE))
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
if (unlikely(skb_cloned(skb)))
skb = pskb_copy(skb, gfp_mask);
@@ -2337,6 +2348,7 @@
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned int cur_mss;
+ int err;
/* Inconclusive MTU probe */
if (icsk->icsk_mtup.probe_size) {
@@ -2400,11 +2412,15 @@
skb_headroom(skb) >= 0xFFFF)) {
struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
GFP_ATOMIC);
- return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
- -ENOBUFS;
+ err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ -ENOBUFS;
} else {
- return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+
+ if (likely(!err))
+ TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+ return err;
}
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
@@ -2908,7 +2924,12 @@
space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
MAX_TCP_OPTION_SPACE;
- syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+ space = min_t(size_t, space, fo->size);
+
+ /* limit to order-0 allocations */
+ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+ syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
sk->sk_allocation);
if (syn_data == NULL)
goto fallback;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index d92e558..438a73a 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -138,6 +138,7 @@
config IPV6_VTI
tristate "Virtual (secure) IPv6: tunneling"
select IPV6_TUNNEL
+ select NET_IP_TUNNEL
depends on INET6_XFRM_MODE_TUNNEL
---help---
Tunneling means encapsulating data of one protocol type within
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index fdbfeca..6c7fa08 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -133,10 +133,12 @@
static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(addrconf_hash_lock);
-static void addrconf_verify(unsigned long);
+static void addrconf_verify(void);
+static void addrconf_verify_rtnl(void);
+static void addrconf_verify_work(struct work_struct *);
-static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
-static DEFINE_SPINLOCK(addrconf_verify_lock);
+static struct workqueue_struct *addrconf_wq;
+static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
@@ -151,7 +153,7 @@
u32 flags, u32 noflags);
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
-static void addrconf_dad_timer(unsigned long data);
+static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(unsigned long data);
@@ -247,9 +249,9 @@
__in6_dev_put(idev);
}
-static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
+static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
{
- if (del_timer(&ifp->dad_timer))
+ if (cancel_delayed_work(&ifp->dad_work))
__in6_ifa_put(ifp);
}
@@ -261,12 +263,12 @@
mod_timer(&idev->rs_timer, jiffies + when);
}
-static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
- unsigned long when)
+static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+ unsigned long delay)
{
- if (!timer_pending(&ifp->dad_timer))
+ if (!delayed_work_pending(&ifp->dad_work))
in6_ifa_hold(ifp);
- mod_timer(&ifp->dad_timer, jiffies + when);
+ mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -751,8 +753,9 @@
in6_dev_put(ifp->idev);
- if (del_timer(&ifp->dad_timer))
- pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
+ if (cancel_delayed_work(&ifp->dad_work))
+ pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
+ ifp);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
pr_warn("Freeing alive inet6 address %p\n", ifp);
@@ -849,8 +852,7 @@
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
- setup_timer(&ifa->dad_timer, addrconf_dad_timer,
- (unsigned long)ifa);
+ INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
@@ -990,6 +992,8 @@
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
unsigned long expires;
+ ASSERT_RTNL();
+
spin_lock_bh(&ifp->state_lock);
state = ifp->state;
ifp->state = INET6_IFADDR_STATE_DEAD;
@@ -1021,7 +1025,7 @@
write_unlock_bh(&ifp->idev->lock);
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
ipv6_ifa_notify(RTM_DELADDR, ifp);
@@ -1103,8 +1107,11 @@
* Lifetime is greater than REGEN_ADVANCE time units. In particular,
* an implementation must not create a temporary address with a zero
* Preferred Lifetime.
+ * Use age calculation as in addrconf_verify to avoid unnecessary
+ * temporary addresses being generated.
*/
- if (tmp_prefered_lft <= regen_advance) {
+ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+ if (tmp_prefered_lft <= regen_advance + age) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
@@ -1601,7 +1608,7 @@
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
ifp->flags |= IFA_F_TENTATIVE;
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
@@ -1622,20 +1629,21 @@
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
- } else
+ } else {
ipv6_del_addr(ifp);
+ }
}
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
{
int err = -ENOENT;
- spin_lock(&ifp->state_lock);
+ spin_lock_bh(&ifp->state_lock);
if (ifp->state == INET6_IFADDR_STATE_DAD) {
ifp->state = INET6_IFADDR_STATE_POSTDAD;
err = 0;
}
- spin_unlock(&ifp->state_lock);
+ spin_unlock_bh(&ifp->state_lock);
return err;
}
@@ -1668,7 +1676,12 @@
}
}
- addrconf_dad_stop(ifp, 1);
+ spin_lock_bh(&ifp->state_lock);
+ /* transition from _POSTDAD to _ERRDAD */
+ ifp->state = INET6_IFADDR_STATE_ERRDAD;
+ spin_unlock_bh(&ifp->state_lock);
+
+ addrconf_mod_dad_work(ifp, 0);
}
/* Join to solicited addr multicast group. */
@@ -1677,6 +1690,8 @@
{
struct in6_addr maddr;
+ ASSERT_RTNL();
+
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1688,6 +1703,8 @@
{
struct in6_addr maddr;
+ ASSERT_RTNL();
+
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1698,6 +1715,9 @@
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
+
+ ASSERT_RTNL();
+
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1709,6 +1729,9 @@
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
+
+ ASSERT_RTNL();
+
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -2268,11 +2291,13 @@
return;
}
- ifp->flags |= IFA_F_MANAGETEMPADDR;
update_lft = 0;
create = 1;
+ spin_lock_bh(&ifp->lock);
+ ifp->flags |= IFA_F_MANAGETEMPADDR;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
+ spin_unlock_bh(&ifp->lock);
addrconf_dad_start(ifp);
}
@@ -2323,7 +2348,7 @@
create, now);
in6_ifa_put(ifp);
- addrconf_verify(0);
+ addrconf_verify();
}
}
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
@@ -2472,7 +2497,7 @@
manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
true, jiffies);
in6_ifa_put(ifp);
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -3008,7 +3033,7 @@
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
if (ifa->idev == idev) {
hlist_del_init_rcu(&ifa->addr_lst);
- addrconf_del_dad_timer(ifa);
+ addrconf_del_dad_work(ifa);
goto restart;
}
}
@@ -3046,7 +3071,7 @@
while (!list_empty(&idev->addr_list)) {
ifa = list_first_entry(&idev->addr_list,
struct inet6_ifaddr, if_list);
- addrconf_del_dad_timer(ifa);
+ addrconf_del_dad_work(ifa);
list_del(&ifa->if_list);
@@ -3145,10 +3170,10 @@
rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
ifp->dad_probes = idev->cnf.dad_transmits;
- addrconf_mod_dad_timer(ifp, rand_num);
+ addrconf_mod_dad_work(ifp, rand_num);
}
-static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
@@ -3200,25 +3225,68 @@
read_unlock_bh(&idev->lock);
}
-static void addrconf_dad_timer(unsigned long data)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+ bool begin_dad = false;
+
+ spin_lock_bh(&ifp->state_lock);
+ if (ifp->state != INET6_IFADDR_STATE_DEAD) {
+ ifp->state = INET6_IFADDR_STATE_PREDAD;
+ begin_dad = true;
+ }
+ spin_unlock_bh(&ifp->state_lock);
+
+ if (begin_dad)
+ addrconf_mod_dad_work(ifp, 0);
+}
+
+static void addrconf_dad_work(struct work_struct *w)
+{
+ struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
+ struct inet6_ifaddr,
+ dad_work);
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
+ enum {
+ DAD_PROCESS,
+ DAD_BEGIN,
+ DAD_ABORT,
+ } action = DAD_PROCESS;
+
+ rtnl_lock();
+
+ spin_lock_bh(&ifp->state_lock);
+ if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
+ action = DAD_BEGIN;
+ ifp->state = INET6_IFADDR_STATE_DAD;
+ } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
+ action = DAD_ABORT;
+ ifp->state = INET6_IFADDR_STATE_POSTDAD;
+ }
+ spin_unlock_bh(&ifp->state_lock);
+
+ if (action == DAD_BEGIN) {
+ addrconf_dad_begin(ifp);
+ goto out;
+ } else if (action == DAD_ABORT) {
+ addrconf_dad_stop(ifp, 1);
+ goto out;
+ }
+
if (!ifp->dad_probes && addrconf_dad_end(ifp))
goto out;
- write_lock(&idev->lock);
+ write_lock_bh(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY)) {
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
goto out;
}
spin_lock(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_DEAD) {
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
goto out;
}
@@ -3229,7 +3297,7 @@
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -3237,16 +3305,17 @@
}
ifp->dad_probes--;
- addrconf_mod_dad_timer(ifp,
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+ addrconf_mod_dad_work(ifp,
+ NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
out:
in6_ifa_put(ifp);
+ rtnl_unlock();
}
/* ifp->idev must be at least read locked */
@@ -3273,7 +3342,7 @@
struct in6_addr lladdr;
bool send_rs, send_mld;
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
/*
* Configure the address for reception. Now it is valid.
@@ -3514,23 +3583,23 @@
* Periodic address status verification
*/
-static void addrconf_verify(unsigned long foo)
+static void addrconf_verify_rtnl(void)
{
unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
int i;
+ ASSERT_RTNL();
+
rcu_read_lock_bh();
- spin_lock(&addrconf_verify_lock);
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
- del_timer(&addr_chk_timer);
+ cancel_delayed_work(&addr_chk_work);
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- hlist_for_each_entry_rcu_bh(ifp,
- &inet6_addr_lst[i], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
unsigned long age;
/* When setting preferred_lft to a value not zero or
@@ -3625,13 +3694,22 @@
ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
now, next, next_sec, next_sched);
-
- addr_chk_timer.expires = next_sched;
- add_timer(&addr_chk_timer);
- spin_unlock(&addrconf_verify_lock);
+ mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
rcu_read_unlock_bh();
}
+static void addrconf_verify_work(struct work_struct *w)
+{
+ rtnl_lock();
+ addrconf_verify_rtnl();
+ rtnl_unlock();
+}
+
+static void addrconf_verify(void)
+{
+ mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
+}
+
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
struct in6_addr **peer_pfx)
{
@@ -3688,6 +3766,8 @@
bool was_managetempaddr;
bool had_prefixroute;
+ ASSERT_RTNL();
+
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
@@ -3753,7 +3833,7 @@
!was_managetempaddr, jiffies);
}
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -4383,6 +4463,8 @@
bool update_rs = false;
struct in6_addr ll_addr;
+ ASSERT_RTNL();
+
if (token == NULL)
return -EINVAL;
if (ipv6_addr_any(token))
@@ -4431,7 +4513,7 @@
}
write_unlock_bh(&idev->lock);
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -4633,6 +4715,9 @@
{
struct net *net = dev_net(ifp->idev->dev);
+ if (event)
+ ASSERT_RTNL();
+
inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
switch (event) {
@@ -5241,6 +5326,12 @@
if (err < 0)
goto out_addrlabel;
+ addrconf_wq = create_workqueue("ipv6_addrconf");
+ if (!addrconf_wq) {
+ err = -ENOMEM;
+ goto out_nowq;
+ }
+
/* The addrconf netdev notifier requires that loopback_dev
* has its ipv6 private information allocated and set up
* before it can bring up and give link-local addresses
@@ -5271,7 +5362,7 @@
register_netdevice_notifier(&ipv6_dev_notf);
- addrconf_verify(0);
+ addrconf_verify();
rtnl_af_register(&inet6_ops);
@@ -5299,6 +5390,8 @@
rtnl_af_unregister(&inet6_ops);
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
+ destroy_workqueue(addrconf_wq);
+out_nowq:
unregister_pernet_subsys(&addrconf_ops);
out_addrlabel:
ipv6_addr_label_cleanup();
@@ -5334,7 +5427,8 @@
for (i = 0; i < IN6_ADDR_HSIZE; i++)
WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
spin_unlock_bh(&addrconf_hash_lock);
-
- del_timer(&addr_chk_timer);
+ cancel_delayed_work(&addr_chk_work);
rtnl_unlock();
+
+ destroy_workqueue(addrconf_wq);
}
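
The whole addrconf hunk is one mechanical conversion: timers run in atomic context and may not take the RTNL mutex, so DAD and periodic verification move to delayed work on a dedicated workqueue where rtnl_lock() is legal. The recurring pattern, with illustrative names rather than the addrconf symbols:

    #include <linux/workqueue.h>
    #include <linux/rtnetlink.h>

    static struct workqueue_struct *my_wq;

    static void my_verify_work(struct work_struct *w)
    {
            rtnl_lock();            /* legal here, illegal in a timer */
            /* ... sleepable verification work ... */
            rtnl_unlock();
    }

    static DECLARE_DELAYED_WORK(my_work, my_verify_work);

    static int my_init(void)
    {
            my_wq = create_workqueue("my_wq");
            if (!my_wq)
                    return -ENOMEM;
            /* re-arm or shorten the delay; replaces mod_timer() */
            mod_delayed_work(my_wq, &my_work, 0);
            return 0;
    }
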
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 140748d..8af3eb5 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -212,7 +212,7 @@
found = (nexthdr == target);
if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
- if (target < 0)
+ if (target < 0 || found)
break;
return -ENOENT;
}
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index cf77f3a..447a7fb 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -25,11 +25,11 @@
int ret;
ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
- if (!ret)
+ if (ret)
goto out;
ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
- if (!ret)
+ if (ret)
goto out_rt;
out:
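
The two flipped tests above fix an inverted error check: inet6_add_offload() returns 0 on success, so `if (!ret) goto out;` bailed out on every successful registration and continued on failure. The kernel idiom being restored:

    ret = do_register();    /* 0 on success, negative errno on failure */
    if (ret)
            goto out;       /* unwind only on error */
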
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1e8683b..59f95af 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -89,7 +89,7 @@
unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
- bool tunnel;
+ bool encap, udpfrag;
int nhoff;
if (unlikely(skb_shinfo(skb)->gso_type &
@@ -110,8 +110,8 @@
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
- tunnel = SKB_GSO_CB(skb)->encap_level > 0;
- if (tunnel)
+ encap = SKB_GSO_CB(skb)->encap_level > 0;
+ if (encap)
features = skb->dev->hw_enc_features & netif_skb_features(skb);
SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
@@ -121,6 +121,12 @@
proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+ udpfrag = proto == IPPROTO_UDP && encap;
+ else
+ udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+
ops = rcu_dereference(inet6_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
@@ -133,13 +139,9 @@
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
- if (tunnel) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
skb->network_header = (u8 *)ipv6h - skb->head;
- if (!tunnel && proto == IPPROTO_UDP) {
+ if (udpfrag) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
@@ -148,6 +150,8 @@
offset += (ntohs(ipv6h->payload_len) -
sizeof(struct frag_hdr));
}
+ if (encap)
+ skb_reset_inner_headers(skb);
}
out:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 070a2fa..64d6073 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -530,9 +530,6 @@
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- to->nf_trace = from->nf_trace;
-#endif
skb_copy_secmark(to, from);
}
@@ -1104,21 +1101,19 @@
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt,
- bool pmtuprobe)
+ unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
- *mtu = *mtu - rt->dst.header_len;
+ *mtu = orig_mtu - rt->dst.header_len;
} else {
/*
* this fragment is not first, the headers
* space is regarded as data space.
*/
- *mtu = min(*mtu, pmtuprobe ?
- rt->dst.dev->mtu :
- dst_mtu(rt->dst.path));
+ *mtu = orig_mtu;
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
@@ -1135,7 +1130,7 @@
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1217,6 +1212,7 @@
dst_exthdrlen = 0;
mtu = cork->fragsize;
}
+ orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1314,8 +1310,7 @@
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
- np->pmtudisc >=
- IPV6_PMTUDISC_PROBE);
+ orig_mtu);
skb_prev = skb;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0eb4038..8737400 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2349,13 +2349,14 @@
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2423,7 +2424,7 @@
if (skb == NULL)
goto errout;
- err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2462,7 +2463,8 @@
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2476,7 +2478,8 @@
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index fb9beb7..587bbdc 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -135,6 +135,7 @@
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
+ fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 11dac21..fba54a4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1513,7 +1513,7 @@
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
if (!rt) {
err = -ENOMEM;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3dfbcf1..b4d74c8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -475,6 +475,7 @@
ipip6_tunnel_unlink(sitn, tunnel);
ipip6_tunnel_del_prl(tunnel, NULL);
}
+ ip_tunnel_dst_reset_all(tunnel);
dev_put(dev);
}
@@ -1082,6 +1083,7 @@
t->parms.link = p->link;
ipip6_tunnel_bind_dev(t->dev);
}
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(t->dev);
}
@@ -1112,6 +1114,7 @@
t->ip6rd.relay_prefix = relay_prefix;
t->ip6rd.prefixlen = ip6rd->prefixlen;
t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(t->dev);
return 0;
}
@@ -1271,6 +1274,7 @@
err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
break;
}
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(dev);
break;
@@ -1326,6 +1330,9 @@
static void ipip6_dev_free(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -1375,6 +1382,12 @@
u64_stats_init(&ipip6_tunnel_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -1405,6 +1418,12 @@
u64_stats_init(&ipip6_fb_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index e7359f9..b261ee8 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -113,7 +113,7 @@
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1a04c13..7932697 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -433,12 +433,13 @@
return 0;
}
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
struct xfrm_user_sec_ctx *uctx = NULL;
int ctx_size = sec_ctx->sadb_x_ctx_len;
- uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL);
+ uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
if (!uctx)
return NULL;
@@ -1124,7 +1125,7 @@
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
goto out;
@@ -2231,14 +2232,14 @@
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx) {
err = -ENOBUFS;
goto out;
}
- err = security_xfrm_policy_alloc(&xp->security, uctx);
+ err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
@@ -2335,12 +2336,12 @@
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
return -ENOMEM;
- err = security_xfrm_policy_alloc(&pol_ctx, uctx);
+ err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
return err;
@@ -3239,8 +3240,8 @@
}
if ((*dir = verify_sec_ctx_len(p)))
goto out;
- uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
- *dir = security_xfrm_policy_alloc(&xp->security, uctx);
+ uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
+ *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);
kfree(uctx);
if (*dir)
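
The extra gfp_t argument exists because the last caller in this hunk, the pfkey_compile_policy() path, runs under a socket lock and must not sleep, so it passes GFP_ATOMIC while process-context callers keep GFP_KERNEL. The general shape of threading an allocation context through a helper:

    /* Sketch; alloc_ctx() is illustrative, not a pfkey symbol. */
    static void *alloc_ctx(size_t len, gfp_t gfp)
    {
            /* GFP_KERNEL may sleep to satisfy the request;
             * GFP_ATOMIC may not, and fails sooner under pressure
             */
            return kmalloc(len, gfp);
    }
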
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 735d0f6..85d9d94 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -112,7 +112,6 @@
spinlock_t l2tp_session_hlist_lock;
};
-static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
@@ -1863,7 +1862,7 @@
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_len parameters are set.
*/
-static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1876,6 +1875,7 @@
}
}
+EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 1f01ba3..3f93ccd 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -263,6 +263,7 @@
int length, int (*payload_hook)(struct sk_buff *skb));
int l2tp_session_queue_purge(struct l2tp_session *session);
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+void l2tp_session_set_header_len(struct l2tp_session *session, int version);
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
int hdr_len);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4cfd722..bd7387a 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -578,8 +578,10 @@
if (info->attrs[L2TP_ATTR_RECV_SEQ])
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
- if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+ l2tp_session_set_header_len(session, session->tunnel->version);
+ }
if (info->attrs[L2TP_ATTR_LNS_MODE])
session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index be5fadf..5990919 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -254,12 +254,14 @@
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
- session->name);
+ l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ "%s: recv %d byte data frame, passing to L2TP socket\n",
+ session->name, data_len);
- /* Not bound. Nothing we can do, so discard. */
- atomic_long_inc(&session->stats.rx_errors);
- kfree_skb(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
+ atomic_long_inc(&session->stats.rx_errors);
+ kfree_skb(skb);
+ }
}
return;
@@ -1312,6 +1314,7 @@
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
+ l2tp_session_set_header_len(session, session->tunnel->version);
l2tp_info(session, PPPOL2TP_MSG_CONTROL,
"%s: set send_seq=%d\n",
session->name, session->send_seq);
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f43613a..0c1ecfd 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -100,6 +100,12 @@
}
max_bw = max(max_bw, width);
}
+
+ /* use the configured bandwidth in case of monitor interface */
+ sdata = rcu_dereference(local->monitor_sdata);
+ if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
+ max_bw = max(max_bw, conf->def.width);
+
rcu_read_unlock();
return max_bw;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3701930..5e44e317 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1692,14 +1692,8 @@
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data);
-static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
-{
- ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
-}
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 2802f9d..ad8b377 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -36,6 +36,7 @@
sdata->vif.addr);
nullfunc->frame_control = fc;
nullfunc->duration_id = 0;
+ nullfunc->seq_ctrl = 0;
/* no address resolution for this frame -> set addr 1 immediately */
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index fc1d824..245dce9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -222,6 +222,7 @@
switch (vht_oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_USE_HT:
vht_chandef.width = chandef->width;
+ vht_chandef.center_freq1 = chandef->center_freq1;
break;
case IEEE80211_VHT_CHANWIDTH_80MHZ:
vht_chandef.width = NL80211_CHAN_WIDTH_80;
@@ -271,6 +272,28 @@
ret = 0;
out:
+ /*
+ * When tracking the current AP, don't do any further checks if the
+ * new chandef is identical to the one we're currently using for the
+ * connection. This keeps us from playing ping-pong with regulatory,
+ * without it the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - the code below detects an unsupported channel, downgrades, and
+ * we disconnect from the AP in the caller
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+ if (tracking &&
+ cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
+ return ret;
+
/* don't print the message below for VHT mismatch if VHT is disabled */
if (ret & IEEE80211_STA_DISABLE_VHT)
vht_chandef = *chandef;
@@ -3753,6 +3776,7 @@
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
+ sta_info_free(local, new_sta);
return -EINVAL;
}
rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c24ca0d..3e57f96 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1128,6 +1128,13 @@
sta->sta.addr, sta->sta.aid);
if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ /*
+ * Clear the flag only if the other one is still set
+ * so that the TX path won't start TX'ing new frames
+ * directly ... In the case that the driver flag isn't
+ * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
+ */
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
sta->sta.addr, sta->sta.aid);
return;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index decd30c..137a192 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,7 +91,7 @@
return -ENOENT;
}
-static void cleanup_single_sta(struct sta_info *sta)
+static void __cleanup_single_sta(struct sta_info *sta)
{
int ac, i;
struct tid_ampdu_tx *tid_tx;
@@ -99,7 +99,8 @@
struct ieee80211_local *local = sdata->local;
struct ps_data *ps;
- if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ps = &sdata->bss->ps;
@@ -109,6 +110,7 @@
return;
clear_sta_flag(sta, WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
atomic_dec(&ps->num_sta_ps);
sta_info_recalc_tim(sta);
@@ -139,7 +141,14 @@
ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
kfree(tid_tx);
}
+}
+static void cleanup_single_sta(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ __cleanup_single_sta(sta);
sta_info_free(local, sta);
}
@@ -330,6 +339,7 @@
rcu_read_unlock();
spin_lock_init(&sta->lock);
+ spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -487,21 +497,26 @@
goto out_err;
}
- /* notify driver */
- err = sta_info_insert_drv_state(local, sdata, sta);
- if (err)
- goto out_err;
-
local->num_sta++;
local->sta_generation++;
smp_mb();
+ /* simplify things and don't accept BA sessions yet */
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+
/* make the station visible */
sta_info_hash_add(local, sta);
list_add_rcu(&sta->list, &local->sta_list);
+ /* notify driver */
+ err = sta_info_insert_drv_state(local, sdata, sta);
+ if (err)
+ goto out_remove;
+
set_sta_flag(sta, WLAN_STA_INSERTED);
+ /* accept BA sessions now */
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_recalc_min_chandef(sdata);
ieee80211_sta_debugfs_add(sta);
@@ -522,6 +537,12 @@
mesh_accept_plinks_update(sdata);
return 0;
+ out_remove:
+ sta_info_hash_del(local, sta);
+ list_del_rcu(&sta->list);
+ local->num_sta--;
+ synchronize_net();
+ __cleanup_single_sta(sta);
out_err:
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
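The reordering above publishes the station (hash table and RCU list) before notifying the driver, blocks BA sessions across that window, and adds the out_remove label to unwind the publication if the driver rejects the insert. A hedged userspace sketch of that publish-then-notify-with-rollback shape; all names here are stand-ins:

#include <stdio.h>

static int driver_accepts;        /* stand-in for the driver's add-station hook */
static int sta_visible;           /* stand-in for hash table + RCU list state */

static int driver_notify(void) { return driver_accepts ? 0 : -1; }

static int insert_sta(void)
{
        /* don't accept BA sessions yet (WLAN_STA_BLOCK_BA in the patch) */
        sta_visible = 1;              /* make the station visible first */

        if (driver_notify() < 0) {    /* then notify the driver */
                sta_visible = 0;      /* roll back visibility */
                /* wait for readers, then free the entry (elided) */
                return -1;
        }
        /* accept BA sessions now */
        return 0;
}

int main(void)
{
        driver_accepts = 0;
        printf("insert: %d, visible: %d\n", insert_sta(), sta_visible);
        driver_accepts = 1;
        printf("insert: %d, visible: %d\n", insert_sta(), sta_visible);
        return 0;
}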
@@ -1071,10 +1092,14 @@
}
EXPORT_SYMBOL(ieee80211_find_sta);
-static void clear_sta_ps_flags(void *_sta)
+/* powersave support code */
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
- struct sta_info *sta = _sta;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
+ unsigned long flags;
struct ps_data *ps;
if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1085,20 +1110,6 @@
else
return;
- clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
- if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
- atomic_dec(&ps->num_sta_ps);
-}
-
-/* powersave support code */
-void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct ieee80211_local *local = sdata->local;
- struct sk_buff_head pending;
- int filtered = 0, buffered = 0, ac;
- unsigned long flags;
-
clear_sta_flag(sta, WLAN_STA_SP);
BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
@@ -1109,6 +1120,8 @@
skb_queue_head_init(&pending);
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ spin_lock(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1127,7 +1140,12 @@
buffered += tmp - count;
}
- ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+ ieee80211_add_pending_skbs(local, &pending);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+ spin_unlock(&sta->ps_lock);
+
+ atomic_dec(&ps->num_sta_ps);
/* This station just woke up and isn't aware of our SMPS state */
if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
@@ -1188,6 +1206,7 @@
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+ nullfunc->seq_ctrl = 0;
skb->priority = tid;
skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d77ff70..d3a6d82 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -267,6 +267,7 @@
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
* @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
* @ps_tx_buf: buffers (per AC) of frames to transmit to this station
* when it leaves power saving state or polls
* @tx_filtered: buffers (per AC) of frames we already tried to
@@ -356,10 +357,8 @@
/* use the accessors defined below */
unsigned long _flags;
- /*
- * STA powersave frame queues, no more than the internal
- * locking required.
- */
+ /* STA powersave lock and frame queues */
+ spinlock_t ps_lock;
struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
unsigned long driver_buffered_tids;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 97a02d3..4080c61 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -478,6 +478,20 @@
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
+
+ /* sync with ieee80211_sta_ps_deliver_wakeup */
+ spin_lock(&sta->ps_lock);
+ /*
+ * STA woke up in the meantime and all the frames on ps_tx_buf have
+ * been queued to pending queue. No reordering can happen, go
+ * ahead and Tx the packet.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ spin_unlock(&sta->ps_lock);
+ return TX_CONTINUE;
+ }
+
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
@@ -492,6 +506,7 @@
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
+ spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
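The two "sync with ..." comments in sta_info.c and tx.c above describe one check-under-lock pattern: the TX buffering path and the wakeup delivery path both take sta->ps_lock, so a frame can no longer be queued to ps_tx_buf after the wakeup path has flushed the queue and cleared the flags. A minimal pthread sketch of the same pattern, with the spinlock modeled as a mutex and the flags and queue reduced to ints:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ps_lock = PTHREAD_MUTEX_INITIALIZER;
static int ps_sta;                 /* models WLAN_STA_PS_STA/PS_DRIVER */
static int buffered;               /* models ps_tx_buf */

/* TX path: only buffer if the station is still asleep. */
static int tx_frame(void)
{
        pthread_mutex_lock(&ps_lock);
        if (!ps_sta) {                 /* woke up meanwhile: send directly */
                pthread_mutex_unlock(&ps_lock);
                return 0;              /* TX_CONTINUE */
        }
        buffered++;                    /* queue to the PS buffer */
        pthread_mutex_unlock(&ps_lock);
        return 1;                      /* queued */
}

/* Wakeup path: flush the buffer and clear the flag atomically
 * with respect to tx_frame(). */
static void deliver_wakeup(void)
{
        pthread_mutex_lock(&ps_lock);
        printf("flushing %d buffered frames\n", buffered);
        buffered = 0;
        ps_sta = 0;
        pthread_mutex_unlock(&ps_lock);
}

int main(void)
{
        ps_sta = 1;
        tx_frame();
        deliver_wakeup();
        printf("after wakeup, tx_frame()=%d\n", tx_frame());   /* 0 */
        return 0;
}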
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 676dc09..b8700d4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -435,9 +435,8 @@
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
@@ -461,9 +460,6 @@
__skb_queue_tail(&local->pending[queue], skb);
}
- if (fn)
- fn(data);
-
for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -1741,6 +1737,26 @@
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
/*
+ * Reconfigure sched scan if it was interrupted by FW restart or
+ * suspend.
+ */
+ mutex_lock(&local->mtx);
+ sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
+ lockdep_is_held(&local->mtx));
+ if (sched_scan_sdata && local->sched_scan_req)
+ /*
+ * Sched scan stopped, but we don't want to report it. Instead,
+ * we're trying to reschedule.
+ */
+ if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+ local->sched_scan_req))
+ sched_scan_stopped = true;
+ mutex_unlock(&local->mtx);
+
+ if (sched_scan_stopped)
+ cfg80211_sched_scan_stopped(local->hw.wiphy);
+
+ /*
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
@@ -1768,26 +1784,6 @@
WARN_ON(1);
#endif
- /*
- * Reconfigure sched scan if it was interrupted by FW restart or
- * suspend.
- */
- mutex_lock(&local->mtx);
- sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
- lockdep_is_held(&local->mtx));
- if (sched_scan_sdata && local->sched_scan_req)
- /*
- * Sched scan stopped, but we don't want to report it. Instead,
- * we're trying to reschedule.
- */
- if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
- local->sched_scan_req))
- sched_scan_stopped = true;
- mutex_unlock(&local->mtx);
-
- if (sched_scan_stopped)
- cfg80211_sched_scan_stopped(local->hw.wiphy);
-
return 0;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 21211c6..d51422c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -154,6 +154,11 @@
return IEEE80211_AC_BE;
}
+ if (skb->protocol == sdata->control_port_protocol) {
+ skb->priority = 7;
+ return ieee80211_downgrade_queue(sdata, skb);
+ }
+
/* use the data classifier to determine what 802.1d tag the
* data frame has */
rcu_read_lock();
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index bb322d0..b9f0e03 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1310,27 +1310,22 @@
}
static int
-ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
+ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
int ret;
- if (cda[CTA_NAT_DST]) {
- ret = ctnetlink_parse_nat_setup(ct,
- NF_NAT_MANIP_DST,
- cda[CTA_NAT_DST]);
- if (ret < 0)
- return ret;
- }
- if (cda[CTA_NAT_SRC]) {
- ret = ctnetlink_parse_nat_setup(ct,
- NF_NAT_MANIP_SRC,
- cda[CTA_NAT_SRC]);
- if (ret < 0)
- return ret;
- }
- return 0;
+ ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
+ cda[CTA_NAT_DST]);
+ if (ret < 0)
+ return ret;
+
+ ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
+ cda[CTA_NAT_SRC]);
+ return ret;
#else
+ if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+ return 0;
return -EOPNOTSUPP;
#endif
}
@@ -1659,11 +1654,9 @@
goto err2;
}
- if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
- err = ctnetlink_change_nat(ct, cda);
- if (err < 0)
- goto err2;
- }
+ err = ctnetlink_setup_nat(ct, cda);
+ if (err < 0)
+ goto err2;
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d3f5cd6..52ca952 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -432,15 +432,15 @@
}
EXPORT_SYMBOL(nf_nat_setup_info);
-unsigned int
-nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+static unsigned int
+__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
/* Force range to this IP; let proto decide mapping for
* per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
* Use reply in case it's already been mangled (eg local packet).
*/
union nf_inet_addr ip =
- (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+ (manip == NF_NAT_MANIP_SRC ?
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
struct nf_nat_range range = {
@@ -448,7 +448,13 @@
.min_addr = ip,
.max_addr = ip,
};
- return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+ return nf_nat_setup_info(ct, &range, manip);
+}
+
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+ return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
@@ -702,9 +708,9 @@
static int
nfnetlink_parse_nat(const struct nlattr *nat,
- const struct nf_conn *ct, struct nf_nat_range *range)
+ const struct nf_conn *ct, struct nf_nat_range *range,
+ const struct nf_nat_l3proto *l3proto)
{
- const struct nf_nat_l3proto *l3proto;
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
@@ -714,38 +720,46 @@
if (err < 0)
return err;
- rcu_read_lock();
- l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
- if (l3proto == NULL) {
- err = -EAGAIN;
- goto out;
- }
err = l3proto->nlattr_to_range(tb, range);
if (err < 0)
- goto out;
+ return err;
if (!tb[CTA_NAT_PROTO])
- goto out;
+ return 0;
- err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-out:
- rcu_read_unlock();
- return err;
+ return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
+/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
struct nf_nat_range range;
+ const struct nf_nat_l3proto *l3proto;
int err;
- err = nfnetlink_parse_nat(attr, ct, &range);
+ /* Should not happen, restricted to creating new conntracks
+ * via ctnetlink.
+ */
+ if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
+ return -EEXIST;
+
+ /* Make sure that L3 NAT is there by the time we call nf_nat_setup_info to
+ * attach the null binding, otherwise this may oops.
+ */
+ l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+ if (l3proto == NULL)
+ return -EAGAIN;
+
+ /* No NAT information has been passed, allocate the null-binding */
+ if (attr == NULL)
+ return __nf_nat_alloc_null_binding(ct, manip);
+
+ err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
if (err < 0)
return err;
- if (nf_nat_initialized(ct, manip))
- return -EEXIST;
return nf_nat_setup_info(ct, &range, manip);
}
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index f072fe8..108120f 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -354,13 +354,16 @@
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
- if (!skb)
+ if (!skb) {
+ skb_tx_error(entskb);
return NULL;
+ }
nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
sizeof(struct nfgenmsg), 0);
if (!nlh) {
+ skb_tx_error(entskb);
kfree_skb(skb);
return NULL;
}
@@ -488,13 +491,15 @@
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
- skb_zerocopy(skb, entskb, data_len, hlen);
+ if (skb_zerocopy(skb, entskb, data_len, hlen))
+ goto nla_put_failure;
}
nlh->nlmsg_len = skb->len;
return skb;
nla_put_failure:
+ skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
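This hunk and the openvswitch one further down apply the same rule: if the zero-copy step fails, signal the failure back on the source buffer via skb_tx_error() before discarding the partially built message. A small sketch of that check-and-propagate shape; the helpers here are stand-ins, not kernel APIs:

#include <stdio.h>

static int zerocopy_ok;            /* stand-in for skb_zerocopy()'s outcome */

static int zerocopy(void) { return zerocopy_ok ? 0 : -1; }
static void tx_error(const char *who) { printf("tx error signaled on %s\n", who); }

/* Build a message; on any failure, report the error on the source
 * buffer instead of silently dropping it. */
static int build_message(void)
{
        if (zerocopy() < 0) {
                tx_error("source skb");
                return -1;              /* caller frees the half-built message */
        }
        return 0;
}

int main(void)
{
        zerocopy_ok = 0;
        printf("build: %d\n", build_message());
        return 0;
}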
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e8254ad..425cf39 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -116,7 +116,7 @@
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID: {
const struct dst_entry *dst = skb_dst(skb);
@@ -199,7 +199,7 @@
case NFT_META_OIFTYPE:
case NFT_META_SKUID:
case NFT_META_SKGID:
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID:
#endif
#ifdef CONFIG_NETWORK_SECMARK
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index a2aeb31..85daa84 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -135,7 +135,8 @@
if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
return ERR_PTR(-EINVAL);
- if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+ if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+ base != NFT_PAYLOAD_LL_HEADER)
return &nft_payload_fast_ops;
else
return &nft_payload_ops;
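The fix above tightens the fast-path test: a len-byte load at offset may only use the fast ops when len is a power of two and offset is len-aligned, since IS_ALIGNED()'s mask arithmetic is only meaningful for power-of-two sizes (len == 3 previously slipped through). A compact sketch of the predicate:

#include <stdio.h>

static int is_power_of_2(unsigned int n) { return n && !(n & (n - 1)); }

/* only valid when len is a power of two, hence the check order below */
static int is_aligned(unsigned int off, unsigned int len)
{
        return (off & (len - 1)) == 0;
}

/* fast path only for 1/2/4-byte, naturally aligned loads */
static int use_fast_ops(unsigned int offset, unsigned int len)
{
        return len <= 4 && is_power_of_2(len) && is_aligned(offset, len);
}

int main(void)
{
        printf("%d\n", use_fast_ops(0, 3));     /* 0: len 3 is not a power of two */
        printf("%d\n", use_fast_ops(4, 4));     /* 1 */
        return 0;
}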
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 8a310f2..b718a52 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -21,9 +21,9 @@
{
switch (pkt->ops->pf) {
case NFPROTO_IPV4:
- nft_reject_ipv4_eval(expr, data, pkt);
+ return nft_reject_ipv4_eval(expr, data, pkt);
case NFPROTO_IPV6:
- nft_reject_ipv6_eval(expr, data, pkt);
+ return nft_reject_ipv6_eval(expr, data, pkt);
}
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fdf5135..04748ab6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1489,8 +1489,8 @@
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
- /* Only superuser is allowed to send multicasts */
- if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+ if ((nladdr->nl_groups || nladdr->nl_pid) &&
+ !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
if (!nlk->portid)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 46bda01..56db888 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -301,7 +301,7 @@
rc = __nci_request(ndev, nci_reset_req, 0,
msecs_to_jiffies(NCI_RESET_TIMEOUT));
- if (ndev->ops->setup(ndev))
+ if (ndev->ops->setup)
ndev->ops->setup(ndev);
if (!rc) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e9a48ba..270b77d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -464,7 +464,9 @@
}
nla->nla_len = nla_attr_size(skb->len);
- skb_zerocopy(user_skb, skb, skb->len, hlen);
+ err = skb_zerocopy(user_skb, skb, skb->len, hlen);
+ if (err)
+ goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
@@ -478,6 +480,8 @@
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
+ if (err)
+ skb_tx_error(skb);
kfree_skb(nskb);
return err;
}
@@ -1174,7 +1178,7 @@
struct datapath *dp;
dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
- if (!dp)
+ if (IS_ERR(dp))
return;
WARN(dp->user_features, "Dropping previously announced user features\n");
@@ -1762,11 +1766,12 @@
int bucket = cb->args[0], skip = cb->args[1];
int i, j = 0;
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
- if (!dp)
- return -ENODEV;
-
rcu_read_lock();
+ dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ if (!dp) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 16f4b46..2998989 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -73,6 +73,7 @@
if ((flow->key.eth.type == htons(ETH_P_IP) ||
flow->key.eth.type == htons(ETH_P_IPV6)) &&
+ flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
flow->key.ip.proto == IPPROTO_TCP &&
likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
@@ -91,7 +92,7 @@
unsigned long *used, __be16 *tcp_flags)
{
spin_lock(&stats->lock);
- if (time_after(stats->used, *used))
+ if (!*used || time_after(stats->used, *used))
*used = stats->used;
*tcp_flags |= stats->tcp_flags;
ovs_stats->n_packets += stats->packet_count;
@@ -102,30 +103,24 @@
void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
- int cpu, cur_cpu;
+ int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
+ local_bh_disable();
if (!flow->stats.is_percpu) {
stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
} else {
- cur_cpu = get_cpu();
for_each_possible_cpu(cpu) {
struct flow_stats *stats;
- if (cpu == cur_cpu)
- local_bh_disable();
-
stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
stats_read(stats, ovs_stats, used, tcp_flags);
-
- if (cpu == cur_cpu)
- local_bh_enable();
}
- put_cpu();
}
+ local_bh_enable();
}
static void stats_reset(struct flow_stats *stats)
@@ -140,25 +135,17 @@
void ovs_flow_stats_clear(struct sw_flow *flow)
{
- int cpu, cur_cpu;
+ int cpu;
+ local_bh_disable();
if (!flow->stats.is_percpu) {
stats_reset(flow->stats.stat);
} else {
- cur_cpu = get_cpu();
-
for_each_possible_cpu(cpu) {
-
- if (cpu == cur_cpu)
- local_bh_disable();
-
stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
- if (cpu == cur_cpu)
- local_bh_enable();
}
- put_cpu();
}
+ local_bh_enable();
}
static int check_header(struct sk_buff *skb, int len)
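The flow.c rewrite above replaces the per-iteration get_cpu()/local_bh_disable() dance with a single local_bh_disable() around the whole per-CPU walk, so every CPU's stats are read under the same protection. A userspace sketch of that aggregate-inside-one-critical-section shape, with BH disabling modeled here as one mutex:

#include <pthread.h>
#include <stdio.h>

#define NCPU 4

static pthread_mutex_t bh_lock = PTHREAD_MUTEX_INITIALIZER; /* models local_bh_disable() */
static unsigned long percpu_packets[NCPU] = { 1, 2, 3, 4 };

/* Aggregate all per-CPU counters inside one critical section
 * instead of toggling protection per iteration. */
static unsigned long stats_get(void)
{
        unsigned long total = 0;
        int cpu;

        pthread_mutex_lock(&bh_lock);
        for (cpu = 0; cpu < NCPU; cpu++)
                total += percpu_packets[cpu];
        pthread_mutex_unlock(&bh_lock);
        return total;
}

int main(void)
{
        printf("packets=%lu\n", stats_get());   /* 10 */
        return 0;
}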
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1313145..a07d55e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -273,11 +273,12 @@
void qdisc_list_add(struct Qdisc *q)
{
- struct Qdisc *root = qdisc_dev(q)->qdisc;
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ struct Qdisc *root = qdisc_dev(q)->qdisc;
- WARN_ON_ONCE(root == &noop_qdisc);
- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+ WARN_ON_ONCE(root == &noop_qdisc);
list_add_tail(&q->list, &root->list);
+ }
}
EXPORT_SYMBOL(qdisc_list_add);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 08ef7a4..21e2517 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -601,6 +601,7 @@
{
struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
+ void *old_fq_root;
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
@@ -615,13 +616,19 @@
for (idx = 0; idx < (1U << log); idx++)
array[idx] = RB_ROOT;
- if (q->fq_root) {
- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
- fq_free(q->fq_root);
- }
+ sch_tree_lock(sch);
+
+ old_fq_root = q->fq_root;
+ if (old_fq_root)
+ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
+
q->fq_root = array;
q->fq_trees_log = log;
+ sch_tree_unlock(sch);
+
+ fq_free(old_fq_root);
+
return 0;
}
@@ -697,9 +704,11 @@
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
- if (!err)
+ if (!err) {
+ sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
-
+ sch_tree_lock(sch);
+ }
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);
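The sch_fq change above narrows sch_tree_lock() to the rehash and pointer swap, and defers fq_free() of the old root until after the lock is dropped, keeping memory release out of the critical section. A sketch of the swap-under-lock, free-outside idiom; the rehash step is elided:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;

static int resize(size_t n)
{
        int *new_table = calloc(n, sizeof(*new_table));
        int *old_table;

        if (!new_table)
                return -1;

        pthread_mutex_lock(&tree_lock);
        old_table = table;            /* rehash(old_table, new_table) goes here */
        table = new_table;            /* publish the new table */
        pthread_mutex_unlock(&tree_lock);

        free(old_table);              /* free outside the critical section */
        return 0;
}

int main(void)
{
        printf("resize: %d\n", resize(16));
        free(table);
        return 0;
}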
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1cb413f..4f505a0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -334,18 +334,6 @@
qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
tb[TCA_TBF_PTAB]));
- if (q->qdisc != &noop_qdisc) {
- err = fifo_set_limit(q->qdisc, qopt->limit);
- if (err)
- goto done;
- } else if (qopt->limit > 0) {
- child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto done;
- }
- }
-
buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -390,6 +378,18 @@
goto done;
}
+ if (q->qdisc != &noop_qdisc) {
+ err = fifo_set_limit(q->qdisc, qopt->limit);
+ if (err)
+ goto done;
+ } else if (qopt->limit > 0) {
+ child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto done;
+ }
+ }
+
sch_tree_lock(sch);
if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f558433..ee13d28 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1239,78 +1239,107 @@
}
/* Update the retran path for sending a retransmitted packet.
- * Round-robin through the active transports, else round-robin
- * through the inactive transports as this is the next best thing
- * we can try.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ * When there is outbound data to send and the primary path
+ * becomes inactive (e.g., due to failures), or where the
+ * SCTP user explicitly requests to send data to an
+ * inactive destination transport address, before reporting
+ * an error to its ULP, the SCTP endpoint should try to send
+ * the data to an alternate active destination transport
+ * address if one exists.
+ *
+ * When retransmitting data that timed out, if the endpoint
+ * is multihomed, it should consider each source-destination
+ * address pair in its retransmission selection policy.
+ * When retransmitting timed-out data, the endpoint should
+ * attempt to pick the most divergent source-destination
+ * pair from the original source-destination pair to which
+ * the packet was transmitted.
+ *
+ * Note: Rules for picking the most divergent source-destination
+ * pair are an implementation decision and are not specified
+ * within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_state_to_prio_map[], e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
*/
-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+static const u8 sctp_trans_state_to_prio_map[] = {
+ [SCTP_ACTIVE] = 3, /* best case */
+ [SCTP_UNKNOWN] = 2,
+ [SCTP_PF] = 1,
+ [SCTP_INACTIVE] = 0, /* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
{
- struct sctp_transport *t, *next;
- struct list_head *head = &asoc->peer.transport_addr_list;
- struct list_head *pos;
-
- if (asoc->peer.transport_count == 1)
- return;
-
- /* Find the next transport in a round-robin fashion. */
- t = asoc->peer.retran_path;
- pos = &t->transports;
- next = NULL;
-
- while (1) {
- /* Skip the head. */
- if (pos->next == head)
- pos = head->next;
- else
- pos = pos->next;
-
- t = list_entry(pos, struct sctp_transport, transports);
-
- /* We have exhausted the list, but didn't find any
- * other active transports. If so, use the next
- * transport.
- */
- if (t == asoc->peer.retran_path) {
- t = next;
- break;
- }
-
- /* Try to find an active transport. */
-
- if ((t->state == SCTP_ACTIVE) ||
- (t->state == SCTP_UNKNOWN)) {
- break;
- } else {
- /* Keep track of the next transport in case
- * we don't find any active transport.
- */
- if (t->state != SCTP_UNCONFIRMED && !next)
- next = t;
- }
- }
-
- if (t)
- asoc->peer.retran_path = t;
- else
- t = asoc->peer.retran_path;
-
- pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
- &t->ipaddr.sa);
+ return sctp_trans_state_to_prio_map[trans->state];
}
-/* Choose the transport for sending retransmit packet. */
-struct sctp_transport *sctp_assoc_choose_alter_transport(
- struct sctp_association *asoc, struct sctp_transport *last_sent_to)
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+ struct sctp_transport *best)
+{
+ if (best == NULL)
+ return curr;
+
+ return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+}
+
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+ struct sctp_transport *trans = asoc->peer.retran_path;
+ struct sctp_transport *trans_next = NULL;
+
+ /* We're done as we only have the one and only path. */
+ if (asoc->peer.transport_count == 1)
+ return;
+ /* If active_path and retran_path are the same and active,
+ * then this is the only active path. Use it.
+ */
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ asoc->peer.active_path->state == SCTP_ACTIVE)
+ return;
+
+ /* Iterate from retran_path's successor back to retran_path. */
+ for (trans = list_next_entry(trans, transports); 1;
+ trans = list_next_entry(trans, transports)) {
+ /* Manually skip the head element. */
+ if (&trans->transports == &asoc->peer.transport_addr_list)
+ continue;
+ if (trans->state == SCTP_UNCONFIRMED)
+ continue;
+ trans_next = sctp_trans_elect_best(trans, trans_next);
+ /* Active is good enough for immediate return. */
+ if (trans_next->state == SCTP_ACTIVE)
+ break;
+ /* We've reached the end, time to update path. */
+ if (trans == asoc->peer.retran_path)
+ break;
+ }
+
+ if (trans_next != NULL)
+ asoc->peer.retran_path = trans_next;
+
+ pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+ __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
+}
+
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+ struct sctp_transport *last_sent_to)
{
/* If this is the first time packet is sent, use the active path,
* else use the retran path. If the last packet was sent over the
* retran path, update the retran path and use it.
*/
- if (!last_sent_to)
+ if (last_sent_to == NULL) {
return asoc->peer.active_path;
- else {
+ } else {
if (last_sent_to == asoc->peer.retran_path)
sctp_assoc_update_retran_path(asoc);
+
return asoc->peer.retran_path;
}
}
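The new retransmit-path selection above maps transport states to scores (SCTP_ACTIVE best, SCTP_INACTIVE worst) and keeps the best-scoring candidate while walking the peer list, stopping early once an active transport is found. A compact sketch of that elect-best walk; the enum values are illustrative:

#include <stdio.h>

enum state { INACTIVE, PF, UNKNOWN, ACTIVE };

static const unsigned char score[] = {
        [INACTIVE] = 0, [PF] = 1, [UNKNOWN] = 2, [ACTIVE] = 3,
};

static int elect_best(const enum state *trans, int n)
{
        int best = -1, i;

        for (i = 0; i < n; i++) {
                if (best < 0 || score[trans[i]] > score[trans[best]])
                        best = i;
                if (trans[best] == ACTIVE)      /* good enough, stop early */
                        break;
        }
        return best;
}

int main(void)
{
        enum state peers[] = { PF, UNKNOWN, ACTIVE, INACTIVE };

        printf("best=%d\n", elect_best(peers, 4));      /* 2 */
        return 0;
}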
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 632090b..3a1767e 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1421,8 +1421,8 @@
BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
- /* Free the chunk skb data and the SCTP_chunk stub itself. */
- dev_kfree_skb(chunk->skb);
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
SCTP_DBG_OBJCNT_DEC(chunk);
kmem_cache_free(sctp_chunk_cachep, chunk);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bd85915..5d6883f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -495,11 +495,12 @@
}
/* If the transport error count is greater than the pf_retrans
- * threshold, and less than pathmaxrtx, then mark this transport
- * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
- * point 1
+ * threshold, and less than pathmaxrxt, and if the current state
+ * is not SCTP_UNCONFIRMED, then mark this transport as Partially
+ * Failed, see SCTP Quick Failover Draft, section 5.1
*/
if ((transport->state != SCTP_PF) &&
+ (transport->state != SCTP_UNCONFIRMED) &&
(asoc->pf_retrans < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 591b44d..01e0024 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -758,6 +758,12 @@
struct sctp_chunk auth;
sctp_ierror_t ret;
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
@@ -768,10 +774,6 @@
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-
- /* We can now safely free the auth_chunk clone */
- kfree_skb(chunk->auth_chunk);
-
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
diff --git a/net/socket.c b/net/socket.c
index 879933a..a19ae19 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -450,16 +450,17 @@
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
- struct file *file;
+ struct fd f = fdget(fd);
struct socket *sock;
*err = -EBADF;
- file = fget_light(fd, fput_needed);
- if (file) {
- sock = sock_from_file(file, err);
- if (sock)
+ if (f.file) {
+ sock = sock_from_file(f.file, err);
+ if (likely(sock)) {
+ *fput_needed = f.flags;
return sock;
- fput_light(file, *fput_needed);
+ }
+ fdput(f);
}
return NULL;
}
@@ -1985,6 +1986,10 @@
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
+
+ if (kmsg->msg_namelen < 0)
+ return -EINVAL;
+
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0;
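The added check rejects a negative msg_namelen before the clamp: the field is a signed int copied from userspace, and a negative value could otherwise survive into later paths that treat the length as signed. A sketch of the validate-then-clamp order; STORAGE_MAX stands in for sizeof(struct sockaddr_storage):

#include <stdio.h>

#define STORAGE_MAX 128          /* models sizeof(struct sockaddr_storage) */

/* Validate a user-supplied length: reject negatives, clamp the rest. */
static int check_namelen(int namelen, int *out)
{
        if (namelen < 0)
                return -1;                        /* -EINVAL */
        *out = namelen > STORAGE_MAX ? STORAGE_MAX : namelen;
        return 0;
}

int main(void)
{
        int n;

        printf("%d\n", check_namelen(-1, &n));    /* -1 */
        if (check_namelen(4096, &n) == 0)
                printf("clamped to %d\n", n);     /* 128 */
        return 0;
}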
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6c0513a..36e431e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -108,6 +108,7 @@
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
+static void gss_put_auth(struct gss_auth *gss_auth);
static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
@@ -320,6 +321,7 @@
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
+ gss_put_auth(gss_msg->auth);
kfree(gss_msg);
}
@@ -498,9 +500,12 @@
default:
err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
if (err)
- goto err_free_msg;
+ goto err_put_pipe_version;
};
+ kref_get(&gss_auth->kref);
return gss_msg;
+err_put_pipe_version:
+ put_pipe_version(gss_auth->net);
err_free_msg:
kfree(gss_msg);
err:
@@ -991,6 +996,8 @@
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
if (gss_auth->service == 0)
goto err_put_mech;
+ if (!gssd_running(gss_auth->net))
+ goto err_put_mech;
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
auth->au_rslack = GSS_VERF_SLACK >> 2;
@@ -1062,6 +1069,12 @@
}
static void
+gss_put_auth(struct gss_auth *gss_auth)
+{
+ kref_put(&gss_auth->kref, gss_free_callback);
+}
+
+static void
gss_destroy(struct rpc_auth *auth)
{
struct gss_auth *gss_auth = container_of(auth,
@@ -1082,7 +1095,7 @@
gss_auth->gss_pipe[1] = NULL;
rpcauth_destroy_credcache(auth);
- kref_put(&gss_auth->kref, gss_free_callback);
+ gss_put_auth(gss_auth);
}
/*
@@ -1253,7 +1266,7 @@
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
gss_put_ctx(ctx);
- kref_put(&gss_auth->kref, gss_free_callback);
+ gss_put_auth(gss_auth);
}
static void
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a299..e860d4f 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@
free_page((unsigned long)xbufp->head[0].iov_base);
xbufp = &req->rq_snd_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
- list_del(&req->rq_bc_pa_list);
kfree(req);
}
@@ -168,8 +167,10 @@
/*
* Memory allocation failed, free the temporary list
*/
- list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
+ }
dprintk("RPC: setup backchannel transport failed\n");
return -ENOMEM;
@@ -198,6 +199,7 @@
xprt_dec_alloc_count(xprt, max_reqs);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
dprintk("RPC: req=%p\n", req);
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
if (--max_reqs == 0)
break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 817a1e5..0addefc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -510,6 +510,7 @@
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct sock *sk = transport->inet;
int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -527,7 +528,7 @@
* window size
*/
set_bit(SOCK_NOSPACE, &transport->sock->flags);
- transport->inet->sk_write_pending++;
+ sk->sk_write_pending++;
/* ...and wait for more buffer space */
xprt_wait_for_buffer_space(task, xs_nospace_callback);
}
@@ -537,6 +538,9 @@
}
spin_unlock_bh(&xprt->transport_lock);
+
+ /* Race breaker in case memory is freed before above code is called */
+ sk->sk_write_space(sk);
return ret;
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a38c899..574b861 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -610,8 +610,13 @@
int tipc_bearer_setup(void)
{
+ int err;
+
+ err = register_netdevice_notifier(&notifier);
+ if (err)
+ return err;
dev_add_pack(&tipc_packet_type);
- return register_netdevice_notifier(&notifier);
+ return 0;
}
void tipc_bearer_cleanup(void)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c301a9a..e6d7216 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -181,7 +181,7 @@
if (tipc_own_addr)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change node address once assigned)");
- tipc_core_start_net(addr);
+ tipc_net_start(addr);
return tipc_cfg_reply_none();
}
@@ -376,7 +376,6 @@
struct tipc_cfg_msg_hdr *req_hdr;
struct tipc_cfg_msg_hdr *rep_hdr;
struct sk_buff *rep_buf;
- int ret;
/* Validate configuration message header (ignore invalid message) */
req_hdr = (struct tipc_cfg_msg_hdr *)buf;
@@ -398,12 +397,8 @@
memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
rep_hdr->tcm_len = htonl(rep_buf->len);
rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
-
- ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
- rep_buf->len);
- if (ret < 0)
- pr_err("Sending cfg reply message failed, no memory\n");
-
+ tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+ rep_buf->len);
kfree_skb(rep_buf);
}
}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f9e88d8..80c2064 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -77,37 +77,13 @@
}
/**
- * tipc_core_stop_net - shut down TIPC networking sub-systems
- */
-static void tipc_core_stop_net(void)
-{
- tipc_net_stop();
- tipc_bearer_cleanup();
-}
-
-/**
- * start_net - start TIPC networking sub-systems
- */
-int tipc_core_start_net(unsigned long addr)
-{
- int res;
-
- tipc_net_start(addr);
- res = tipc_bearer_setup();
- if (res < 0)
- goto err;
- return res;
-
-err:
- tipc_core_stop_net();
- return res;
-}
-
-/**
* tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
*/
static void tipc_core_stop(void)
{
+ tipc_handler_stop();
+ tipc_net_stop();
+ tipc_bearer_cleanup();
tipc_netlink_stop();
tipc_cfg_stop();
tipc_subscr_stop();
@@ -122,30 +98,65 @@
*/
static int tipc_core_start(void)
{
- int res;
+ int err;
get_random_bytes(&tipc_random, sizeof(tipc_random));
- res = tipc_handler_start();
- if (!res)
- res = tipc_ref_table_init(tipc_max_ports, tipc_random);
- if (!res)
- res = tipc_nametbl_init();
- if (!res)
- res = tipc_netlink_start();
- if (!res)
- res = tipc_socket_init();
- if (!res)
- res = tipc_register_sysctl();
- if (!res)
- res = tipc_subscr_start();
- if (!res)
- res = tipc_cfg_init();
- if (res) {
- tipc_handler_stop();
- tipc_core_stop();
- }
- return res;
+ err = tipc_handler_start();
+ if (err)
+ goto out_handler;
+
+ err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+ if (err)
+ goto out_reftbl;
+
+ err = tipc_nametbl_init();
+ if (err)
+ goto out_nametbl;
+
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_socket_init();
+ if (err)
+ goto out_socket;
+
+ err = tipc_register_sysctl();
+ if (err)
+ goto out_sysctl;
+
+ err = tipc_subscr_start();
+ if (err)
+ goto out_subscr;
+
+ err = tipc_cfg_init();
+ if (err)
+ goto out_cfg;
+
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+
+ return 0;
+out_bearer:
+ tipc_cfg_stop();
+out_cfg:
+ tipc_subscr_stop();
+out_subscr:
+ tipc_unregister_sysctl();
+out_sysctl:
+ tipc_socket_stop();
+out_socket:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_nametbl_stop();
+out_nametbl:
+ tipc_ref_table_stop();
+out_reftbl:
+ tipc_handler_stop();
+out_handler:
+ return err;
}
static int __init tipc_init(void)
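The tipc_core_start() rewrite above converts the chained "if (!res)" initialization into the usual kernel goto-unwind ladder: a failure at step N jumps to a label that tears down steps N-1..1 in reverse order. A self-contained sketch of the shape with three illustrative steps:

#include <stdio.h>

static int step(const char *name, int ok)
{
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
}

static void undo(const char *name) { printf("undo %s\n", name); }

static int start(int fail_at)
{
        int err;

        err = step("handler", fail_at != 1);
        if (err)
                goto out;
        err = step("reftbl", fail_at != 2);
        if (err)
                goto out_handler;
        err = step("nametbl", fail_at != 3);
        if (err)
                goto out_reftbl;
        return 0;

out_reftbl:
        undo("reftbl");
out_handler:
        undo("handler");
out:
        return err;
}

int main(void)
{
        return start(3) ? 1 : 0;    /* fails at nametbl, unwinds reftbl, handler */
}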
@@ -174,8 +185,6 @@
static void __exit tipc_exit(void)
{
- tipc_handler_stop();
- tipc_core_stop_net();
tipc_core_stop();
pr_info("Deactivated\n");
}
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5569d96..4dfe137 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,7 +90,6 @@
/*
* Routines available to privileged subsystems
*/
-int tipc_core_start_net(unsigned long);
int tipc_handler_start(void);
void tipc_handler_stop(void);
int tipc_netlink_start(void);
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index e4bc8a2..1fabf16 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -58,7 +58,6 @@
spin_lock_bh(&qitem_lock);
if (!handler_enabled) {
- pr_err("Signal request ignored by handler\n");
spin_unlock_bh(&qitem_lock);
return -ENOPROTOOPT;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 92a1533..042e8e3 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -941,20 +941,48 @@
return 0;
}
+/**
+ * tipc_purge_publications - remove all publications for a given type
+ *
+ * tipc_nametbl_lock must be held when calling this function
+ */
+static void tipc_purge_publications(struct name_seq *seq)
+{
+ struct publication *publ, *safe;
+ struct sub_seq *sseq;
+ struct name_info *info;
+
+ if (!seq->sseqs) {
+ nameseq_delete_empty(seq);
+ return;
+ }
+ sseq = seq->sseqs;
+ info = sseq->info;
+ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
+ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ }
+}
+
void tipc_nametbl_stop(void)
{
u32 i;
+ struct name_seq *seq;
+ struct hlist_head *seq_head;
+ struct hlist_node *safe;
- if (!table.types)
- return;
-
- /* Verify name table is empty, then release it */
+ /* Verify name table is empty and purge any lingering
+ * publications, then release the name table
+ */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
if (hlist_empty(&table.types[i]))
continue;
- pr_err("nametbl_stop(): orphaned hash chain detected\n");
- break;
+ seq_head = &table.types[i];
+ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ tipc_purge_publications(seq);
+ }
+ continue;
}
kfree(table.types);
table.types = NULL;
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 9f72a63..3aaf73d 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -83,8 +83,6 @@
},
};
-static int tipc_genl_family_registered;
-
int tipc_netlink_start(void)
{
int res;
@@ -94,16 +92,10 @@
pr_err("Failed to register netlink interface\n");
return res;
}
-
- tipc_genl_family_registered = 1;
return 0;
}
void tipc_netlink_stop(void)
{
- if (!tipc_genl_family_registered)
- return;
-
genl_unregister_family(&tipc_genl_family);
- tipc_genl_family_registered = 0;
}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 2a2a938..de3d593 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -126,9 +126,6 @@
*/
void tipc_ref_table_stop(void)
{
- if (!tipc_ref_table.entries)
- return;
-
vfree(tipc_ref_table.entries);
tipc_ref_table.entries = NULL;
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index b635ca3..646a930 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -87,7 +87,6 @@
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct tipc_server *s = con->server;
if (con->sock) {
tipc_sock_release_local(con->sock);
@@ -95,10 +94,6 @@
}
tipc_clean_outqueues(con);
-
- if (con->conid)
- s->tipc_conn_shutdown(con->conid, con->usr_data);
-
kfree(con);
}
@@ -181,6 +176,9 @@
struct tipc_server *s = con->server;
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ if (con->conid)
+ s->tipc_conn_shutdown(con->conid, con->usr_data);
+
spin_lock_bh(&s->idr_lock);
idr_remove(&s->conn_idr, con->conid);
s->idr_in_use--;
@@ -429,10 +427,12 @@
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags))
+ if (test_bit(CF_CONNECTED, &con->flags)) {
if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
-
+ } else {
+ conn_put(con);
+ }
return 0;
}
@@ -573,7 +573,6 @@
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
- s->enabled = 1;
return ret;
}
@@ -583,10 +582,6 @@
int total = 0;
int id;
- if (!s->enabled)
- return;
-
- s->enabled = 0;
spin_lock_bh(&s->idr_lock);
for (id = 0; total < s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 98b23f2..be817b0 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -56,7 +56,6 @@
* @name: server name
* @imp: message importance
* @type: socket type
- * @enabled: identify whether server is launched or not
*/
struct tipc_server {
struct idr conn_idr;
@@ -74,7 +73,6 @@
const char name[TIPC_SERVER_NAME_LEN];
int imp;
int type;
- int enabled;
};
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index aab4948..0ed0eaa 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -70,8 +70,6 @@
static struct proto tipc_proto;
static struct proto tipc_proto_kern;
-static int sockets_enabled;
-
/*
* Revised TIPC socket locking policy:
*
@@ -999,7 +997,7 @@
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
err = -ENOTCONN;
break;
@@ -1625,7 +1623,7 @@
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
@@ -2027,8 +2025,6 @@
proto_unregister(&tipc_proto);
goto out;
}
-
- sockets_enabled = 1;
out:
return res;
}
@@ -2038,10 +2034,6 @@
*/
void tipc_socket_stop(void)
{
- if (!sockets_enabled)
- return;
-
- sockets_enabled = 0;
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 7cb0bd5..6424372 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -96,20 +96,16 @@
{
struct tipc_subscriber *subscriber = sub->subscriber;
struct kvec msg_sect;
- int ret;
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
-
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
sub->evt.found_upper = htohl(found_upper, sub->swap);
sub->evt.port.ref = htohl(port_ref, sub->swap);
sub->evt.port.node = htohl(node, sub->swap);
- ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
- msg_sect.iov_base, msg_sect.iov_len);
- if (ret < 0)
- pr_err("Sending subscription event failed, no memory\n");
+ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
+ msg_sect.iov_len);
}
/**
@@ -153,14 +149,6 @@
/* The spin lock per subscriber is used to protect its members */
spin_lock_bh(&subscriber->lock);
- /* Validate if the connection related to the subscriber is
- * closed (in case subscriber is terminating)
- */
- if (subscriber->conid == 0) {
- spin_unlock_bh(&subscriber->lock);
- return;
- }
-
/* Validate timeout (in case subscription is being cancelled) */
if (sub->timeout == TIPC_WAIT_FOREVER) {
spin_unlock_bh(&subscriber->lock);
@@ -215,9 +203,6 @@
spin_lock_bh(&subscriber->lock);
- /* Invalidate subscriber reference */
- subscriber->conid = 0;
-
/* Destroy any existing subscriptions for subscriber */
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
@@ -278,9 +263,9 @@
*
* Called with subscriber lock held.
*/
-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
- struct tipc_subscriber *subscriber)
-{
+static int subscr_subscribe(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber,
+ struct tipc_subscription **sub_p) {
struct tipc_subscription *sub;
int swap;
@@ -291,23 +276,21 @@
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
subscr_cancel(s, subscriber);
- return NULL;
+ return 0;
}
/* Refuse subscription if global limit exceeded */
if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
pr_warn("Subscription rejected, limit reached (%u)\n",
TIPC_MAX_SUBSCRIPTIONS);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
pr_warn("Subscription rejected, no memory\n");
- subscr_terminate(subscriber);
- return NULL;
+ return -ENOMEM;
}
/* Initialize subscription object */
@@ -321,8 +304,7 @@
(sub->seq.lower > sub->seq.upper)) {
pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
@@ -335,8 +317,8 @@
(Handler)subscr_timeout, (unsigned long)sub);
k_start_timer(&sub->timer, sub->timeout);
}
-
- return sub;
+ *sub_p = sub;
+ return 0;
}
/* Handle one termination request for the subscriber */
@@ -350,10 +332,14 @@
void *usr_data, void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
- struct tipc_subscription *sub;
+ struct tipc_subscription *sub = NULL;
spin_lock_bh(&subscriber->lock);
- sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
+ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+ spin_unlock_bh(&subscriber->lock);
+ subscr_terminate(subscriber);
+ return;
+ }
if (sub)
tipc_nametbl_subscribe(sub);
spin_unlock_bh(&subscriber->lock);
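The refactor above makes subscr_subscribe() report failure through a return code instead of calling subscr_terminate() itself, so the caller can drop subscriber->lock before terminating; terminating while holding the subscriber's own lock invites deadlock against paths that take that same lock. A pthread sketch of the report-under-lock, act-outside-lock shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sub_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with sub_lock held: only report failure, never tear down. */
static int do_subscribe(int ok) { return ok ? 0 : -1; }

static void terminate(void)
{
        /* takes sub_lock itself, so it must run with the lock dropped */
        pthread_mutex_lock(&sub_lock);
        printf("terminated\n");
        pthread_mutex_unlock(&sub_lock);
}

static void handle_request(int ok)
{
        pthread_mutex_lock(&sub_lock);
        if (do_subscribe(ok) < 0) {
                pthread_mutex_unlock(&sub_lock);   /* drop the lock first */
                terminate();
                return;
        }
        pthread_mutex_unlock(&sub_lock);
}

int main(void)
{
        handle_request(0);
        return 0;
}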
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 29fc8be..94404f1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -163,9 +163,8 @@
static inline unsigned int unix_hash_fold(__wsum n)
{
- unsigned int hash = (__force unsigned int)n;
+ unsigned int hash = (__force unsigned int)csum_fold(n);
- hash ^= hash>>16;
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
}
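The fix above folds the 32-bit checksum to 16 bits with csum_fold() before mixing, rather than xor-ing the halves by hand, then mixes the top byte down and masks with the table size. A userspace sketch, with csum_fold() modeled as the usual end-around-carry fold (UNIX_HASH_SIZE here is illustrative):

#include <stdio.h>

#define UNIX_HASH_SIZE 256

/* end-around-carry fold of a 32-bit checksum to 16 bits, as csum_fold() does */
static unsigned int fold16(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);   /* absorb the carry */
        return (~sum) & 0xffff;
}

static unsigned int hash_fold(unsigned int csum)
{
        unsigned int hash = fold16(csum);

        hash ^= hash >> 8;
        return hash & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
        printf("%u\n", hash_fold(0xdeadbeefu));
        return 0;
}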
@@ -1788,8 +1787,11 @@
goto out;
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+ if (unlikely(err)) {
+ /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
@@ -1914,6 +1916,7 @@
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
int copied = 0;
+ int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
@@ -1929,7 +1932,7 @@
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -1941,8 +1944,11 @@
}
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(timeo);
+ if (unlikely(err)) {
+ /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 010892b..a3bf18d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -788,8 +788,6 @@
default:
break;
}
-
- wdev->beacon_interval = 0;
}
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 9b897fc..f054137 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1700,7 +1700,7 @@
return;
case NL80211_REGDOM_SET_BY_USER:
treatment = reg_process_hint_user(reg_request);
- if (treatment == REG_REQ_OK ||
+ if (treatment == REG_REQ_IGNORE ||
treatment == REG_REQ_ALREADY_SET)
return;
schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
@@ -2373,6 +2373,7 @@
int set_regdom(const struct ieee80211_regdomain *rd)
{
struct regulatory_request *lr;
+ bool user_reset = false;
int r;
if (!reg_is_valid_request(rd->alpha2)) {
@@ -2389,6 +2390,7 @@
break;
case NL80211_REGDOM_SET_BY_USER:
r = reg_set_rd_user(rd, lr);
+ user_reset = true;
break;
case NL80211_REGDOM_SET_BY_DRIVER:
r = reg_set_rd_driver(rd, lr);
@@ -2402,8 +2404,14 @@
}
if (r) {
- if (r == -EALREADY)
+ switch (r) {
+ case -EALREADY:
reg_set_request_processed();
+ break;
+ default:
+ /* Back to world regulatory in case of errors */
+ restore_regulatory_settings(user_reset);
+ }
kfree(rd);
return r;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4b98b25..1d5c7bf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1158,7 +1158,7 @@
if (hlist_unhashed(&pol->bydst))
return NULL;
- hlist_del(&pol->bydst);
+ hlist_del_init(&pol->bydst);
hlist_del(&pol->byidx);
list_del(&pol->walk.all);
net->xfrm.policy_count[dir]--;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a26b7aa..40f1b3e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1159,6 +1159,11 @@
}
x->props.aalgo = orig->props.aalgo;
+ if (orig->aead) {
+ x->aead = xfrm_algo_aead_clone(orig->aead);
+ if (!x->aead)
+ goto error;
+ }
if (orig->ealg) {
x->ealg = xfrm_algo_clone(orig->ealg);
if (!x->ealg)
@@ -1201,6 +1206,9 @@
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
+ x->tfcpad = orig->tfcpad;
+ x->replay_maxdiff = orig->replay_maxdiff;
+ x->replay_maxage = orig->replay_maxage;
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
@@ -1215,11 +1223,12 @@
return NULL;
}
-/* net->xfrm.xfrm_state_lock is held */
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
unsigned int h;
- struct xfrm_state *x;
+ struct xfrm_state *x = NULL;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (m->reqid) {
h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1236,7 +1245,7 @@
m->old_family))
continue;
xfrm_state_hold(x);
- return x;
+ break;
}
} else {
h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1251,11 +1260,13 @@
m->old_family))
continue;
xfrm_state_hold(x);
- return x;
+ break;
}
}
- return NULL;
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
@@ -1451,7 +1462,7 @@
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
- struct net *net = xs_net(*dst);
+ struct net *net = xs_net(*src);
if (!afinfo)
return -EAFNOSUPPORT;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ae3ec7..2f7ddc3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -32,11 +32,6 @@
#include <linux/in6.h>
#endif
-static inline int aead_len(struct xfrm_algo_aead *alg)
-{
- return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
-}
-
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
struct nlattr *rt = attrs[type];
@@ -1226,7 +1221,7 @@
return 0;
uctx = nla_data(rt);
- return security_xfrm_policy_alloc(&pol->security, uctx);
+ return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
@@ -1631,7 +1626,7 @@
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
- err = security_xfrm_policy_alloc(&ctx, uctx);
+ err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
@@ -1933,7 +1928,7 @@
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
- err = security_xfrm_policy_alloc(&ctx, uctx);
+ err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
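
The xfrm_user hunks above are one end of a gfp_t plumbing job that continues
through security/ and selinux/ later in this patch: the security-context
allocation behind security_xfrm_policy_alloc() now takes the caller's
allocation context, so callers that cannot sleep can pass GFP_ATOMIC instead
of being stuck with an implicit GFP_KERNEL (the netlink paths here keep
passing GFP_KERNEL). The removed local aead_len() pairs with the AEAD-clone
fix in xfrm_state.c above, which needs that helper from a shared location
rather than a private copy here. A userspace sketch of threading such a
flag through an API boundary (gfp_t reduced to a plain unsigned):

    #include <stdlib.h>

    typedef unsigned gfp_t;
    #define GFP_KERNEL 0u   /* may sleep */
    #define GFP_ATOMIC 1u   /* must not sleep */

    struct sec_ctx { int len; char str[]; };

    static void *ctx_alloc(size_t n, gfp_t gfp)
    {
        /* a real allocator would take a non-sleeping path for GFP_ATOMIC */
        (void)gfp;
        return malloc(n);
    }

    static int policy_alloc(struct sec_ctx **ctxp, int len, gfp_t gfp)
    {
        *ctxp = ctx_alloc(sizeof(**ctxp) + len, gfp); /* caller's context wins */
        return *ctxp ? 0 : -12; /* -ENOMEM */
    }

    int main(void)
    {
        struct sec_ctx *c;
        int r = policy_alloc(&c, 16, GFP_KERNEL);

        free(c);
        return r ? 1 : 0;
    }
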
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 547e15d..93a0da2 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -155,6 +155,15 @@
# Important: no spaces around options
ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
+# ld-version
+# Usage: $(call ld-version)
+# Note this is mainly for HJ Lu's 3-number binutils versions
+ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
+
+# ld-ifversion
+# Usage: $(call ld-ifversion, -ge, 22252, y)
+ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3))
+
######
###
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index d5d859c..9f0ee22 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -198,7 +198,7 @@
$(multi-objs-y:.o=.lst) : modname = $(modname-multi)
quiet_cmd_cc_s_c = CC $(quiet_modtag) $@
-cmd_cc_s_c = $(CC) $(c_flags) -fverbose-asm -S -o $@ $<
+cmd_cc_s_c = $(CC) $(c_flags) $(DISABLE_LTO) -fverbose-asm -S -o $@ $<
$(obj)/%.s: $(src)/%.c FORCE
$(call if_changed_dep,cc_s_c)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 49392ec..79c059e 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -152,6 +152,7 @@
dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
-I$(srctree)/arch/$(SRCARCH)/boot/dts \
-I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
+ -I$(srctree)/drivers/of/testcase-data \
-undef -D__DTS__
# Finds the multi-part object the current object will be linked into
diff --git a/scripts/gcc-ld b/scripts/gcc-ld
new file mode 100644
index 0000000..cadab9a
--- /dev/null
+++ b/scripts/gcc-ld
@@ -0,0 +1,29 @@
+#!/bin/sh
+# run gcc with ld options
+# used as a wrapper to execute link time optimizations
+# yes virginia, this is not pretty
+
+ARGS="-nostdlib"
+
+while [ "$1" != "" ] ; do
+ case "$1" in
+ -save-temps|-m32|-m64) N="$1" ;;
+ -r) N="$1" ;;
+ -[Wg]*) N="$1" ;;
+ -[olv]|-[Ofd]*|-nostdlib) N="$1" ;;
+ --end-group|--start-group)
+ N="-Wl,$1" ;;
+ -[RTFGhIezcbyYu]*|\
+--script|--defsym|-init|-Map|--oformat|-rpath|\
+-rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\
+--version-script|--dynamic-list|--version-exports-symbol|--wrap|-m)
+ A="$1" ; shift ; N="-Wl,$A,$1" ;;
+ -[m]*) N="$1" ;;
+ -*) N="-Wl,$1" ;;
+ *) N="$1" ;;
+ esac
+ ARGS="$ARGS $N"
+ shift
+done
+
+exec $CC $ARGS
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index ef47409..17fa901 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -257,7 +257,7 @@
&& compr="lzop -9 -f"
echo "$output_file" | grep -q "\.lz4$" \
&& [ -x "`which lz4 2> /dev/null`" ] \
- && compr="lz4 -9 -f"
+ && compr="lz4 -l -9 -f"
echo "$output_file" | grep -q "\.cpio$" && compr="cat"
shift
;;
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
new file mode 100755
index 0000000..198580d
--- /dev/null
+++ b/scripts/ld-version.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/awk -f
+# extract linker version number from stdin and turn it into a single number
+ {
+ gsub(".*)", "");
+ split($1,a, ".");
+ print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
+ exit
+ }
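
ld-version.sh strips everything up to the ")" in the usual
"GNU ld (GNU Binutils ...) 2.x.y" banner, then weights the dotted version
fields into one integer that a plain shell "[ -ge ]" test can compare,
which is exactly what ld-ifversion in the scripts/Kbuild.include hunk above
does. The same arithmetic, transliterated to C for illustration:

    #include <stdio.h>

    /* mirror of the awk weighting: a1*1e7 + a2*1e5 + a3*1e4 + a4*100 + a5 */
    static long encode(const char *v)
    {
        long a[5] = { 0, 0, 0, 0, 0 };

        sscanf(v, "%ld.%ld.%ld.%ld.%ld", &a[0], &a[1], &a[2], &a[3], &a[4]);
        return a[0] * 10000000 + a[1] * 100000 + a[2] * 10000
             + a[3] * 100 + a[4];
    }

    int main(void)
    {
        printf("%ld\n", encode("2.22"));        /* 22200000 */
        printf("%ld\n", encode("2.23.52.0.1")); /* 22820001 */
        return 0;
    }
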
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 4061098..0663556 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -623,7 +623,10 @@
switch (sym->st_shndx) {
case SHN_COMMON:
- warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
+ if (!strncmp(symname, "__gnu_lto_", sizeof("__gnu_lto_")-1)) {
+ /* Should warn here, but modpost runs before the linker */
+ } else
+ warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
break;
case SHN_UNDEF:
/* undefined symbol */
@@ -849,6 +852,7 @@
".xt.lit", /* xtensa */
".arcextmap*", /* arc */
".gnu.linkonce.arcext*", /* arc : modules */
+ ".gnu.lto*",
NULL
};
@@ -1455,6 +1459,10 @@
to = find_elf_symbol(elf, r->r_addend, sym);
tosym = sym_name(elf, to);
+ if (!strncmp(fromsym, "reference___initcall",
+ sizeof("reference___initcall")-1))
+ return;
+
/* check whitelist - we may ignore it */
if (secref_whitelist(mismatch,
fromsec, fromsym, tosec, tosym)) {
@@ -1502,6 +1510,16 @@
#define R_ARM_JUMP24 29
#endif
+#ifndef R_ARM_THM_CALL
+#define R_ARM_THM_CALL 10
+#endif
+#ifndef R_ARM_THM_JUMP24
+#define R_ARM_THM_JUMP24 30
+#endif
+#ifndef R_ARM_THM_JUMP19
+#define R_ARM_THM_JUMP19 51
+#endif
+
static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
@@ -1515,6 +1533,9 @@
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ case R_ARM_THM_JUMP19:
/* From ARM ABI: ((S + A) | T) - P */
r->r_addend = (int)(long)(elf->hdr +
sechdr->sh_offset +
@@ -1680,6 +1701,19 @@
}
}
+static char *remove_dot(char *s)
+{
+ char *end;
+ int n = strcspn(s, ".");
+
+ if (n > 0 && s[n] != 0) {
+ strtoul(s + n + 1, &end, 10);
+ if (end > s + n + 1 && (*end == '.' || *end == 0))
+ s[n] = 0;
+ }
+ return s;
+}
+
static void read_symbols(char *modname)
{
const char *symname;
@@ -1718,7 +1752,7 @@
}
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
- symname = info.strtab + sym->st_name;
+ symname = remove_dot(info.strtab + sym->st_name);
handle_modversions(mod, &info, sym, symname);
handle_moddevtable(mod, &info, sym, symname);
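
Several of the modpost changes above exist because gcc's LTO renames and
marks things: it may emit "name.1234" symbol clones, __gnu_lto_* COMMON
markers, and .gnu.lto* sections, all of which confused modpost.
remove_dot() strips the numeric suffix in place before the name is used for
modversions and device tables, which is also why strtab loses its const in
the modpost.h hunk below. The helper as a standalone demo:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* truncate "sym.1234" (or "sym.1234.more") back to "sym", in place */
    static char *remove_dot(char *s)
    {
        char *end;
        int n = strcspn(s, ".");

        if (n > 0 && s[n] != 0) {
            strtoul(s + n + 1, &end, 10);
            if (end > s + n + 1 && (*end == '.' || *end == 0))
                s[n] = 0;
        }
        return s;
    }

    int main(void)
    {
        char a[] = "init_module.1234";
        char b[] = "llvm.something";  /* non-numeric suffix is left alone */

        assert(!strcmp(remove_dot(a), "init_module"));
        assert(!strcmp(remove_dot(b), "llvm.something"));
        return 0;
    }
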
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 51207e4..168b43d 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -127,7 +127,7 @@
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
- const char *strtab;
+ char *strtab;
char *modinfo;
unsigned int modinfo_len;
diff --git a/security/capability.c b/security/capability.c
index 8b4f24a..21e2b9c 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -757,7 +757,8 @@
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx)
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
diff --git a/security/keys/compat.c b/security/keys/compat.c
index bbd32c7..3478965 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -65,8 +65,8 @@
* taking a 32-bit syscall are zero. If you can, you should call sys_keyctl()
* directly.
*/
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+ u32, arg2, u32, arg3, u32, arg4, u32, arg5)
{
switch (option) {
case KEYCTL_GET_KEYRING_ID:
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d46cbc5..2fb2576 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,7 +1000,11 @@
kenter("{%d}", key->serial);
- BUG_ON(key != ctx->match_data);
+ /* We might get a keyring with matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data)
+ return 0;
+
ctx->result = ERR_PTR(-EDEADLK);
return 1;
}
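
The keyring fix relaxes a BUG_ON() in the deadlock-detection iterator: hash
collisions on the index key can legitimately hand the match function a
different keyring, so the right response is to decline the candidate
(return 0, keep walking) rather than assert; returning 1 stops the search
with ctx->result set. The general callback contract, sketched with
hypothetical types (not the keyctl API):

    struct ctx { const void *wanted; const void *result; };

    /* 0 = not it, keep walking; 1 = found, stop */
    static int match(const void *candidate, struct ctx *c)
    {
        if (candidate != c->wanted)
            return 0;          /* collision: skip, don't assert */
        c->result = candidate;
        return 1;
    }

    static const void *search(const void **items, int n, struct ctx *c)
    {
        for (int i = 0; i < n; i++)
            if (match(items[i], c))
                break;
        return c->result;
    }

    int main(void)
    {
        int a, b;
        const void *items[] = { &a, &b };
        struct ctx c = { &b, 0 };

        return search(items, 2, &c) == (const void *)&b ? 0 : 1;
    }
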
diff --git a/security/security.c b/security/security.c
index 15b6928..919cad9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1317,9 +1317,11 @@
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
- return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
+ return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b34847..b332e2c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -668,7 +668,7 @@
if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
- strlen(mount_options[i]), &sid);
+ strlen(mount_options[i]), &sid, GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2489,7 +2489,8 @@
if (flags[i] == SBLABEL_MNT)
continue;
len = strlen(mount_options[i]);
- rc = security_context_to_sid(mount_options[i], len, &sid);
+ rc = security_context_to_sid(mount_options[i], len, &sid,
+ GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2893,7 +2894,7 @@
if (rc)
return rc;
- rc = security_context_to_sid(value, size, &newsid);
+ rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
if (rc == -EINVAL) {
if (!capable(CAP_MAC_ADMIN)) {
struct audit_buffer *ab;
@@ -3050,7 +3051,7 @@
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid((void *)value, size, &newsid);
+ rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
if (rc)
return rc;
@@ -5529,7 +5530,7 @@
str[size-1] = 0;
size--;
}
- error = security_context_to_sid(value, size, &sid);
+ error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
if (!capable(CAP_MAC_ADMIN)) {
struct audit_buffer *ab;
@@ -5638,7 +5639,7 @@
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return security_context_to_sid(secdata, seclen, secid);
+ return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8ed8daf..ce7852c 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -134,7 +134,7 @@
int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
int security_context_to_sid(const char *scontext, u32 scontext_len,
- u32 *out_sid);
+ u32 *out_sid, gfp_t gfp);
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 48c3cc9..9f05847 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,7 +10,8 @@
#include <net/flow.h>
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx);
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 5122aff..d60c0ee 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -576,7 +576,7 @@
if (length)
goto out;
- length = security_context_to_sid(buf, size, &sid);
+ length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -731,11 +731,13 @@
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -817,11 +819,13 @@
objname = namebuf;
}
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -878,11 +882,13 @@
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -934,7 +940,7 @@
if (sscanf(buf, "%s %s", con, user) != 2)
goto out;
- length = security_context_to_sid(con, strlen(con) + 1, &sid);
+ length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -994,11 +1000,13 @@
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c0f4988..9c5cdc2ca 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3338,10 +3338,10 @@
if (rc)
return rc;
- buf[0] = ft->stype;
- buf[1] = ft->ttype;
- buf[2] = ft->tclass;
- buf[3] = otype->otype;
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(otype->otype);
rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
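
The policydb hunk fixes an endianness bug in writing filename-transition
records: buf[] is emitted verbatim into the binary policy file, so every u32
has to be converted to the file's little-endian layout first; otherwise a
big-endian machine writes a policy nothing can read back. What
cpu_to_le32() buys, shown with glibc's <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t host = 0x11223344;
        uint32_t le = htole32(host); /* cpu_to_le32() equivalent */
        const uint8_t *p = (const uint8_t *)&le;

        /* prints "44 33 22 11" on any host: a fixed on-disk byte order */
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
        return 0;
    }
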
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 5d0144e..4bca494 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1289,16 +1289,18 @@
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
+ * @gfp: context for the allocation
*
* Obtains a SID associated with the security context that
* has the string representation specified by @scontext.
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+ gfp_t gfp)
{
return security_context_to_sid_core(scontext, scontext_len,
- sid, SECSID_NULL, GFP_KERNEL, 0);
+ sid, SECSID_NULL, gfp, 0);
}
/**
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 0462cb3..98b0426 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -78,7 +78,8 @@
* xfrm_user_sec_ctx context.
*/
static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
int rc;
const struct task_security_struct *tsec = current_security();
@@ -94,7 +95,7 @@
if (str_len >= PAGE_SIZE)
return -ENOMEM;
- ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
if (!ctx)
return -ENOMEM;
@@ -103,7 +104,7 @@
ctx->ctx_len = str_len;
memcpy(ctx->ctx_str, &uctx[1], str_len);
ctx->ctx_str[str_len] = '\0';
- rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
if (rc)
goto err;
@@ -282,9 +283,10 @@
* LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
- return selinux_xfrm_alloc_user(ctxp, uctx);
+ return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
}
/*
@@ -332,7 +334,7 @@
int selinux_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *uctx)
{
- return selinux_xfrm_alloc_user(&x->security, uctx);
+ return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
}
/*
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 7a20897..7403f34 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -133,7 +133,7 @@
kfree(data);
}
snd_card_unref(compr->card);
- return 0;
+ return ret;
}
static int snd_compr_free(struct inode *inode, struct file *f)
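
The compress_offload fix is the classic swallowed-error bug: the open
handler computed ret, cleaned up on failure, and then returned 0 anyway, so
userspace carried on with a half-initialised stream. The shape of the bug,
reduced to a sketch:

    static int setup(void) { return -12; /* pretend -ENOMEM */ }

    static int open_dev(void)
    {
        int ret = setup();

        if (ret) {
            /* ...undo partial work... */
        }
        return ret; /* the bug was an unconditional "return 0;" here */
    }

    int main(void) { return open_dev() == -12 ? 0 : 1; }
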
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index df3652a..8ed0bcc 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1026,6 +1026,9 @@
spec->gen.keep_eapd_on = 1;
spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
spec->eapd_nid = 0x12;
+ /* Analog PC Beeper - allow firmware/ACPI beeps */
+ spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+ spec->gen.beep_nid = 0; /* no digital beep */
}
}
@@ -1092,6 +1095,7 @@
spec = codec->spec;
spec->gen.mixer_nid = 0x20;
+ spec->gen.mixer_merge_nid = 0x21;
spec->gen.beep_nid = 0x10;
set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 54d1479..46ecdbb 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -2662,60 +2662,6 @@
}
/*
- * PCM stuffs
- */
-static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
- u32 stream_tag,
- int channel_id, int format)
-{
- unsigned int oldval, newval;
-
- if (!nid)
- return;
-
- snd_printdd(
- "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
- "channel=%d, format=0x%x\n",
- nid, stream_tag, channel_id, format);
-
- /* update the format-id if changed */
- oldval = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_STREAM_FORMAT,
- 0);
- if (oldval != format) {
- msleep(20);
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_STREAM_FORMAT,
- format);
- }
-
- oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- newval = (stream_tag << 4) | channel_id;
- if (oldval != newval) {
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- newval);
- }
-}
-
-static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
-{
- unsigned int val;
-
- if (!nid)
- return;
-
- snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
-
- val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- if (!val)
- return;
-
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
-}
-
-/*
* PCM callbacks
*/
static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
@@ -2726,7 +2672,7 @@
{
struct ca0132_spec *spec = codec->spec;
- ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
+ snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
return 0;
}
@@ -2745,7 +2691,7 @@
if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
msleep(50);
- ca0132_cleanup_stream(codec, spec->dacs[0]);
+ snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
return 0;
}
@@ -2822,10 +2768,8 @@
unsigned int format,
struct snd_pcm_substream *substream)
{
- struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->adcs[substream->number],
- stream_tag, 0, format);
+ snd_hda_codec_setup_stream(codec, hinfo->nid,
+ stream_tag, 0, format);
return 0;
}
@@ -2839,7 +2783,7 @@
if (spec->dsp_state == DSP_DOWNLOADING)
return 0;
- ca0132_cleanup_stream(codec, hinfo->nid);
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
return 0;
}
@@ -4742,6 +4686,8 @@
return err;
codec->patch_ops = ca0132_patch_ops;
+ codec->pcm_format_first = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a9a83b8..8d0a844 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3616,6 +3616,19 @@
}
}
+static void alc_no_shutup(struct hda_codec *codec)
+{
+}
+
+static void alc_fixup_no_shutup(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ struct alc_spec *spec = codec->spec;
+ spec->shutup = alc_no_shutup;
+ }
+}
+
static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -3844,6 +3857,7 @@
ALC269_FIXUP_HP_GPIO_LED,
ALC269_FIXUP_INV_DMIC,
ALC269_FIXUP_LENOVO_DOCK,
+ ALC269_FIXUP_NO_SHUTUP,
ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4020,6 +4034,10 @@
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
},
+ [ALC269_FIXUP_NO_SHUTUP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_no_shutup,
+ },
[ALC269_FIXUP_LENOVO_DOCK] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -4253,6 +4271,7 @@
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -4308,7 +4327,9 @@
SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4317,6 +4338,54 @@
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ /* ALC282 */
+ SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22a0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c1, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22cd, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22ce, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22d0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ /* ALC290 */
+ SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2261, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2262, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2280, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2281, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2289, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c6, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4354,6 +4423,7 @@
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5113,7 +5183,7 @@
SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
- SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7311bad..3bc29c9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -83,6 +83,7 @@
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
+ STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD73XX_MODELS
};
@@ -97,6 +98,7 @@
STAC_92HD83XXX_HP_LED,
STAC_92HD83XXX_HP_INV_LED,
STAC_92HD83XXX_HP_MIC_LED,
+ STAC_HP_LED_GPIO10,
STAC_92HD83XXX_HEADSET_JACK,
STAC_92HD83XXX_HP,
STAC_HP_ENVY_BASS,
@@ -1795,6 +1797,12 @@
{}
};
+static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
+ { 0x0a, 0x02214030 },
+ { 0x0b, 0x02A19010 },
+ {}
+};
+
static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -1913,6 +1921,10 @@
[STAC_92HD73XX_NO_JD] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd73xx_fixup_no_jd,
+ },
+ [STAC_92HD89XX_HP_FRONT_JACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
}
};
@@ -1973,6 +1985,8 @@
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
{} /* terminator */
};
@@ -2117,6 +2131,17 @@
}
}
+static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gpio_led = 0x10; /* GPIO4 */
+ spec->default_polarity = 0;
+ }
+}
+
static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -2611,6 +2636,12 @@
.chained = true,
.chain_id = STAC_92HD83XXX_HP,
},
+ [STAC_HP_LED_GPIO10] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd83xxx_fixup_hp_led_gpio10,
+ .chained = true,
+ .chain_id = STAC_92HD83XXX_HP,
+ },
[STAC_92HD83XXX_HEADSET_JACK] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd83xxx_fixup_headset_jack,
@@ -2689,6 +2720,8 @@
"HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888,
"HP Envy Spectre", STAC_HP_ENVY_BASS),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
+ "HP Folio 13", STAC_HP_LED_GPIO10),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
"HP Folio", STAC_HP_BNB13_EQ),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18F8,
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index ed6f199..4cf3200 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -238,11 +238,21 @@
cs4245_write_spi(chip, CS4245_MCLK_FREQ);
}
+static inline unsigned int shift_bits(unsigned int value,
+ unsigned int shift_from,
+ unsigned int shift_to,
+ unsigned int mask)
+{
+ if (shift_from < shift_to)
+ return (value << (shift_to - shift_from)) & mask;
+ else
+ return (value >> (shift_from - shift_to)) & mask;
+}
+
unsigned int adjust_dg_dac_routing(struct oxygen *chip,
unsigned int play_routing)
{
struct dg *data = chip->model_data;
- unsigned int routing = 0;
switch (data->output_sel) {
case PLAYBACK_DST_HP:
@@ -252,15 +262,23 @@
OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK);
break;
case PLAYBACK_DST_MULTICH:
- routing = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) |
- (2 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
- (1 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
- (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK);
break;
}
- return routing;
+ return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC1_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC2_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC3_SOURCE_MASK);
}
void dump_cs4245_registers(struct oxygen *chip,
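
adjust_dg_dac_routing() used to discard the routing ALSA asked for and
return a hard-coded mask; the rewrite preserves the requested routing and
only remaps which DAC serves which channel pair, using the new shift_bits()
helper to move a bit-field between positions in either direction. The
helper stands alone:

    #include <assert.h>

    static unsigned int shift_bits(unsigned int value,
                                   unsigned int shift_from,
                                   unsigned int shift_to,
                                   unsigned int mask)
    {
        if (shift_from < shift_to)
            return (value << (shift_to - shift_from)) & mask;
        else
            return (value >> (shift_from - shift_to)) & mask;
    }

    int main(void)
    {
        /* a two-bit field moved from bit 2 to bit 6 and back
         * (hypothetical positions, not the OXYGEN_PLAY_* layout) */
        assert(shift_bits(0x3u << 2, 2, 6, 0x3u << 6) == 0x3u << 6);
        assert(shift_bits(0x3u << 6, 6, 2, 0x3u << 2) == 0x3u << 2);
        return 0;
    }
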
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 54f74f8..4544d8e 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -11,7 +11,7 @@
config SND_BF5XX_SOC_SSM2602
tristate "SoC SSM2602 Audio Codec Add-On Card support"
- depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S if !BF60x
select SND_BF6XX_SOC_I2S if BF60x
select SND_SOC_SSM2602
@@ -21,10 +21,9 @@
config SND_SOC_BFIN_EVAL_ADAU1701
tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && I2C
select SND_BF5XX_SOC_I2S
select SND_SOC_ADAU1701
- select I2C
help
Say Y if you want to add support for the Analog Devices EVAL-ADAU1701MINIZ
board connected to one of the Blackfin evaluation boards like the
@@ -45,7 +44,7 @@
config SND_SOC_BFIN_EVAL_ADAV80X
tristate "Support for the EVAL-ADAV80X boards on Blackfin eval boards"
- depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S
select SND_SOC_ADAV80X
help
@@ -58,7 +57,7 @@
config SND_BF5XX_SOC_AD1836
tristate "SoC AD1836 Audio support for BF5xx"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && SPI_MASTER
select SND_BF5XX_SOC_I2S
select SND_SOC_AD1836
help
@@ -66,7 +65,7 @@
config SND_BF5XX_SOC_AD193X
tristate "SoC AD193X Audio support for Blackfin"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S
select SND_SOC_AD193X
help
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 75d0ad5..647a72c 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1328,6 +1328,9 @@
pm860x->codec = codec;
codec->control_data = pm860x->regmap;
+ ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+ if (ret)
+ return ret;
for (i = 0; i < 4; i++) {
ret = request_threaded_irq(pm860x->irq[i], NULL,
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 7257a88..34d965a 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -57,8 +57,8 @@
static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line",
"Stereo Mix", "Mono Mix", "Phone"};
-static const struct soc_enum ad1980_cap_src =
- SOC_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 7, ad1980_rec_sel);
+static SOC_ENUM_DOUBLE_DECL(ad1980_cap_src,
+ AC97_REC_SEL, 8, 0, ad1980_rec_sel);
static const struct snd_kcontrol_new ad1980_snd_ac97_controls[] = {
SOC_DOUBLE("Master Playback Volume", AC97_MASTER, 8, 0, 31, 1),
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index f295b65..f4d965ebc 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1268,11 +1268,23 @@
},
};
+static bool da732x_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA732X_REG_HPL_DAC_OFF_CNTL:
+ case DA732X_REG_HPR_DAC_OFF_CNTL:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config da732x_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = DA732X_MAX_REG,
+ .volatile_reg = da732x_volatile,
.reg_defaults = da732x_reg_cache,
.num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
.cache_type = REGCACHE_RBTREE,
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 52b79a4..4228126 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1523,8 +1523,15 @@
return 0;
}
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the CODEC and PMIC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 Ids for the
+ * CODEC and PMIC, which must differ for the two to operate together.
+ */
static const struct i2c_device_id da9055_i2c_id[] = {
- { "da9055", 0 },
+ { "da9055-codec", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
@@ -1532,7 +1539,7 @@
/* I2C codec control layer */
static struct i2c_driver da9055_i2c_driver = {
.driver = {
- .name = "da9055",
+ .name = "da9055-codec",
.owner = THIS_MODULE,
},
.probe = da9055_i2c_probe,
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index 5839048..cb736dd 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -140,13 +140,17 @@
static const char *isabelle_rx2_texts[] = {"VRX2", "ARX2"};
static const struct soc_enum isabelle_rx1_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3, 1, isabelle_rx1_texts),
- SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5, 1, isabelle_rx1_texts),
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3,
+ ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5,
+ ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts),
};
static const struct soc_enum isabelle_rx2_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2, 1, isabelle_rx2_texts),
- SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4, 1, isabelle_rx2_texts),
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2,
+ ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4,
+ ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts),
};
/* Headset DAC playback switches */
@@ -161,13 +165,17 @@
static const char *isabelle_vtx_texts[] = {"AMIC2", "DMIC"};
static const struct soc_enum isabelle_atx_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7, 1, isabelle_atx_texts),
- SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_atx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0,
+ ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts),
};
static const struct soc_enum isabelle_vtx_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6, 1, isabelle_vtx_texts),
- SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_vtx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6,
+ ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0,
+ ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts),
};
static const struct snd_kcontrol_new atx_mux_controls =
@@ -183,17 +191,13 @@
/* Left analog microphone selection */
static const char *isabelle_amic2_texts[] = {"Sub Mic", "Aux/FM Right"};
-static const struct soc_enum isabelle_amic1_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 5,
- ARRAY_SIZE(isabelle_amic1_texts),
- isabelle_amic1_texts),
-};
+static SOC_ENUM_SINGLE_DECL(isabelle_amic1_enum,
+ ISABELLE_AMIC_CFG_REG, 5,
+ isabelle_amic1_texts);
-static const struct soc_enum isabelle_amic2_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 4,
- ARRAY_SIZE(isabelle_amic2_texts),
- isabelle_amic2_texts),
-};
+static SOC_ENUM_SINGLE_DECL(isabelle_amic2_enum,
+ ISABELLE_AMIC_CFG_REG, 4,
+ isabelle_amic2_texts);
static const struct snd_kcontrol_new amic1_control =
SOC_DAPM_ENUM("Route", isabelle_amic1_enum);
@@ -206,16 +210,20 @@
static const char *isabelle_st_voice_texts[] = {"VTX1", "VTX2"};
static const struct soc_enum isabelle_st_audio_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_audio_texts),
isabelle_st_audio_texts),
- SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_audio_texts),
isabelle_st_audio_texts),
};
static const struct soc_enum isabelle_st_voice_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_voice_texts),
isabelle_st_voice_texts),
- SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_voice_texts),
isabelle_st_voice_texts),
};
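
The isabelle hunks all repair the same bug: SOC_ENUM_SINGLE() was handed an
item count of 1 for two-entry text arrays, so only the first choice was
ever reachable from userspace. Deriving the count with ARRAY_SIZE() (or
switching to the SOC_ENUM_*_DECL macros, as the sta32x, wm8400, wm8770,
wm8900 and wm8994 hunks below do) keeps the count and the array in
lockstep:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *rx1_texts[] = { "VRX1", "ARX1" };

    int main(void)
    {
        /* prints 2, and stays right if another entry is added later */
        printf("%zu\n", ARRAY_SIZE(rx1_texts));
        return 0;
    }
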
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 51f9b3d..9f714ea 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -336,6 +336,7 @@
case M98090_REG_RECORD_TDM_SLOT:
case M98090_REG_SAMPLE_RATE:
case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+ case M98090_REG_REVISION_ID:
return true;
default:
return false;
@@ -1769,16 +1770,6 @@
switch (level) {
case SND_SOC_BIAS_ON:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = regcache_sync(max98090->regmap);
-
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to sync cache: %d\n", ret);
- return ret;
- }
- }
-
if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
/*
* Set to normal bias level.
@@ -1792,6 +1783,16 @@
break;
case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ ret = regcache_sync(max98090->regmap);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to sync cache: %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
case SND_SOC_BIAS_OFF:
/* Set internal pull-up to lowest power mode */
snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
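
The max98090 change moves regcache_sync() from the BIAS_ON transition to
the OFF -> STANDBY edge, so the register cache is written back to the part
as soon as it powers up, before any widgets are enabled at BIAS_ON (and the
revision-ID register joins the set read straight from hardware in the hunk
above). A sketch of the state-machine placement (hypothetical stand-ins
for the regmap calls):

    #include <stdio.h>

    enum bias { BIAS_OFF, BIAS_STANDBY, BIAS_ON };

    static int sync_cache(void) { printf("regcache_sync\n"); return 0; }

    static int set_bias(enum bias *cur, enum bias next)
    {
        if (next == BIAS_STANDBY && *cur == BIAS_OFF) {
            int ret = sync_cache(); /* hardware coherent before power-up */

            if (ret)
                return ret;
        }
        *cur = next;
        return 0;
    }

    int main(void)
    {
        enum bias b = BIAS_OFF;

        set_bias(&b, BIAS_STANDBY); /* sync happens here, not at BIAS_ON */
        set_bias(&b, BIAS_ON);
        return 0;
    }
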
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index a3fb411..8869249 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2093,6 +2093,7 @@
#ifdef CONFIG_ACPI
static struct acpi_device_id rt5640_acpi_match[] = {
{ "INT33CA", 0 },
+ { "10EC5640", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 52e7cb0..fa2b8e0 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -210,7 +210,7 @@
static int si476x_codec_probe(struct snd_soc_codec *codec)
{
codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
- return 0;
+ return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
}
static struct snd_soc_dai_ops si476x_dai_ops = {
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 06edb39..2735361 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -187,42 +187,42 @@
13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
};
-static const struct soc_enum sta32x_drc_ac_enum =
- SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
- 2, sta32x_drc_ac);
-static const struct soc_enum sta32x_auto_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
- 3, sta32x_auto_eq_mode);
-static const struct soc_enum sta32x_auto_gc_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
- 4, sta32x_auto_gc_mode);
-static const struct soc_enum sta32x_auto_xo_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
- 16, sta32x_auto_xo_mode);
-static const struct soc_enum sta32x_preset_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
- 32, sta32x_preset_eq_mode);
-static const struct soc_enum sta32x_limiter_ch1_enum =
- SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch2_enum =
- SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch3_enum =
- SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter1_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter2_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter1_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
-static const struct soc_enum sta32x_limiter2_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
+ STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
+ sta32x_drc_ac);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
+ sta32x_auto_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
+ sta32x_auto_gc_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
+ STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
+ sta32x_auto_xo_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
+ STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
+ sta32x_preset_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
+ STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
+ STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
+ STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
+ STA32X_L1AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
+ STA32X_L2AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
+ STA32X_L1AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
+ STA32X_L2AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);
/* byte array controls for setting biquad, mixer, scaling coefficients;
* for biquads all five coefficients need to be set in one go,
@@ -331,7 +331,7 @@
static int sta32x_cache_sync(struct snd_soc_codec *codec)
{
- struct sta32x_priv *sta32x = codec->control_data;
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
unsigned int mute;
int rc;
@@ -434,7 +434,7 @@
SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
-SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
+SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
/* depending on mode, the attack/release thresholds have
* two different enum definitions; provide both
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 48dc7d2..6d684d9 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -117,19 +117,23 @@
static const char *wm8400_digital_sidetone[] =
{"None", "Left ADC", "Right ADC", "Reserved"};
-static const struct soc_enum wm8400_left_digital_sidetone_enum =
-SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE,
- WM8400_ADC_TO_DACL_SHIFT, 2, wm8400_digital_sidetone);
+static SOC_ENUM_SINGLE_DECL(wm8400_left_digital_sidetone_enum,
+ WM8400_DIGITAL_SIDE_TONE,
+ WM8400_ADC_TO_DACL_SHIFT,
+ wm8400_digital_sidetone);
-static const struct soc_enum wm8400_right_digital_sidetone_enum =
-SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE,
- WM8400_ADC_TO_DACR_SHIFT, 2, wm8400_digital_sidetone);
+static SOC_ENUM_SINGLE_DECL(wm8400_right_digital_sidetone_enum,
+ WM8400_DIGITAL_SIDE_TONE,
+ WM8400_ADC_TO_DACR_SHIFT,
+ wm8400_digital_sidetone);
static const char *wm8400_adcmode[] =
{"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"};
-static const struct soc_enum wm8400_right_adcmode_enum =
-SOC_ENUM_SINGLE(WM8400_ADC_CTRL, WM8400_ADC_HPF_CUT_SHIFT, 3, wm8400_adcmode);
+static SOC_ENUM_SINGLE_DECL(wm8400_right_adcmode_enum,
+ WM8400_ADC_CTRL,
+ WM8400_ADC_HPF_CUT_SHIFT,
+ wm8400_adcmode);
static const struct snd_kcontrol_new wm8400_snd_controls[] = {
/* INMIXL */
@@ -422,9 +426,10 @@
static const char *wm8400_ainlmux[] =
{"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"};
-static const struct soc_enum wm8400_ainlmux_enum =
-SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINLMODE_SHIFT,
- ARRAY_SIZE(wm8400_ainlmux), wm8400_ainlmux);
+static SOC_ENUM_SINGLE_DECL(wm8400_ainlmux_enum,
+ WM8400_INPUT_MIXER1,
+ WM8400_AINLMODE_SHIFT,
+ wm8400_ainlmux);
static const struct snd_kcontrol_new wm8400_dapm_ainlmux_controls =
SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum);
@@ -435,9 +440,10 @@
static const char *wm8400_ainrmux[] =
{"INMIXR Mix", "RXVOICE Mix", "DIFFINR Mix"};
-static const struct soc_enum wm8400_ainrmux_enum =
-SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINRMODE_SHIFT,
- ARRAY_SIZE(wm8400_ainrmux), wm8400_ainrmux);
+static SOC_ENUM_SINGLE_DECL(wm8400_ainrmux_enum,
+ WM8400_INPUT_MIXER1,
+ WM8400_AINRMODE_SHIFT,
+ wm8400_ainrmux);
static const struct snd_kcontrol_new wm8400_dapm_ainrmux_controls =
SOC_DAPM_ENUM("Route", wm8400_ainrmux_enum);
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 89a18d8..5bce210 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -196,8 +196,8 @@
"AIN5", "AIN6", "AIN7", "AIN8"
};
-static const struct soc_enum ain_enum =
- SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+static SOC_ENUM_DOUBLE_DECL(ain_enum,
+ WM8770_ADCMUX, 0, 4, ain_text);
static const struct snd_kcontrol_new ain_mux =
SOC_DAPM_ENUM("Capture Mux", ain_enum);
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index e98bc70..43c2201 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -304,53 +304,53 @@
static const char *mic_bias_level_txt[] = { "0.9*AVDD", "0.65*AVDD" };
-static const struct soc_enum mic_bias_level =
-SOC_ENUM_SINGLE(WM8900_REG_INCTL, 8, 2, mic_bias_level_txt);
+static SOC_ENUM_SINGLE_DECL(mic_bias_level,
+ WM8900_REG_INCTL, 8, mic_bias_level_txt);
static const char *dac_mute_rate_txt[] = { "Fast", "Slow" };
-static const struct soc_enum dac_mute_rate =
-SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 7, 2, dac_mute_rate_txt);
+static SOC_ENUM_SINGLE_DECL(dac_mute_rate,
+ WM8900_REG_DACCTRL, 7, dac_mute_rate_txt);
static const char *dac_deemphasis_txt[] = {
"Disabled", "32kHz", "44.1kHz", "48kHz"
};
-static const struct soc_enum dac_deemphasis =
-SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 4, 4, dac_deemphasis_txt);
+static SOC_ENUM_SINGLE_DECL(dac_deemphasis,
+ WM8900_REG_DACCTRL, 4, dac_deemphasis_txt);
static const char *adc_hpf_cut_txt[] = {
"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"
};
-static const struct soc_enum adc_hpf_cut =
-SOC_ENUM_SINGLE(WM8900_REG_ADCCTRL, 5, 4, adc_hpf_cut_txt);
+static SOC_ENUM_SINGLE_DECL(adc_hpf_cut,
+ WM8900_REG_ADCCTRL, 5, adc_hpf_cut_txt);
static const char *lr_txt[] = {
"Left", "Right"
};
-static const struct soc_enum aifl_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 15, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(aifl_src,
+ WM8900_REG_AUDIO1, 15, lr_txt);
-static const struct soc_enum aifr_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 14, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(aifr_src,
+ WM8900_REG_AUDIO1, 14, lr_txt);
-static const struct soc_enum dacl_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 15, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(dacl_src,
+ WM8900_REG_AUDIO2, 15, lr_txt);
-static const struct soc_enum dacr_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 14, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(dacr_src,
+ WM8900_REG_AUDIO2, 14, lr_txt);
static const char *sidetone_txt[] = {
"Disabled", "Left ADC", "Right ADC"
};
-static const struct soc_enum dacl_sidetone =
-SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 2, 3, sidetone_txt);
+static SOC_ENUM_SINGLE_DECL(dacl_sidetone,
+ WM8900_REG_SIDETONE, 2, sidetone_txt);
-static const struct soc_enum dacr_sidetone =
-SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 0, 3, sidetone_txt);
+static SOC_ENUM_SINGLE_DECL(dacr_sidetone,
+ WM8900_REG_SIDETONE, 0, sidetone_txt);
static const struct snd_kcontrol_new wm8900_snd_controls[] = {
SOC_ENUM("Mic Bias Level", mic_bias_level),
@@ -496,8 +496,8 @@
static const char *wm8900_lp_mux[] = { "Disabled", "Enabled" };
-static const struct soc_enum wm8900_lineout2_lp_mux =
-SOC_ENUM_SINGLE(WM8900_REG_LOUTMIXCTL1, 1, 2, wm8900_lp_mux);
+static SOC_ENUM_SINGLE_DECL(wm8900_lineout2_lp_mux,
+ WM8900_REG_LOUTMIXCTL1, 1, wm8900_lp_mux);
static const struct snd_kcontrol_new wm8900_lineout2_lp =
SOC_DAPM_ENUM("Route", wm8900_lineout2_lp_mux);
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index b7488f1..d4248e0 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -153,7 +153,7 @@
data32 &= 0xffffff;
- wm8994_bulk_write(codec->control_data,
+ wm8994_bulk_write(wm8994->wm8994,
data32 & 0xffffff,
block_len / 2,
(void *)(data + 8));
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 433d59a..2ee23a3 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1562,7 +1562,6 @@
struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
return 0;
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index b9be9cb..adb7206 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -265,21 +265,21 @@
"2.7kHz", "1.35kHz", "675Hz", "370Hz", "180Hz", "90Hz", "45Hz"
};
-static const struct soc_enum sidetone_hpf =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text);
+static SOC_ENUM_SINGLE_DECL(sidetone_hpf,
+ WM8994_SIDETONE, 7, sidetone_hpf_text);
static const char *adc_hpf_text[] = {
"HiFi", "Voice 1", "Voice 2", "Voice 3"
};
-static const struct soc_enum aif1adc1_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif1adc1_hpf,
+ WM8994_AIF1_ADC1_FILTERS, 13, adc_hpf_text);
-static const struct soc_enum aif1adc2_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif1adc2_hpf,
+ WM8994_AIF1_ADC2_FILTERS, 13, adc_hpf_text);
-static const struct soc_enum aif2adc_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif2adc_hpf,
+ WM8994_AIF2_ADC_FILTERS, 13, adc_hpf_text);
static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
@@ -501,39 +501,39 @@
"Left", "Right"
};
-static const struct soc_enum aif1adcl_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1adcl_src,
+ WM8994_AIF1_CONTROL_1, 15, aif_chan_src_text);
-static const struct soc_enum aif1adcr_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1adcr_src,
+ WM8994_AIF1_CONTROL_1, 14, aif_chan_src_text);
-static const struct soc_enum aif2adcl_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2adcl_src,
+ WM8994_AIF2_CONTROL_1, 15, aif_chan_src_text);
-static const struct soc_enum aif2adcr_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2adcr_src,
+ WM8994_AIF2_CONTROL_1, 14, aif_chan_src_text);
-static const struct soc_enum aif1dacl_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1dacl_src,
+ WM8994_AIF1_CONTROL_2, 15, aif_chan_src_text);
-static const struct soc_enum aif1dacr_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1dacr_src,
+ WM8994_AIF1_CONTROL_2, 14, aif_chan_src_text);
-static const struct soc_enum aif2dacl_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacl_src,
+ WM8994_AIF2_CONTROL_2, 15, aif_chan_src_text);
-static const struct soc_enum aif2dacr_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacr_src,
+ WM8994_AIF2_CONTROL_2, 14, aif_chan_src_text);
static const char *osr_text[] = {
"Low Power", "High Performance",
};
-static const struct soc_enum dac_osr =
- SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text);
+static SOC_ENUM_SINGLE_DECL(dac_osr,
+ WM8994_OVERSAMPLING, 0, osr_text);
-static const struct soc_enum adc_osr =
- SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text);
+static SOC_ENUM_SINGLE_DECL(adc_osr,
+ WM8994_OVERSAMPLING, 1, osr_text);
static const struct snd_kcontrol_new wm8994_snd_controls[] = {
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
@@ -690,17 +690,20 @@
"30ms", "125ms", "250ms", "500ms",
};
-static const struct soc_enum wm8958_aif1dac1_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF1_DAC1_NOISE_GATE,
- WM8958_AIF1DAC1_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac1_ng_hold,
+ WM8958_AIF1_DAC1_NOISE_GATE,
+ WM8958_AIF1DAC1_NG_THR_SHIFT,
+ wm8958_ng_text);
-static const struct soc_enum wm8958_aif1dac2_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF1_DAC2_NOISE_GATE,
- WM8958_AIF1DAC2_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac2_ng_hold,
+ WM8958_AIF1_DAC2_NOISE_GATE,
+ WM8958_AIF1DAC2_NG_THR_SHIFT,
+ wm8958_ng_text);
-static const struct soc_enum wm8958_aif2dac_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF2_DAC_NOISE_GATE,
- WM8958_AIF2DAC_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif2dac_ng_hold,
+ WM8958_AIF2_DAC_NOISE_GATE,
+ WM8958_AIF2DAC_NG_THR_SHIFT,
+ wm8958_ng_text);
static const struct snd_kcontrol_new wm8958_snd_controls[] = {
SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv),
@@ -1341,8 +1344,8 @@
"DMIC",
};
-static const struct soc_enum adc_enum =
- SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
+static SOC_ENUM_SINGLE_DECL(adc_enum,
+ 0, 0, adc_mux_text);
static const struct snd_kcontrol_new adcl_mux =
SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
@@ -1478,14 +1481,14 @@
"ADC/DMIC1", "DMIC2",
};
-static const struct soc_enum sidetone1_enum =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 0, 2, sidetone_text);
+static SOC_ENUM_SINGLE_DECL(sidetone1_enum,
+ WM8994_SIDETONE, 0, sidetone_text);
static const struct snd_kcontrol_new sidetone1_mux =
SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
-static const struct soc_enum sidetone2_enum =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 1, 2, sidetone_text);
+static SOC_ENUM_SINGLE_DECL(sidetone2_enum,
+ WM8994_SIDETONE, 1, sidetone_text);
static const struct snd_kcontrol_new sidetone2_mux =
SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
@@ -1498,22 +1501,24 @@
"None", "ADCDAT",
};
-static const struct soc_enum aif1_loopback_enum =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2,
- loopback_text);
+static SOC_ENUM_SINGLE_DECL(aif1_loopback_enum,
+ WM8994_AIF1_CONTROL_2,
+ WM8994_AIF1_LOOPBACK_SHIFT,
+ loopback_text);
static const struct snd_kcontrol_new aif1_loopback =
SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum);
-static const struct soc_enum aif2_loopback_enum =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2,
- loopback_text);
+static SOC_ENUM_SINGLE_DECL(aif2_loopback_enum,
+ WM8994_AIF2_CONTROL_2,
+ WM8994_AIF2_LOOPBACK_SHIFT,
+ loopback_text);
static const struct snd_kcontrol_new aif2_loopback =
SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum);
-static const struct soc_enum aif1dac_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text);
+static SOC_ENUM_SINGLE_DECL(aif1dac_enum,
+ WM8994_POWER_MANAGEMENT_6, 0, aif1dac_text);
static const struct snd_kcontrol_new aif1dac_mux =
SOC_DAPM_ENUM("AIF1DAC Mux", aif1dac_enum);
@@ -1522,8 +1527,8 @@
"AIF2DACDAT", "AIF3DACDAT",
};
-static const struct soc_enum aif2dac_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 1, 2, aif2dac_text);
+static SOC_ENUM_SINGLE_DECL(aif2dac_enum,
+ WM8994_POWER_MANAGEMENT_6, 1, aif2dac_text);
static const struct snd_kcontrol_new aif2dac_mux =
SOC_DAPM_ENUM("AIF2DAC Mux", aif2dac_enum);
@@ -1532,8 +1537,8 @@
"AIF2ADCDAT", "AIF3DACDAT",
};
-static const struct soc_enum aif2adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 2, 2, aif2adc_text);
+static SOC_ENUM_SINGLE_DECL(aif2adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 2, aif2adc_text);
static const struct snd_kcontrol_new aif2adc_mux =
SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum);
@@ -1542,14 +1547,14 @@
"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM",
};
-static const struct soc_enum wm8994_aif3adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text);
+static SOC_ENUM_SINGLE_DECL(wm8994_aif3adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text);
static const struct snd_kcontrol_new wm8994_aif3adc_mux =
SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum);
-static const struct soc_enum wm8958_aif3adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif3adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text);
static const struct snd_kcontrol_new wm8958_aif3adc_mux =
SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum);
@@ -1558,8 +1563,8 @@
"None", "AIF2ADCL", "AIF2ADCR",
};
-static const struct soc_enum mono_pcm_out_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text);
+static SOC_ENUM_SINGLE_DECL(mono_pcm_out_enum,
+ WM8994_POWER_MANAGEMENT_6, 9, mono_pcm_out_text);
static const struct snd_kcontrol_new mono_pcm_out_mux =
SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum);
@@ -1569,14 +1574,14 @@
};
/* Note that these two controls shouldn't be simultaneously switched to AIF3 */
-static const struct soc_enum aif2dacl_src_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacl_src_enum,
+ WM8994_POWER_MANAGEMENT_6, 7, aif2dac_src_text);
static const struct snd_kcontrol_new aif2dacl_src_mux =
SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum);
-static const struct soc_enum aif2dacr_src_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacr_src_enum,
+ WM8994_POWER_MANAGEMENT_6, 8, aif2dac_src_text);
static const struct snd_kcontrol_new aif2dacr_src_mux =
SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 70ff377..5e3bc3c 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -399,6 +399,7 @@
.driver = {
.name = "davinci_evm",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
.of_match_table = of_match_ptr(davinci_evm_dt_ids),
},
};
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b7858bf..670afa2 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -263,7 +263,9 @@
unsigned int fmt)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+ pm_runtime_get_sync(mcasp->dev);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
case SND_SOC_DAIFMT_AC97:
@@ -317,7 +319,8 @@
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -354,10 +357,12 @@
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
-
- return 0;
+out:
+ pm_runtime_put_sync(mcasp->dev);
+ return ret;
}
static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
@@ -448,7 +453,7 @@
return 0;
}
-static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream,
+static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
int channels)
{
int i;
@@ -524,12 +529,18 @@
return 0;
}
-static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream)
+static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
{
int i, active_slots;
u32 mask = 0;
u32 busel = 0;
+ if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) {
+ dev_err(mcasp->dev, "tdm slot %d not supported\n",
+ mcasp->tdm_slots);
+ return -EINVAL;
+ }
+
active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
for (i = 0; i < active_slots; i++)
mask |= (1 << i);
@@ -539,35 +550,21 @@
if (!mcasp->dat_port)
busel = TXSEL;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /* bit stream is MSB first with no delay */
- /* DSP_B mode */
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+ FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
- if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
- mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
- FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
- else
- printk(KERN_ERR "playback tdm slot %d not supported\n",
- mcasp->tdm_slots);
- } else {
- /* bit stream is MSB first with no delay */
- /* DSP_B mode */
- mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
+ FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
- if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
- mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
- FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
- else
- printk(KERN_ERR "capture tdm slot %d not supported\n",
- mcasp->tdm_slots);
- }
+ return 0;
}
/* S/PDIF */
-static void davinci_hw_dit_param(struct davinci_mcasp *mcasp)
+static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp)
{
/* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
and LSB first */
@@ -589,6 +586,8 @@
/* Enable the DIT */
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
+
+ return 0;
}
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
@@ -605,13 +604,14 @@
u8 slots = mcasp->tdm_slots;
u8 active_serializers;
int channels;
+ int ret;
struct snd_interval *pcm_channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
channels = pcm_channels->min;
active_serializers = (channels + slots - 1) / slots;
- if (davinci_hw_common_param(mcasp, substream->stream, channels) == -EINVAL)
+ if (mcasp_common_hw_param(mcasp, substream->stream, channels) == -EINVAL)
return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_level = mcasp->txnumevt * active_serializers;
@@ -619,9 +619,12 @@
fifo_level = mcasp->rxnumevt * active_serializers;
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
- davinci_hw_dit_param(mcasp);
+ ret = mcasp_dit_hw_param(mcasp);
else
- davinci_hw_param(mcasp, substream->stream);
+ ret = mcasp_i2s_hw_param(mcasp, substream->stream);
+
+ if (ret)
+ return ret;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
@@ -678,19 +681,9 @@
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ret = pm_runtime_get_sync(mcasp->dev);
- if (IS_ERR_VALUE(ret))
- dev_err(mcasp->dev, "pm_runtime_get_sync() failed\n");
davinci_mcasp_start(mcasp, substream->stream);
break;
-
case SNDRV_PCM_TRIGGER_SUSPEND:
- davinci_mcasp_stop(mcasp, substream->stream);
- ret = pm_runtime_put_sync(mcasp->dev);
- if (IS_ERR_VALUE(ret))
- dev_err(mcasp->dev, "pm_runtime_put_sync() failed\n");
- break;
-
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_mcasp_stop(mcasp, substream->stream);
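The runtime-PM reference for register access now lives entirely inside
davinci_mcasp_set_fmt() (note the matching removal from the trigger path in
the last hunk), with a single exit label keeping the get/put pair balanced.
A minimal sketch of the pattern, with is_supported() as a hypothetical
stand-in for the format checks:

	/* sketch: balance pm_runtime get/put by funnelling every error
	 * path through one exit label
	 */
	static int example_set_fmt(struct davinci_mcasp *mcasp, unsigned int fmt)
	{
		int ret = 0;

		pm_runtime_get_sync(mcasp->dev); /* keep device powered for reg I/O */
		if (!is_supported(fmt)) {        /* hypothetical validity check */
			ret = -EINVAL;
			goto out;
		}
		/* ... register writes ... */
	out:
		pm_runtime_put_sync(mcasp->dev); /* dropped on every return path */
		return ret;
	}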
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index d0c72ed..c84026c 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -326,7 +326,7 @@
regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
- ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(tx_mask));
+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
@@ -334,7 +334,7 @@
regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
- ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(rx_mask));
+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
esai_priv->slot_width = slot_width;
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 9c9f957..75e1403 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -322,7 +322,7 @@
#define ESAI_xSMB_xS_SHIFT 0
#define ESAI_xSMB_xS_WIDTH 16
#define ESAI_xSMB_xS_MASK (((1 << ESAI_xSMB_xS_WIDTH) - 1) << ESAI_xSMB_xS_SHIFT)
-#define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMA_xS_MASK)
+#define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMB_xS_MASK)
/* Port C Direction Register -- REG_ESAI_PRRC 0xF8 */
#define ESAI_PRRC_PDC_SHIFT 0
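For reference, the slot-mask accessors split one 32-bit TDM mask across the
two registers: the xSMA macro keeps the low 16 bits and the xSMB macro
(fixed above to test against its own mask) keeps the high bits. A worked
sketch, assuming ESAI_xSMA_xS(v) is defined as ((v) & ESAI_xSMA_xS_MASK)
and that both _WIDTH values are 16:

	u32 tx_mask = 0x0003000f;        /* hypothetical mask: slots 0-3 and 16-17 */
	u32 sma = ESAI_xSMA_xS(tx_mask); /* 0x000f -> REG_ESAI_TSMA (slots 0-15) */
	u32 smb = ESAI_xSMB_xS(tx_mask); /* 0x0003 -> REG_ESAI_TSMB (slots 16+) */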
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 79cee78..a2fd732 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -160,7 +160,6 @@
.driver = {
.name = "imx_mc13783",
.owner = THIS_MODULE,
- .pm = &snd_soc_pm_ops,
},
.probe = imx_mc13783_probe,
.remove = imx_mc13783_remove
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index f2beae7..1cb22dd 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -33,8 +33,7 @@
static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct imx_sgtl5000_data *data = container_of(rtd->card,
- struct imx_sgtl5000_data, card);
+ struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(rtd->card);
struct device *dev = rtd->card->dev;
int ret;
@@ -159,13 +158,15 @@
data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto fail;
}
- platform_set_drvdata(pdev, data);
of_node_put(ssi_np);
of_node_put(codec_np);
@@ -184,7 +185,8 @@
static int imx_sgtl5000_remove(struct platform_device *pdev)
{
- struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(card);
clk_put(data->codec_clk);
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 3fd76bc..3a3d17c 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -71,7 +71,7 @@
{
struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
struct imx_priv *priv = &card_priv;
- struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
struct device *dev = &priv->pdev->dev;
unsigned int pll_out;
int ret;
@@ -137,7 +137,7 @@
{
struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
struct imx_priv *priv = &card_priv;
- struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
struct device *dev = &priv->pdev->dev;
int ret;
@@ -264,13 +264,15 @@
data->card.late_probe = imx_wm8962_late_probe;
data->card.set_bias_level = imx_wm8962_set_bias_level;
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto clk_fail;
}
- platform_set_drvdata(pdev, data);
of_node_put(ssi_np);
of_node_put(codec_np);
@@ -289,7 +291,8 @@
static int imx_wm8962_remove(struct platform_device *pdev)
{
- struct imx_wm8962_data *data = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
if (!IS_ERR(data->codec_clk))
clk_disable_unprepare(data->codec_clk);
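Both i.MX machine drivers now layer their driver data: the platform drvdata
points at the struct snd_soc_card (which generic helpers such as
snd_soc_pm_ops presumably expect from dev_get_drvdata()), and the private
state hangs off the card instead. Retrieval then takes two hops, sketched:

	/* sketch of the drvdata layering introduced above */
	platform_set_drvdata(pdev, &data->card);     /* dev drvdata -> card */
	snd_soc_card_set_drvdata(&data->card, data); /* card drvdata -> private */

	/* later, from anything that can reach the pdev or the card: */
	struct snd_soc_card *card = platform_get_drvdata(pdev);
	struct imx_wm8962_data *priv = snd_soc_card_get_drvdata(card);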
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 3fde9e4..d163e18 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -305,7 +305,9 @@
int err;
struct device *dev;
- if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
+ if (!of_have_populated_dt() ||
+ (!of_machine_is_compatible("nokia,n810") &&
+ !of_machine_is_compatible("nokia,n810-wimax")))
return -ENODEV;
n810_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 454f41c..3507574 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -59,7 +59,7 @@
select SND_SOC_WM8750
select SND_S3C2412_SOC_I2S
help
- Sat Y if you want to add support for SoC audio on the Jive.
+ Say Y if you want to add support for SoC audio on the Jive.
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
@@ -145,11 +145,11 @@
config SND_SOC_SAMSUNG_SMDK_WM9713
tristate "SoC AC97 Audio support for SMDK with WM9713"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+ depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
select SND_SOC_WM9713
select SND_SAMSUNG_AC97
help
- Sat Y if you want to add support for SoC audio on the SMDK.
+ Say Y if you want to add support for SoC audio on the SMDK.
config SND_SOC_SMARTQ
tristate "SoC I2S Audio support for SmartQ board"
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index dc8ff13..b9dc6ac 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1218,7 +1218,7 @@
ret = regulator_allow_bypass(w->regulator, false);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to bypass %s: %d\n",
+ "ASoC: Failed to unbypass %s: %d\n",
w->name, ret);
}
@@ -1228,7 +1228,7 @@
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to unbypass %s: %d\n",
+ "ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
@@ -3210,15 +3210,11 @@
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
const char *pin = (const char *)kcontrol->private_value;
- mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (ucontrol->value.integer.value[0])
snd_soc_dapm_enable_pin(&card->dapm, pin);
else
snd_soc_dapm_disable_pin(&card->dapm, pin);
- mutex_unlock(&card->dapm_mutex);
-
snd_soc_dapm_sync(&card->dapm);
return 0;
}
@@ -3248,7 +3244,7 @@
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to unbypass %s: %d\n",
+ "ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
break;
@@ -3767,22 +3763,84 @@
}
/**
+ * snd_soc_dapm_enable_pin_unlocked - enable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Enables input/output pin and its parents or children widgets iff there is
+ * a valid audio route and active audio stream.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 1);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin_unlocked);
+
+/**
* snd_soc_dapm_enable_pin - enable pin.
* @dapm: DAPM context
* @pin: pin name
*
* Enables input/output pin and its parents or children widgets iff there is
* a valid audio route and active audio stream.
+ *
* NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
* do any widget power switching.
*/
int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 1);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 1);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
/**
+ * snd_soc_dapm_force_enable_pin_unlocked - force a pin to be enabled
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Enables input/output pin regardless of any other state. This is
+ * intended for use with microphone bias supplies used in microphone
+ * jack detection.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+
+ if (!w) {
+ dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
+ return -EINVAL;
+ }
+
+ dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
+ w->connected = 1;
+ w->force = 1;
+ dapm_mark_dirty(w, "force enable");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin_unlocked);
+
+/**
* snd_soc_dapm_force_enable_pin - force a pin to be enabled
* @dapm: DAPM context
* @pin: pin name
@@ -3797,39 +3855,86 @@
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin)
{
- struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+ int ret;
- if (!w) {
- dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
- return -EINVAL;
- }
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
- w->connected = 1;
- w->force = 1;
- dapm_mark_dirty(w, "force enable");
+ ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
- return 0;
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
/**
+ * snd_soc_dapm_disable_pin_unlocked - disable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Disables input/output pin and its parents or children widgets.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 0);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin_unlocked);
+
+/**
* snd_soc_dapm_disable_pin - disable pin.
* @dapm: DAPM context
* @pin: pin name
*
* Disables input/output pin and its parents or children widgets.
+ *
* NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
* do any widget power switching.
*/
int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 0);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 0);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
/**
+ * snd_soc_dapm_nc_pin_unlocked - permanently disable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Marks the specified pin as being not connected, disabling it along
+ * any parent or child widgets. At present this is identical to
+ * snd_soc_dapm_disable_pin() but in future it will be extended to do
+ * additional things such as disabling controls which only affect
+ * paths through the pin.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 0);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin_unlocked);
+
+/**
* snd_soc_dapm_nc_pin - permanently disable pin.
* @dapm: DAPM context
* @pin: pin name
@@ -3845,7 +3950,15 @@
*/
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 0);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 0);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);
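The _unlocked variants exist so callers that already hold the card's
dapm_mutex (mic-bias handling during jack detection is the stated use case
for force-enable) can batch several pin updates under a single lock
acquisition. A usage sketch, with hypothetical pin names and a card pointer
assumed to be in scope:

	/* sketch: batch pin changes, then sync outside the lock */
	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	snd_soc_dapm_force_enable_pin_unlocked(&card->dapm, "MICBIAS");
	snd_soc_dapm_disable_pin_unlocked(&card->dapm, "Headphone");
	mutex_unlock(&card->dapm_mutex);
	snd_soc_dapm_sync(&card->dapm); /* performs the widget power switching */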
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 47e1ce7..28522bd 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1989,6 +1989,7 @@
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
if (paths < 0) {
+ dpcm_path_put(&list);
dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "playback");
mutex_unlock(&card->mutex);
@@ -2018,6 +2019,7 @@
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
if (paths < 0) {
+ dpcm_path_put(&list);
dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "capture");
mutex_unlock(&card->mutex);
@@ -2082,6 +2084,7 @@
fe->dpcm[stream].runtime = fe_substream->runtime;
if (dpcm_path_get(fe, stream, &list) <= 0) {
+ dpcm_path_put(&list);
dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
fe->dai_link->name, stream ? "capture" : "playback");
}
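The added dpcm_path_put() calls suggest dpcm_path_get() can allocate the
widget list even when it returns an error, so every exit path must release
it. The resulting pairing, sketched:

	/* sketch of the acquire/release pairing after this fix */
	struct snd_soc_dapm_widget_list *list;
	int paths = dpcm_path_get(fe, stream, &list);

	if (paths < 0) {
		dpcm_path_put(&list); /* drop a partially built list */
		return paths;
	}
	/* ... bring up the backends found in "list" ... */
	dpcm_path_put(&list);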
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index e0305a1..9edd68d 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -183,14 +183,16 @@
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drvdata->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
platform_set_drvdata(pdev, drvdata);
drvdata->physbase = r->start;
if (sizeof(drvdata->physbase) > sizeof(r->start) &&
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 44b0ba4..1bed780 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -883,6 +883,7 @@
}
break;
+ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 32af6b7..d1d72ff 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -328,6 +328,11 @@
{}
};
+static const struct usbmix_name_map kef_x300a_map[] = {
+ { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+ { 0 }
+};
+
/*
* Control map entries
*/
@@ -419,6 +424,10 @@
.id = USB_ID(0x200c, 0x1018),
.map = ebox44_map,
},
+ {
+ .id = USB_ID(0x27ac, 0x1000),
+ .map = kef_x300a_map,
+ },
{ 0 } /* terminator */
};
diff --git a/tools/include/linux/hash.h b/tools/include/linux/hash.h
new file mode 100644
index 0000000..d026c65
--- /dev/null
+++ b/tools/include/linux/hash.h
@@ -0,0 +1,5 @@
+#include "../../../include/linux/hash.h"
+
+#ifndef _TOOLS_LINUX_HASH_H
+#define _TOOLS_LINUX_HASH_H
+#endif
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index ed2f51e..ce00f7e 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -9,8 +9,10 @@
LIB_OBJS=
LIB_H += fs/debugfs.h
+LIB_H += fs/fs.h
LIB_OBJS += $(OUTPUT)fs/debugfs.o
+LIB_OBJS += $(OUTPUT)fs/fs.o
LIBFILE = libapikfs.a
diff --git a/tools/perf/util/fs.c b/tools/lib/api/fs/fs.c
similarity index 91%
rename from tools/perf/util/fs.c
rename to tools/lib/api/fs/fs.c
index f5be1f2..5b5eb78 100644
--- a/tools/perf/util/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -1,8 +1,13 @@
-/* TODO merge/factor into tools/lib/lk/debugfs.c */
+/* TODO merge/factor in debugfs.c here */
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/vfs.h>
-#include "util.h"
-#include "util/fs.h"
+#include "debugfs.h"
+#include "fs.h"
static const char * const sysfs__fs_known_mountpoints[] = {
"/sys",
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
new file mode 100644
index 0000000..cb70495
--- /dev/null
+++ b/tools/lib/api/fs/fs.h
@@ -0,0 +1,14 @@
+#ifndef __API_FS__
+#define __API_FS__
+
+#ifndef SYSFS_MAGIC
+#define SYSFS_MAGIC 0x62656572
+#endif
+
+#ifndef PROC_SUPER_MAGIC
+#define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+const char *sysfs__mountpoint(void);
+const char *procfs__mountpoint(void);
+#endif /* __API_FS__ */
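A minimal consumer of the relocated API, sketched; it assumes the helpers
return a cached mount-point string or NULL when the filesystem cannot be
found, and that the program links against libapikfs:

	#include <stdio.h>
	#include "fs.h" /* tools/lib/api/fs/fs.h */

	int main(void)
	{
		const char *sysfs = sysfs__mountpoint();
		const char *procfs = procfs__mountpoint();

		if (!sysfs || !procfs) /* NULL assumed to mean "not found" */
			return 1;
		printf("sysfs at %s, procfs at %s\n", sysfs, procfs);
		return 0;
	}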
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index da8b7aa..07b0b75 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -87,8 +87,8 @@
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
-srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree := $(CURDIR)
+srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
+objtree := $(realpath $(CURDIR))
src := $(srctree)
obj := $(objtree)
@@ -112,7 +112,7 @@
LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
-INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index f8465a8..23bd69c 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -418,7 +418,7 @@
__attribute__((constructor)) static void init_preload(void)
{
- if (__init_state != done)
+ if (__init_state == done)
return;
#ifndef __GLIBC__
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
old mode 100644
new mode 100755
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
new file mode 100644
index 0000000..d82b170b
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h
index 4c99fcb..042ee8e 100644
--- a/tools/lib/lockdep/uinclude/linux/rcu.h
+++ b/tools/lib/lockdep/uinclude/linux/rcu.h
@@ -13,4 +13,9 @@
return 1;
}
+static inline bool rcu_is_watching(void)
+{
+ return false;
+}
+
#endif
diff --git a/tools/net/Makefile b/tools/net/Makefile
index 004cd74..ee577ea 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -12,7 +12,7 @@
all : bpf_jit_disasm bpf_dbg bpf_asm
-bpf_jit_disasm : CFLAGS = -Wall -O2
+bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
bpf_jit_disasm : bpf_jit_disasm.o
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 888d511..1d78a40 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -18,6 +18,10 @@
"perf mem -t <TYPE> report" displays the result. It invokes perf report with the
right set of options to display a memory access profile.
+Note that on Intel systems the memory latency reported is the use-latency,
+not the pure load (or store) latency. Use-latency includes any pipeline
+queueing delays in addition to the memory subsystem latency.
+
OPTIONS
-------
<command>...::
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index b715cb7..1513935 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -136,6 +136,8 @@
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
+
LINE SYNTAX
-----------
Line range is described by following syntax.
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index f41572d..c0c87c8 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -6,6 +6,7 @@
tools/lib/symbol/kallsyms.h
tools/include/asm/bug.h
tools/include/linux/compiler.h
+tools/include/linux/hash.h
include/linux/const.h
include/linux/perf_event.h
include/linux/rbtree.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 7257e7e..50d875d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -7,6 +7,8 @@
# Define V to have a more verbose compile.
#
+# Define VF to have a more verbose feature check output.
+#
# Define O to save output files in a separate directory.
#
# Define ARCH as name of target architecture if you want cross-builds.
@@ -55,6 +57,9 @@
# Define NO_LIBAUDIT if you do not want libaudit support
#
# Define NO_LIBBIONIC if you do not want bionic support
+#
+# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
+# for dwarf backtrace post unwind.
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
@@ -208,7 +213,7 @@
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
LIB_H += ../../include/uapi/linux/const.h
-LIB_H += ../../include/linux/hash.h
+LIB_H += ../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
@@ -218,9 +223,7 @@
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
LIB_H += util/include/linux/export.h
-LIB_H += util/include/linux/magic.h
LIB_H += util/include/linux/poison.h
-LIB_H += util/include/linux/prefetch.h
LIB_H += util/include/linux/rbtree.h
LIB_H += util/include/linux/rbtree_augmented.h
LIB_H += util/include/linux/string.h
@@ -244,7 +247,6 @@
LIB_H += util/callchain.h
LIB_H += util/build-id.h
LIB_H += util/debug.h
-LIB_H += util/fs.h
LIB_H += util/pmu.h
LIB_H += util/event.h
LIB_H += util/evsel.h
@@ -306,7 +308,6 @@
LIB_OBJS += $(OUTPUT)util/build-id.o
LIB_OBJS += $(OUTPUT)util/config.o
LIB_OBJS += $(OUTPUT)util/ctype.o
-LIB_OBJS += $(OUTPUT)util/fs.o
LIB_OBJS += $(OUTPUT)util/pmu.o
LIB_OBJS += $(OUTPUT)util/environment.o
LIB_OBJS += $(OUTPUT)util/event.o
@@ -408,6 +409,11 @@
LIB_OBJS += $(OUTPUT)tests/code-reading.o
LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
+ifndef NO_DWARF_UNWIND
+ifeq ($(ARCH),x86)
+LIB_OBJS += $(OUTPUT)tests/dwarf-unwind.o
+endif
+endif
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -420,6 +426,9 @@
endif
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
@@ -475,8 +484,13 @@
endif # NO_DWARF
endif # NO_LIBELF
+ifndef NO_LIBDW_DWARF_UNWIND
+ LIB_OBJS += $(OUTPUT)util/unwind-libdw.o
+ LIB_H += util/unwind-libdw.h
+endif
+
ifndef NO_LIBUNWIND
- LIB_OBJS += $(OUTPUT)util/unwind.o
+ LIB_OBJS += $(OUTPUT)util/unwind-libunwind.o
endif
LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
@@ -533,6 +547,7 @@
ifeq ($(ARCH),x86)
LIB_H += arch/x86/include/perf_regs.h
endif
+ LIB_OBJS += $(OUTPUT)util/perf_regs.o
endif
ifndef NO_LIBNUMA
@@ -655,6 +670,9 @@
-DPYTHON='"$(PYTHON_WORD)"' \
$<
+$(OUTPUT)tests/dwarf-unwind.o: tests/dwarf-unwind.c
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -fno-optimize-sibling-calls $<
+
$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
@@ -707,9 +725,15 @@
# we depend the various files onto their directories.
DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(GTK_OBJS)
DIRECTORY_DEPS += $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
+# no need to add flex objects, because they depend on bison ones
+DIRECTORY_DEPS += $(OUTPUT)util/parse-events-bison.c
+DIRECTORY_DEPS += $(OUTPUT)util/pmu-bison.c
+
+OUTPUT_DIRECTORIES := $(sort $(dir $(DIRECTORY_DEPS)))
+
+$(DIRECTORY_DEPS): | $(OUTPUT_DIRECTORIES)
# In the second step, we make a rule to actually create these directories
-$(sort $(dir $(DIRECTORY_DEPS))):
+$(OUTPUT_DIRECTORIES):
$(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
$(LIB_FILE): $(LIB_OBJS)
@@ -886,7 +910,7 @@
clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf
- $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
+ $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index fe9b61e..67e9b3d 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -3,5 +3,5 @@
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
endif
diff --git a/tools/perf/arch/arm/util/unwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c
similarity index 95%
rename from tools/perf/arch/arm/util/unwind.c
rename to tools/perf/arch/arm/util/unwind-libunwind.c
index da3dc95..729ed69 100644
--- a/tools/perf/arch/arm/util/unwind.c
+++ b/tools/perf/arch/arm/util/unwind-libunwind.c
@@ -4,7 +4,7 @@
#include "perf_regs.h"
#include "../../util/unwind.h"
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
{
switch (regnum) {
case UNW_ARM_R0:
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 8801fe0..1641542 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -3,7 +3,14 @@
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
+endif
+ifndef NO_LIBDW_DWARF_UNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o
+endif
+ifndef NO_DWARF_UNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o
endif
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index e84ca76..fc819ca 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -5,14 +5,20 @@
#include "../../util/types.h"
#include <asm/perf_regs.h>
+void perf_regs_load(u64 *regs);
+
#ifndef HAVE_ARCH_X86_64_SUPPORT
#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
+#define PERF_REGS_MAX PERF_REG_X86_32_MAX
+#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#else
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
(1ULL << PERF_REG_X86_ES) | \
(1ULL << PERF_REG_X86_FS) | \
(1ULL << PERF_REG_X86_GS))
#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
+#define PERF_REGS_MAX PERF_REG_X86_64_MAX
+#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#endif
#define PERF_REG_IP PERF_REG_X86_IP
#define PERF_REG_SP PERF_REG_X86_SP
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
new file mode 100644
index 0000000..b602ad9
--- /dev/null
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -0,0 +1,59 @@
+#include <string.h>
+#include "perf_regs.h"
+#include "thread.h"
+#include "map.h"
+#include "event.h"
+#include "tests/tests.h"
+
+#define STACK_SIZE 8192
+
+static int sample_ustack(struct perf_sample *sample,
+ struct thread *thread, u64 *regs)
+{
+ struct stack_dump *stack = &sample->user_stack;
+ struct map *map;
+ unsigned long sp;
+ u64 stack_size, *buf;
+
+ buf = malloc(STACK_SIZE);
+ if (!buf) {
+ pr_debug("failed to allocate sample uregs data\n");
+ return -1;
+ }
+
+ sp = (unsigned long) regs[PERF_REG_X86_SP];
+
+ map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+ if (!map) {
+ pr_debug("failed to get stack map\n");
+ return -1;
+ }
+
+ stack_size = map->end - sp;
+ stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
+
+ memcpy(buf, (void *) sp, stack_size);
+ stack->data = (char *) buf;
+ stack->size = stack_size;
+ return 0;
+}
+
+int test__arch_unwind_sample(struct perf_sample *sample,
+ struct thread *thread)
+{
+ struct regs_dump *regs = &sample->user_regs;
+ u64 *buf;
+
+ buf = malloc(sizeof(u64) * PERF_REGS_MAX);
+ if (!buf) {
+ pr_debug("failed to allocate sample uregs data\n");
+ return -1;
+ }
+
+ perf_regs_load(buf);
+ regs->abi = PERF_SAMPLE_REGS_ABI;
+ regs->regs = buf;
+ regs->mask = PERF_REGS_MASK;
+
+ return sample_ustack(sample, thread, buf);
+}
diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S
new file mode 100644
index 0000000..99167bf
--- /dev/null
+++ b/tools/perf/arch/x86/tests/regs_load.S
@@ -0,0 +1,92 @@
+
+#include <linux/linkage.h>
+
+#define AX 0
+#define BX 1 * 8
+#define CX 2 * 8
+#define DX 3 * 8
+#define SI 4 * 8
+#define DI 5 * 8
+#define BP 6 * 8
+#define SP 7 * 8
+#define IP 8 * 8
+#define FLAGS 9 * 8
+#define CS 10 * 8
+#define SS 11 * 8
+#define DS 12 * 8
+#define ES 13 * 8
+#define FS 14 * 8
+#define GS 15 * 8
+#define R8 16 * 8
+#define R9 17 * 8
+#define R10 18 * 8
+#define R11 19 * 8
+#define R12 20 * 8
+#define R13 21 * 8
+#define R14 22 * 8
+#define R15 23 * 8
+
+.text
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+ENTRY(perf_regs_load)
+ movq %rax, AX(%rdi)
+ movq %rbx, BX(%rdi)
+ movq %rcx, CX(%rdi)
+ movq %rdx, DX(%rdi)
+ movq %rsi, SI(%rdi)
+ movq %rdi, DI(%rdi)
+ movq %rbp, BP(%rdi)
+
+ leaq 8(%rsp), %rax /* exclude this call. */
+ movq %rax, SP(%rdi)
+
+ movq 0(%rsp), %rax
+ movq %rax, IP(%rdi)
+
+ movq $0, FLAGS(%rdi)
+ movq $0, CS(%rdi)
+ movq $0, SS(%rdi)
+ movq $0, DS(%rdi)
+ movq $0, ES(%rdi)
+ movq $0, FS(%rdi)
+ movq $0, GS(%rdi)
+
+ movq %r8, R8(%rdi)
+ movq %r9, R9(%rdi)
+ movq %r10, R10(%rdi)
+ movq %r11, R11(%rdi)
+ movq %r12, R12(%rdi)
+ movq %r13, R13(%rdi)
+ movq %r14, R14(%rdi)
+ movq %r15, R15(%rdi)
+ ret
+ENDPROC(perf_regs_load)
+#else
+ENTRY(perf_regs_load)
+ push %edi
+ movl 8(%esp), %edi
+ movl %eax, AX(%edi)
+ movl %ebx, BX(%edi)
+ movl %ecx, CX(%edi)
+ movl %edx, DX(%edi)
+ movl %esi, SI(%edi)
+ pop %eax
+ movl %eax, DI(%edi)
+ movl %ebp, BP(%edi)
+
+ leal 4(%esp), %eax /* exclude this call. */
+ movl %eax, SP(%edi)
+
+ movl 0(%esp), %eax
+ movl %eax, IP(%edi)
+
+ movl $0, FLAGS(%edi)
+ movl $0, CS(%edi)
+ movl $0, SS(%edi)
+ movl $0, DS(%edi)
+ movl $0, ES(%edi)
+ movl $0, FS(%edi)
+ movl $0, GS(%edi)
+ ret
+ENDPROC(perf_regs_load)
+#endif
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
new file mode 100644
index 0000000..c4b7217
--- /dev/null
+++ b/tools/perf/arch/x86/util/unwind-libdw.c
@@ -0,0 +1,51 @@
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct regs_dump *user_regs = &ui->sample->user_regs;
+ Dwarf_Word dwarf_regs[17];
+ unsigned nregs;
+
+#define REG(r) ({ \
+ Dwarf_Word val = 0; \
+ perf_reg_value(&val, user_regs, PERF_REG_X86_##r); \
+ val; \
+})
+
+ if (user_regs->abi == PERF_SAMPLE_REGS_ABI_32) {
+ dwarf_regs[0] = REG(AX);
+ dwarf_regs[1] = REG(CX);
+ dwarf_regs[2] = REG(DX);
+ dwarf_regs[3] = REG(BX);
+ dwarf_regs[4] = REG(SP);
+ dwarf_regs[5] = REG(BP);
+ dwarf_regs[6] = REG(SI);
+ dwarf_regs[7] = REG(DI);
+ dwarf_regs[8] = REG(IP);
+ nregs = 9;
+ } else {
+ dwarf_regs[0] = REG(AX);
+ dwarf_regs[1] = REG(DX);
+ dwarf_regs[2] = REG(CX);
+ dwarf_regs[3] = REG(BX);
+ dwarf_regs[4] = REG(SI);
+ dwarf_regs[5] = REG(DI);
+ dwarf_regs[6] = REG(BP);
+ dwarf_regs[7] = REG(SP);
+ dwarf_regs[8] = REG(R8);
+ dwarf_regs[9] = REG(R9);
+ dwarf_regs[10] = REG(R10);
+ dwarf_regs[11] = REG(R11);
+ dwarf_regs[12] = REG(R12);
+ dwarf_regs[13] = REG(R13);
+ dwarf_regs[14] = REG(R14);
+ dwarf_regs[15] = REG(R15);
+ dwarf_regs[16] = REG(IP);
+ nregs = 17;
+ }
+
+ return dwfl_thread_state_registers(thread, 0, nregs, dwarf_regs);
+}
diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
similarity index 95%
rename from tools/perf/arch/x86/util/unwind.c
rename to tools/perf/arch/x86/util/unwind-libunwind.c
index 456a88c..3261f68 100644
--- a/tools/perf/arch/x86/util/unwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -5,7 +5,7 @@
#include "../../util/unwind.h"
#ifdef HAVE_ARCH_X86_64_SUPPORT
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
{
int id;
@@ -69,7 +69,7 @@
return id;
}
#else
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
{
int id;
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 0fdc852..eba46709 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -31,6 +31,9 @@
extern int bench_mem_memcpy(int argc, const char **argv,
const char *prefix __maybe_unused);
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
+extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
+extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
+extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
new file mode 100644
index 0000000..a84206e
--- /dev/null
+++ b/tools/perf/bench/futex-hash.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
+ *
+ * This program is particularly useful for measuring the kernel's futex hash
+ * table/function implementation. In order for it to make sense, use with as
+ * many threads and futexes as possible.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+static unsigned int nthreads = 0;
+static unsigned int nsecs = 10;
+/* number of futexes per thread */
+static unsigned int nfutexes = 1024;
+static bool fshared = false, done = false, silent = false;
+
+struct timeval start, end, runtime;
+static pthread_mutex_t thread_lock;
+static unsigned int threads_starting;
+static struct stats throughput_stats;
+static pthread_cond_t thread_parent, thread_worker;
+
+struct worker {
+ int tid;
+ u_int32_t *futex;
+ pthread_t thread;
+ unsigned long ops;
+};
+
+static const struct option options[] = {
+ OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
+ OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"),
+ OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_END()
+};
+
+static const char * const bench_futex_hash_usage[] = {
+ "perf bench futex hash <options>",
+ NULL
+};
+
+static void *workerfn(void *arg)
+{
+ int ret;
+ unsigned int i;
+ struct worker *w = (struct worker *) arg;
+
+ pthread_mutex_lock(&thread_lock);
+ threads_starting--;
+ if (!threads_starting)
+ pthread_cond_signal(&thread_parent);
+ pthread_cond_wait(&thread_worker, &thread_lock);
+ pthread_mutex_unlock(&thread_lock);
+
+ do {
+ for (i = 0; i < nfutexes; i++, w->ops++) {
+ /*
+ * We want the futex calls to fail in order to stress
+ * the hashing of uaddr and not measure other steps,
+ * such as internal waitqueue handling, thus enlarging
+ * the critical region protected by hb->lock.
+ */
+ ret = futex_wait(&w->futex[i], 1234, NULL,
+ fshared ? 0 : FUTEX_PRIVATE_FLAG);
+ if (!silent &&
+ (!ret || (errno != EAGAIN && errno != EWOULDBLOCK)))
+ warn("Unexpected futex return call");
+ }
+ } while (!done);
+
+ return NULL;
+}
+
+static void toggle_done(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ /* inform all threads that we're done for the day */
+ done = true;
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &runtime);
+}
+
+static void print_summary(void)
+{
+ unsigned long avg = avg_stats(&throughput_stats);
+ double stddev = stddev_stats(&throughput_stats);
+
+ printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+ !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+ (int) runtime.tv_sec);
+}
+
+int bench_futex_hash(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ int ret = 0;
+ cpu_set_t cpu;
+ struct sigaction act;
+ unsigned int i, ncpus;
+ pthread_attr_t thread_attr;
+ struct worker *worker = NULL;
+
+ argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
+ if (argc) {
+ usage_with_options(bench_futex_hash_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = toggle_done;
+ sigaction(SIGINT, &act, NULL);
+
+ if (!nthreads) /* default to the number of CPUs */
+ nthreads = ncpus;
+
+ worker = calloc(nthreads, sizeof(*worker));
+ if (!worker)
+ goto errmem;
+
+ printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
+ getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs);
+
+ init_stats(&throughput_stats);
+ pthread_mutex_init(&thread_lock, NULL);
+ pthread_cond_init(&thread_parent, NULL);
+ pthread_cond_init(&thread_worker, NULL);
+
+ threads_starting = nthreads;
+ pthread_attr_init(&thread_attr);
+ gettimeofday(&start, NULL);
+ for (i = 0; i < nthreads; i++) {
+ worker[i].tid = i;
+ worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
+ if (!worker[i].futex)
+ goto errmem;
+
+ CPU_ZERO(&cpu);
+ CPU_SET(i % ncpus, &cpu);
+
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+ ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
+ (void *)(struct worker *) &worker[i]);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_create");
+
+ }
+ pthread_attr_destroy(&thread_attr);
+
+ pthread_mutex_lock(&thread_lock);
+ while (threads_starting)
+ pthread_cond_wait(&thread_parent, &thread_lock);
+ pthread_cond_broadcast(&thread_worker);
+ pthread_mutex_unlock(&thread_lock);
+
+ sleep(nsecs);
+ toggle_done(0, NULL, NULL);
+
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_join(worker[i].thread, NULL);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_join");
+ }
+
+ /* cleanup & report results */
+ pthread_cond_destroy(&thread_parent);
+ pthread_cond_destroy(&thread_worker);
+ pthread_mutex_destroy(&thread_lock);
+
+ for (i = 0; i < nthreads; i++) {
+ unsigned long t = worker[i].ops/runtime.tv_sec;
+ update_stats(&throughput_stats, t);
+ if (!silent) {
+ if (nfutexes == 1)
+ printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
+ worker[i].tid, &worker[i].futex[0], t);
+ else
+ printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
+ worker[i].tid, &worker[i].futex[0],
+ &worker[i].futex[nfutexes-1], t);
+ }
+
+ free(worker[i].futex);
+ }
+
+ print_summary();
+
+ free(worker);
+ return ret;
+errmem:
+ err(EXIT_FAILURE, "calloc");
+}
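futex.h (added at the end of this series, truncated below) is exercised here
through futex_wait(); from the call sites it is presumably a thin wrapper
over the raw syscall along these lines -- a sketch, not the header's actual
text:

	#include <time.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	/* sketch: assumed shape of futex_wait(); sleeps only if *uaddr
	 * still equals val at call time, so the mismatched "1234" above
	 * makes every call fail fast with EAGAIN/EWOULDBLOCK
	 */
	static inline int
	futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout,
		   int opflags)
	{
		return syscall(SYS_futex, uaddr, FUTEX_WAIT | opflags, val,
			       timeout, NULL, 0);
	}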
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
new file mode 100644
index 0000000..a1625587
--- /dev/null
+++ b/tools/perf/bench/futex-requeue.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-requeue: Block a bunch of threads on futex1 and requeue them
+ * on futex2, N at a time.
+ *
+ * This program is particularly useful to measure the latency of nthread
+ * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+static u_int32_t futex1 = 0, futex2 = 0;
+
+/*
+ * How many tasks to requeue at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+static unsigned int nrequeue = 1;
+
+/*
+ * There can be significant variance from run to run,
+ * the more repeats, the more exact the overall avg and
+ * the better idea of the futex latency.
+ */
+static unsigned int repeat = 10;
+
+static pthread_t *worker;
+static bool done = false, silent = false;
+static pthread_mutex_t thread_lock;
+static pthread_cond_t thread_parent, thread_worker;
+static struct stats requeuetime_stats, requeued_stats;
+static unsigned int ncpus, threads_starting, nthreads = 0;
+
+static const struct option options[] = {
+ OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
+ OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"),
+ OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
+ OPT_END()
+};
+
+static const char * const bench_futex_requeue_usage[] = {
+ "perf bench futex requeue <options>",
+ NULL
+};
+
+static void print_summary(void)
+{
+ double requeuetime_avg = avg_stats(&requeuetime_stats);
+ double requeuetime_stddev = stddev_stats(&requeuetime_stats);
+ unsigned int requeued_avg = avg_stats(&requeued_stats);
+
+ printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
+ requeued_avg,
+ nthreads,
+ requeuetime_avg/1e3,
+ rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
+}
+
+static void *workerfn(void *arg __maybe_unused)
+{
+ pthread_mutex_lock(&thread_lock);
+ threads_starting--;
+ if (!threads_starting)
+ pthread_cond_signal(&thread_parent);
+ pthread_cond_wait(&thread_worker, &thread_lock);
+ pthread_mutex_unlock(&thread_lock);
+
+ futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+ return NULL;
+}
+
+static void block_threads(pthread_t *w,
+ pthread_attr_t thread_attr)
+{
+ cpu_set_t cpu;
+ unsigned int i;
+
+ threads_starting = nthreads;
+
+ /* create and block all threads */
+ for (i = 0; i < nthreads; i++) {
+ CPU_ZERO(&cpu);
+ CPU_SET(i % ncpus, &cpu);
+
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+ if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+ err(EXIT_FAILURE, "pthread_create");
+ }
+}
+
+static void toggle_done(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ done = true;
+}
+
+int bench_futex_requeue(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ int ret = 0;
+ unsigned int i, j;
+ struct sigaction act;
+ pthread_attr_t thread_attr;
+
+ argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
+ if (argc)
+ goto err;
+
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = toggle_done;
+ sigaction(SIGINT, &act, NULL);
+
+ if (!nthreads)
+ nthreads = ncpus;
+
+ worker = calloc(nthreads, sizeof(*worker));
+ if (!worker)
+ err(EXIT_FAILURE, "calloc");
+
+ printf("Run summary [PID %d]: Requeuing %d threads (from %p to %p), "
+ "%d at a time.\n\n",
+ getpid(), nthreads, &futex1, &futex2, nrequeue);
+
+ init_stats(&requeued_stats);
+ init_stats(&requeuetime_stats);
+ pthread_attr_init(&thread_attr);
+ pthread_mutex_init(&thread_lock, NULL);
+ pthread_cond_init(&thread_parent, NULL);
+ pthread_cond_init(&thread_worker, NULL);
+
+ for (j = 0; j < repeat && !done; j++) {
+ unsigned int nrequeued = 0;
+ struct timeval start, end, runtime;
+
+ /* create, launch & block all threads */
+ block_threads(worker, thread_attr);
+
+ /* make sure all threads are already blocked */
+ pthread_mutex_lock(&thread_lock);
+ while (threads_starting)
+ pthread_cond_wait(&thread_parent, &thread_lock);
+ pthread_cond_broadcast(&thread_worker);
+ pthread_mutex_unlock(&thread_lock);
+
+ usleep(100000);
+
+ /* Ok, all threads are patiently blocked, start requeueing */
+ gettimeofday(&start, NULL);
+ for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue)
+ /*
+ * Do not wakeup any tasks blocked on futex1, allowing
+ * us to really measure futex_wait functionality.
+ */
+ futex_cmp_requeue(&futex1, 0, &futex2, 0, nrequeue,
+ FUTEX_PRIVATE_FLAG);
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &runtime);
+
+ update_stats(&requeued_stats, nrequeued);
+ update_stats(&requeuetime_stats, runtime.tv_usec);
+
+ if (!silent) {
+ printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n",
+ j + 1, nrequeued, nthreads, runtime.tv_usec/1e3);
+ }
+
+ /* everybody should be blocked on futex2, wake'em up */
+ nrequeued = futex_wake(&futex2, nthreads, FUTEX_PRIVATE_FLAG);
+ if (nthreads != nrequeued)
+ warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
+
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_join(worker[i], NULL);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_join");
+ }
+
+ }
+
+ /* cleanup & report results */
+ pthread_cond_destroy(&thread_parent);
+ pthread_cond_destroy(&thread_worker);
+ pthread_mutex_destroy(&thread_lock);
+ pthread_attr_destroy(&thread_attr);
+
+ print_summary();
+
+ free(worker);
+ return ret;
+err:
+ usage_with_options(bench_futex_requeue_usage, options);
+ exit(EXIT_FAILURE);
+}
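futex_cmp_requeue() as called above wakes up to nr_wake waiters on the first
futex and moves up to nr_requeue of the remainder onto the second, provided
the first futex still holds the expected value; its assumed wrapper shape,
sketched:

	/* sketch: assumed shape of futex_cmp_requeue(); the benchmark
	 * passes nr_wake = 0 so no task wakes and only the requeue cost
	 * is measured
	 */
	static inline int
	futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2,
			  int nr_wake, int nr_requeue, int opflags)
	{
		return syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE | opflags,
			       nr_wake, nr_requeue, uaddr2, val);
	}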
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
new file mode 100644
index 0000000..d096169
--- /dev/null
+++ b/tools/perf/bench/futex-wake.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time.
+ *
+ * This program is particularly useful to measure the latency of nthread wakeups
+ * in non-error situations: all waiters are queued and all wake calls wakeup
+ * one or more tasks, and thus the waitqueue is never empty.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+/* all threads will block on the same futex */
+static u_int32_t futex1 = 0;
+
+/*
+ * How many wakeups to do at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+static unsigned int nwakes = 1;
+
+/*
+ * There can be significant variance from run to run,
+ * the more repeats, the more exact the overall avg and
+ * the better idea of the futex latency.
+ */
+static unsigned int repeat = 10;
+
+pthread_t *worker;
+static bool done = false, silent = false;
+static pthread_mutex_t thread_lock;
+static pthread_cond_t thread_parent, thread_worker;
+static struct stats waketime_stats, wakeup_stats;
+static unsigned int ncpus, threads_starting, nthreads = 0;
+
+static const struct option options[] = {
+ OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"),
+ OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"),
+ OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
+ OPT_END()
+};
+
+static const char * const bench_futex_wake_usage[] = {
+ "perf bench futex wake <options>",
+ NULL
+};
+
+static void *workerfn(void *arg __maybe_unused)
+{
+ pthread_mutex_lock(&thread_lock);
+ threads_starting--;
+ if (!threads_starting)
+ pthread_cond_signal(&thread_parent);
+ pthread_cond_wait(&thread_worker, &thread_lock);
+ pthread_mutex_unlock(&thread_lock);
+
+ futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+ return NULL;
+}
+
+static void print_summary(void)
+{
+ double waketime_avg = avg_stats(&waketime_stats);
+ double waketime_stddev = stddev_stats(&waketime_stats);
+ unsigned int wakeup_avg = avg_stats(&wakeup_stats);
+
+ printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n",
+ wakeup_avg,
+ nthreads,
+ waketime_avg/1e3,
+ rel_stddev_stats(waketime_stddev, waketime_avg));
+}
+
+static void block_threads(pthread_t *w,
+ pthread_attr_t thread_attr)
+{
+ cpu_set_t cpu;
+ unsigned int i;
+
+ threads_starting = nthreads;
+
+ /* create and block all threads */
+ for (i = 0; i < nthreads; i++) {
+ CPU_ZERO(&cpu);
+ CPU_SET(i % ncpus, &cpu);
+
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+ if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+ err(EXIT_FAILURE, "pthread_create");
+ }
+}
+
+static void toggle_done(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ done = true;
+}
+
+int bench_futex_wake(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ int ret = 0;
+ unsigned int i, j;
+ struct sigaction act;
+ pthread_attr_t thread_attr;
+
+ argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
+ if (argc) {
+ usage_with_options(bench_futex_wake_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = toggle_done;
+ sigaction(SIGINT, &act, NULL);
+
+ if (!nthreads)
+ nthreads = ncpus;
+
+ worker = calloc(nthreads, sizeof(*worker));
+ if (!worker)
+ err(EXIT_FAILURE, "calloc");
+
+ printf("Run summary [PID %d]: blocking on %d threads (at futex %p), "
+ "waking up %d at a time.\n\n",
+ getpid(), nthreads, &futex1, nwakes);
+
+ init_stats(&wakeup_stats);
+ init_stats(&waketime_stats);
+ pthread_attr_init(&thread_attr);
+ pthread_mutex_init(&thread_lock, NULL);
+ pthread_cond_init(&thread_parent, NULL);
+ pthread_cond_init(&thread_worker, NULL);
+
+ for (j = 0; j < repeat && !done; j++) {
+ unsigned int nwoken = 0;
+ struct timeval start, end, runtime;
+
+ /* create, launch & block all threads */
+ block_threads(worker, thread_attr);
+
+ /* make sure all threads are already blocked */
+ pthread_mutex_lock(&thread_lock);
+ while (threads_starting)
+ pthread_cond_wait(&thread_parent, &thread_lock);
+ pthread_cond_broadcast(&thread_worker);
+ pthread_mutex_unlock(&thread_lock);
+
+ usleep(100000);
+
+ /* Ok, all threads are patiently blocked, start waking folks up */
+ gettimeofday(&start, NULL);
+ while (nwoken != nthreads)
+ nwoken += futex_wake(&futex1, nwakes, FUTEX_PRIVATE_FLAG);
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &runtime);
+
+ update_stats(&wakeup_stats, nwoken);
+ update_stats(&waketime_stats, runtime.tv_usec);
+
+ if (!silent) {
+ printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n",
+ j + 1, nwoken, nthreads, runtime.tv_usec/1e3);
+ }
+
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_join(worker[i], NULL);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_join");
+ }
+
+ }
+
+ /* cleanup & report results */
+ pthread_cond_destroy(&thread_parent);
+ pthread_cond_destroy(&thread_worker);
+ pthread_mutex_destroy(&thread_lock);
+ pthread_attr_destroy(&thread_attr);
+
+ print_summary();
+
+ free(worker);
+ return ret;
+}
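
A quick sanity check on the timed loop above (not from the patch):
futex_wake() returns how many tasks it actually woke, so with the defaults
(nwakes == 1, nthreads == number of online CPUs) the measured section issues
one FUTEX_WAKE syscall per blocked thread, while e.g. 'perf bench futex wake
-t 8 -w 2' should complete each run in 8/2 == 4 calls.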
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
new file mode 100644
index 0000000..71f2844
--- /dev/null
+++ b/tools/perf/bench/futex.h
@@ -0,0 +1,71 @@
+/*
+ * Glibc independent futex library for testing kernel functionality.
+ * Shamelessly stolen from Darren Hart <dvhltc@us.ibm.com>
+ * http://git.kernel.org/cgit/linux/kernel/git/dvhart/futextest.git/
+ */
+
+#ifndef _FUTEX_H
+#define _FUTEX_H
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <linux/futex.h>
+
+/**
+ * futex() - SYS_futex syscall wrapper
+ * @uaddr: address of first futex
+ * @op: futex op code
+ * @val: typically expected value of uaddr, but varies by op
+ * @timeout: typically an absolute struct timespec (except where noted
+ * otherwise). Overloaded by some ops
+ * @uaddr2: address of second futex for some ops
+ * @val3: varies by op
+ * @opflags: flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG
+ *
+ * futex() is used by all the following futex op wrappers. It can also be
+ * used for misuse and abuse testing. Generally, the specific op wrappers
+ * should be used instead. It is a macro instead of a static inline function
+ * as some of the types are overloaded (timeout is used for nr_requeue, for
+ * example).
+ *
+ * These argument descriptions are the defaults for all
+ * like-named arguments in the following wrappers except where noted below.
+ */
+#define futex(uaddr, op, val, timeout, uaddr2, val3, opflags) \
+ syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3)
+
+/**
+ * futex_wait() - block on uaddr with optional timeout
+ * @timeout: relative timeout
+ */
+static inline int
+futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags)
+{
+ return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
+}
+
+/**
+ * futex_wake() - wake one or more tasks blocked on uaddr
+ * @nr_wake: wake up to this many tasks
+ */
+static inline int
+futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
+{
+ return futex(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
+}
+
+/**
+ * futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
+ * @nr_wake: wake up to this many tasks
+ * @nr_requeue: requeue up to this many tasks
+ */
+static inline int
+futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake,
+ int nr_requeue, int opflags)
+{
+ return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
+ val, opflags);
+}
+
+#endif /* _FUTEX_H */
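
A minimal usage sketch of these wrappers (illustrative, not part of the
patch; the file name and the crude sleep()-based synchronization are
assumptions -- the benchmarks above synchronize with a condition variable
instead):

	/* futex-demo.c: block one thread on a futex word, then wake it.
	 * Build, assuming this header is reachable: gcc -pthread futex-demo.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include "futex.h"

	static u_int32_t fword;

	static void *waiter(void *arg)
	{
		(void)arg;
		/* sleeps in the kernel as long as fword == 0 */
		futex_wait(&fword, 0, NULL, FUTEX_PRIVATE_FLAG);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int woken;

		pthread_create(&t, NULL, waiter, NULL);
		sleep(1); /* crude: give the waiter time to block */
		woken = futex_wake(&fword, 1, FUTEX_PRIVATE_FLAG);
		printf("woke %d task(s)\n", woken); /* expect 1 */
		pthread_join(t, NULL);
		return 0;
	}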
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d4c83c6..97d86d8 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1593,6 +1593,7 @@
p->data_rand_walk = true;
p->nr_loops = -1;
p->init_random = true;
+ p->run_all = argc == 1;
}
static int run_bench_numa(const char *name, const char **argv)
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index e47f90c..1e6e777 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -12,6 +12,7 @@
* sched ... scheduler and IPC performance
* mem ... memory access performance
* numa ... NUMA scheduling and MM performance
+ * futex ... Futex performance
*/
#include "perf.h"
#include "util/util.h"
@@ -54,6 +55,14 @@
{ NULL, NULL, NULL }
};
+static struct bench futex_benchmarks[] = {
+ { "hash", "Benchmark for futex hash table", bench_futex_hash },
+ { "wake", "Benchmark for futex wake calls", bench_futex_wake },
+ { "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
+ { "all", "Test all futex benchmarks", NULL },
+ { NULL, NULL, NULL }
+};
+
struct collection {
const char *name;
const char *summary;
@@ -61,11 +70,12 @@
};
static struct collection collections[] = {
- { "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
+ { "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
{ "mem", "Memory access benchmarks", mem_benchmarks },
#ifdef HAVE_LIBNUMA_SUPPORT
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
+ {"futex", "Futex stressing benchmarks", futex_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
@@ -76,7 +86,7 @@
/* Iterate over all benchmarks within a collection: */
#define for_each_bench(coll, bench) \
- for (bench = coll->benchmarks; bench->name; bench++)
+ for (bench = coll->benchmarks; bench && bench->name; bench++)
static void dump_benchmarks(struct collection *coll)
{
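
The added NULL test keeps for_each_bench() safe for the "all" collection,
whose benchmarks pointer is NULL; a hypothetical illustration using the
types defined above:

	struct collection all = { "all", "All benchmarks", NULL };
	struct bench *bench;

	/* loops zero times instead of dereferencing a NULL table */
	for_each_bench((&all), bench)
		printf("%s\n", bench->name);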
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index a77e312..204fffe 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -952,8 +952,8 @@
dfmt->header_width, buf);
}
-static int hpp__header(struct perf_hpp_fmt *fmt,
- struct perf_hpp *hpp)
+static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
@@ -963,7 +963,8 @@
}
static int hpp__width(struct perf_hpp_fmt *fmt,
- struct perf_hpp *hpp __maybe_unused)
+ struct perf_hpp *hpp __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index b346601..3a73875 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -312,7 +312,6 @@
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
- evsel->attr.sample_regs_user,
evsel->attr.read_format, &sample_sw,
false);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index a735051..21c164b 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1691,17 +1691,15 @@
OPT_END()
};
-
- const char * const kvm_usage[] = {
- "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
- NULL
- };
+ const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
+ "buildid-list", "stat", NULL };
+ const char *kvm_usage[] = { NULL, NULL };
perf_host = 0;
perf_guest = 1;
- argc = parse_options(argc, argv, kvm_options, kvm_usage,
- PARSE_OPT_STOP_AT_NON_OPTION);
+ argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kvm_usage, kvm_options);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 7894888..cdcd4eb 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -268,9 +268,9 @@
return 0;
}
-static void init_params(void)
+static int init_params(void)
{
- line_range__init(&params.line_range);
+ return line_range__init(&params.line_range);
}
static void cleanup_params(void)
@@ -515,9 +515,11 @@
{
int ret;
- init_params();
- ret = __cmd_probe(argc, argv, prefix);
- cleanup_params();
+ ret = init_params();
+ if (!ret) {
+ ret = __cmd_probe(argc, argv, prefix);
+ cleanup_params();
+ }
return ret;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index af47531..eb524f9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -649,7 +649,7 @@
return ret;
}
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
char *endptr;
@@ -675,7 +675,7 @@
max_size, str);
return -1;
}
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
@@ -704,7 +704,7 @@
"needed for -g fp\n");
break;
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
/* Dwarf style */
} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
const unsigned long default_stack_dump_size = 8192;
@@ -720,7 +720,7 @@
ret = get_stack_size(tok, &size);
opts->stack_dump_size = size;
}
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
} else {
pr_err("callchain: Unknown --call-graph option "
"value: %s\n", arg);
@@ -735,7 +735,9 @@
static void callchain_debug(struct record_opts *opts)
{
- pr_debug("callchain: type %d\n", opts->call_graph);
+ static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
+
+ pr_debug("callchain: type %s\n", str[opts->call_graph]);
if (opts->call_graph == CALLCHAIN_DWARF)
pr_debug("callchain: stack dump size %d\n",
@@ -749,6 +751,8 @@
struct record_opts *opts = opt->value;
int ret;
+ opts->call_graph_enabled = !unset;
+
/* --no-call-graph */
if (unset) {
opts->call_graph = CALLCHAIN_NONE;
@@ -769,6 +773,8 @@
{
struct record_opts *opts = opt->value;
+ opts->call_graph_enabled = !unset;
+
if (opts->call_graph == CALLCHAIN_NONE)
opts->call_graph = CALLCHAIN_FP;
@@ -776,6 +782,16 @@
return 0;
}
+static int perf_record_config(const char *var, const char *value, void *cb)
+{
+ struct record *rec = cb;
+
+ if (!strcmp(var, "record.call-graph"))
+ return record_parse_callchain(value, &rec->opts);
+
+ return perf_default_config(var, value, cb);
+}
+
static const char * const record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@@ -807,7 +823,7 @@
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
@@ -907,6 +923,8 @@
if (rec->evlist == NULL)
return -ENOMEM;
+ perf_config(perf_record_config, rec);
+
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && target__none(&rec->opts.target))
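
With perf_record_config() hooked into perf_config(), the callchain default
can now come from the config file; a hypothetical ~/.perfconfig entry (the
value is fed through the same parser as --call-graph, so a dwarf stack-dump
size may follow):

	[record]
		call-graph = dwarf,8192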
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3c53ec2..c8f2113 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -75,13 +75,10 @@
return perf_default_config(var, value, cb);
}
-static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_location *al,
- struct perf_sample *sample, struct perf_evsel *evsel,
- union perf_event *event)
+static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al,
+ struct perf_sample *sample, struct perf_evsel *evsel)
{
- struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
- u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct hist_entry *he;
struct mem_info *mi, *mx;
uint64_t cost;
@@ -90,7 +87,7 @@
if (err)
return err;
- mi = machine__resolve_mem(al->machine, al->thread, sample, cpumode);
+ mi = sample__resolve_mem(sample, al);
if (!mi)
return -ENOMEM;
@@ -113,14 +110,16 @@
if (!he)
return -ENOMEM;
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
- if (err)
- goto out;
+ if (ui__has_annotation()) {
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+ if (err)
+ goto out;
- mx = he->mem_info;
- err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
- if (err)
- goto out;
+ mx = he->mem_info;
+ err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
+ if (err)
+ goto out;
+ }
evsel->hists.stats.total_period += cost;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -129,10 +128,9 @@
return err;
}
-static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_location *al,
+static int report__add_branch_hist_entry(struct report *rep, struct addr_location *al,
struct perf_sample *sample, struct perf_evsel *evsel)
{
- struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
unsigned i;
struct hist_entry *he;
@@ -142,8 +140,7 @@
if (err)
return err;
- bi = machine__resolve_bstack(al->machine, al->thread,
- sample->branch_stack);
+ bi = sample__resolve_bstack(sample, al);
if (!bi)
return -ENOMEM;
@@ -164,14 +161,18 @@
he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
1, 1, 0);
if (he) {
- bx = he->branch_info;
- err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
- if (err)
- goto out;
+ if (ui__has_annotation()) {
+ bx = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bx->from,
+ evsel->idx);
+ if (err)
+ goto out;
- err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
- if (err)
- goto out;
+ err = addr_map_symbol__inc_samples(&bx->to,
+ evsel->idx);
+ if (err)
+ goto out;
+ }
evsel->hists.stats.total_period += 1;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -184,10 +185,9 @@
return err;
}
-static int report__add_hist_entry(struct perf_tool *tool, struct perf_evsel *evsel,
+static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel,
struct addr_location *al, struct perf_sample *sample)
{
- struct report *rep = container_of(tool, struct report, tool);
struct symbol *parent = NULL;
struct hist_entry *he;
int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
@@ -205,7 +205,9 @@
if (err)
goto out;
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+ if (ui__has_annotation())
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
out:
@@ -229,25 +231,25 @@
return -1;
}
- if (al.filtered || (rep->hide_unresolved && al.sym == NULL))
+ if (rep->hide_unresolved && al.sym == NULL)
return 0;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
if (sort__mode == SORT_MODE__BRANCH) {
- ret = report__add_branch_hist_entry(tool, &al, sample, evsel);
+ ret = report__add_branch_hist_entry(rep, &al, sample, evsel);
if (ret < 0)
pr_debug("problem adding lbr entry, skipping event\n");
} else if (rep->mem_mode == 1) {
- ret = report__add_mem_hist_entry(tool, &al, sample, evsel, event);
+ ret = report__add_mem_hist_entry(rep, &al, sample, evsel);
if (ret < 0)
pr_debug("problem adding mem entry, skipping event\n");
} else {
if (al.map != NULL)
al.map->dso->hit = 1;
- ret = report__add_hist_entry(tool, evsel, &al, sample);
+ ret = report__add_hist_entry(rep, evsel, &al, sample);
if (ret < 0)
pr_debug("problem incrementing symbol period, skipping event\n");
}
@@ -926,7 +928,7 @@
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (use_browser == 1 && sort__has_sym) {
+ if (ui__has_annotation()) {
symbol_conf.priv_size = sizeof(struct annotation);
machines__set_symbol_filter(&session->machines,
symbol__annotate_init);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 6a76a07..9ac0a49 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1124,7 +1124,7 @@
avg = work_list->total_lat / work_list->nb_atoms;
- printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+ printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
(double)work_list->total_runtime / 1e6,
work_list->nb_atoms, (double)avg / 1e6,
(double)work_list->max_lat / 1e6,
@@ -1527,9 +1527,9 @@
perf_sched__sort_lat(sched);
- printf("\n ---------------------------------------------------------------------------------------------------------------\n");
- printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
- printf(" ---------------------------------------------------------------------------------------------------------------\n");
+ printf("\n -----------------------------------------------------------------------------------------------------------------\n");
+ printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
+ printf(" -----------------------------------------------------------------------------------------------------------------\n");
next = rb_first(&sched->sorted_atom_root);
@@ -1541,7 +1541,7 @@
next = rb_next(next);
}
- printf(" -----------------------------------------------------------------------------------------\n");
+ printf(" -----------------------------------------------------------------------------------------------------------------\n");
printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
(double)sched->all_runtime / 1e6, sched->all_count);
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 25526d6..74db256 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -494,7 +494,7 @@
continue;
}
- tal.filtered = false;
+ tal.filtered = 0;
thread__find_addr_location(al.thread, machine, cpumode,
MAP__FUNCTION, ip, &tal);
@@ -1238,7 +1238,7 @@
for (i = 0; i < old_power_args_nr; i++)
*p++ = strdup(old_power_args[i]);
- for (j = 1; j < (unsigned int)argc; j++)
+ for (j = 0; j < (unsigned int)argc; j++)
*p++ = argv[j];
return cmd_record(rec_argc, rec_argv, NULL);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 76cd510..65aaa5b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -176,7 +176,7 @@
{
struct annotation *notes;
struct symbol *sym;
- int err;
+ int err = 0;
if (he == NULL || he->ms.sym == NULL ||
((top->sym_filter_entry == NULL ||
@@ -190,7 +190,9 @@
return;
ip = he->ms.map->map_ip(he->ms.map, ip);
- err = hist_entry__inc_addr_samples(he, counter, ip);
+
+ if (ui__has_annotation())
+ err = hist_entry__inc_addr_samples(he, counter, ip);
pthread_mutex_unlock(&notes->lock);
@@ -991,6 +993,16 @@
return record_parse_callchain_opt(opt, arg, unset);
}
+static int perf_top_config(const char *var, const char *value, void *cb)
+{
+ struct perf_top *top = cb;
+
+ if (!strcmp(var, "top.call-graph"))
+ return record_parse_callchain(value, &top->record_opts);
+
+ return perf_default_config(var, value, cb);
+}
+
static int
parse_percent_limit(const struct option *opt, const char *arg,
int unset __maybe_unused)
@@ -1115,6 +1127,8 @@
if (top.evlist == NULL)
return -ENOMEM;
+ perf_config(perf_top_config, &top);
+
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
usage_with_options(top_usage, options);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 896f270..f954c26 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -37,6 +37,10 @@
# define MADV_UNMERGEABLE 13
#endif
+#ifndef EFD_SEMAPHORE
+# define EFD_SEMAPHORE 1
+#endif
+
struct tp_field {
int offset;
union {
@@ -279,6 +283,11 @@
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches as soon as the ioctl beautifier
+ * gets rewritten to support all arches.
+ */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -286,6 +295,7 @@
}
#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
+#endif /* defined(__i386__) || defined(__x86_64__) */
static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
struct syscall_arg *arg);
@@ -815,7 +825,6 @@
P_SIGNUM(PIPE);
P_SIGNUM(ALRM);
P_SIGNUM(TERM);
- P_SIGNUM(STKFLT);
P_SIGNUM(CHLD);
P_SIGNUM(CONT);
P_SIGNUM(STOP);
@@ -831,6 +840,15 @@
P_SIGNUM(IO);
P_SIGNUM(PWR);
P_SIGNUM(SYS);
+#ifdef SIGEMT
+ P_SIGNUM(EMT);
+#endif
+#ifdef SIGSTKFLT
+ P_SIGNUM(STKFLT);
+#endif
+#ifdef SIGSWI
+ P_SIGNUM(SWI);
+#endif
default: break;
}
@@ -839,6 +857,10 @@
#define SCA_SIGNUM syscall_arg__scnprintf_signum
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
#define TCGETS 0x5401
static const char *tioctls[] = {
@@ -860,6 +882,7 @@
};
static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
+#endif /* defined(__i386__) || defined(__x86_64__) */
#define STRARRAY(arg, name, array) \
.arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
@@ -941,9 +964,16 @@
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
[1] = SCA_STRHEXARRAY, /* cmd */
[2] = SCA_HEX, /* arg */ },
.arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
+#else
+ [2] = SCA_HEX, /* arg */ }, },
+#endif
{ .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "linkat", .errmsg = true,
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index c48d449..c234182 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -59,6 +59,18 @@
CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
+ifndef NO_LIBELF
+ # for linking with debug library, run like:
+ # make DEBUG=1 LIBDW_DIR=/opt/libdw/
+ ifdef LIBDW_DIR
+ LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
+ LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+
+ FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
+ endif
+endif
+
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
@@ -147,7 +159,35 @@
libunwind \
on-exit \
stackprotector-all \
- timerfd
+ timerfd \
+ libdw-dwarf-unwind
+
+LIB_FEATURE_TESTS = \
+ dwarf \
+ glibc \
+ gtk2 \
+ libaudit \
+ libbfd \
+ libelf \
+ libnuma \
+ libperl \
+ libpython \
+ libslang \
+ libunwind \
+ libdw-dwarf-unwind
+
+VF_FEATURE_TESTS = \
+ backtrace \
+ fortify-source \
+ gtk2-infobar \
+ libelf-getphdrnum \
+ libelf-mmap \
+ libpython-version \
+ on-exit \
+ stackprotector-all \
+ timerfd \
+ libunwind-debug-frame \
+ bionic
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
@@ -161,17 +201,6 @@
$(foreach feat,$(CORE_FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
#
-# So here we detect whether test-all was rebuilt, to be able
-# to skip the print-out of the long features list if the file
-# existed before and after it was built:
-#
-ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all.bin),)
- test-all-failed := 1
-else
- test-all-failed := 0
-endif
-
-#
# Special fast-path for the 'all features are available' case:
#
$(call feature_check,all,$(MSG))
@@ -180,15 +209,6 @@
# Just in case the build freshly failed, make sure we print the
# feature matrix:
#
-ifeq ($(feature-all), 0)
- test-all-failed := 1
-endif
-
-ifeq ($(test-all-failed),1)
- $(info )
- $(info Auto-detecting system features:)
-endif
-
ifeq ($(feature-all), 1)
#
# test-all.c passed - just set all the core feature flags to 1:
@@ -199,27 +219,6 @@
$(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
endif
-#
-# Print the result of the feature test:
-#
-feature_print = $(eval $(feature_print_code)) $(info $(MSG))
-
-define feature_print_code
- ifeq ($(feature-$(1)), 1)
- MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
- else
- MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
- endif
-endef
-
-#
-# Only print out our features if we rebuilt the testcases or if a test failed:
-#
-ifeq ($(test-all-failed), 1)
- $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_print,$(feat)))
- $(info )
-endif
-
ifeq ($(feature-stackprotector-all), 1)
CFLAGS += -fstack-protector-all
endif
@@ -264,6 +263,7 @@
NO_DWARF := 1
NO_DEMANGLE := 1
NO_LIBUNWIND := 1
+ NO_LIBDW_DWARF_UNWIND := 1
else
ifeq ($(feature-libelf), 0)
ifeq ($(feature-glibc), 1)
@@ -282,13 +282,12 @@
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
endif
else
- # for linking with debug library, run like:
- # make DEBUG=1 LIBDW_DIR=/opt/libdw/
- ifdef LIBDW_DIR
- LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
- LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+ ifndef NO_LIBDW_DWARF_UNWIND
+ ifneq ($(feature-libdw-dwarf-unwind),1)
+ NO_LIBDW_DWARF_UNWIND := 1
+ msg := $(warning No libdw DWARF unwind found, please install elfutils-devel/libdw-dev >= 0.158 and/or set LIBDW_DIR);
+ endif
endif
-
ifneq ($(feature-dwarf), 1)
msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
NO_DWARF := 1
@@ -324,25 +323,51 @@
ifndef NO_LIBUNWIND
ifneq ($(feature-libunwind), 1)
- msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
+ msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR);
NO_LIBUNWIND := 1
+ endif
+endif
+
+dwarf-post-unwind := 1
+dwarf-post-unwind-text := BUG
+
+# setup DWARF post unwinder
+ifdef NO_LIBUNWIND
+ ifdef NO_LIBDW_DWARF_UNWIND
+ msg := $(warning Disabling post unwind, no support found.);
+ dwarf-post-unwind := 0
else
- ifeq ($(ARCH),arm)
- $(call feature_check,libunwind-debug-frame)
- ifneq ($(feature-libunwind-debug-frame), 1)
- msg := $(warning No debug_frame support found in libunwind);
- CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
- endif
- else
- # non-ARM has no dwarf_find_debug_frame() function:
+ dwarf-post-unwind-text := libdw
+ endif
+else
+ dwarf-post-unwind-text := libunwind
+ # Enable libunwind support by default.
+ ifndef NO_LIBDW_DWARF_UNWIND
+ NO_LIBDW_DWARF_UNWIND := 1
+ endif
+endif
+
+ifeq ($(dwarf-post-unwind),1)
+ CFLAGS += -DHAVE_DWARF_UNWIND_SUPPORT
+else
+ NO_DWARF_UNWIND := 1
+endif
+
+ifndef NO_LIBUNWIND
+ ifeq ($(ARCH),arm)
+ $(call feature_check,libunwind-debug-frame)
+ ifneq ($(feature-libunwind-debug-frame), 1)
+ msg := $(warning No debug_frame support found in libunwind);
CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
endif
-
- CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
- EXTLIBS += $(LIBUNWIND_LIBS)
- CFLAGS += $(LIBUNWIND_CFLAGS)
- LDFLAGS += $(LIBUNWIND_LDFLAGS)
- endif # ifneq ($(feature-libunwind), 1)
+ else
+ # non-ARM has no dwarf_find_debug_frame() function:
+ CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+ endif
+ CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
+ EXTLIBS += $(LIBUNWIND_LIBS)
+ CFLAGS += $(LIBUNWIND_CFLAGS)
+ LDFLAGS += $(LIBUNWIND_LDFLAGS)
endif
ifndef NO_LIBAUDIT
@@ -478,7 +503,7 @@
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd
+ EXTLIBS += -lbfd -lz -liberty
endif
ifdef NO_DEMANGLE
@@ -602,3 +627,84 @@
plugindir=$(libdir)/traceevent/plugins
plugindir_SQ= $(subst ','\'',$(plugindir))
endif
+
+#
+# Print the result of the feature test:
+#
+feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
+
+define feature_print_status_code
+ ifeq ($(feature-$(1)), 1)
+ MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
+ else
+ MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+ endif
+endef
+
+feature_print_var = $(eval $(feature_print_var_code)) $(info $(MSG))
+define feature_print_var_code
+ MSG = $(shell printf '...%30s: %s' $(1) $($(1)))
+endef
+
+feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
+define feature_print_text_code
+ MSG = $(shell printf '...%30s: %s' $(1) $(2))
+endef
+
+PERF_FEATURES := $(foreach feat,$(LIB_FEATURE_TESTS),feature-$(feat)($(feature-$(feat))))
+PERF_FEATURES_FILE := $(shell touch $(OUTPUT)PERF-FEATURES; cat $(OUTPUT)PERF-FEATURES)
+
+ifeq ($(dwarf-post-unwind),1)
+ PERF_FEATURES += dwarf-post-unwind($(dwarf-post-unwind-text))
+endif
+
+# The $(display_lib) variable controls the default detection message
+# output. It's set if:
+# - detected features differ from the features stored from the
+# last build (in the PERF-FEATURES file)
+# - one of the $(LIB_FEATURE_TESTS) is not detected
+# - VF is enabled
+
+ifneq ("$(PERF_FEATURES)","$(PERF_FEATURES_FILE)")
+ $(shell echo "$(PERF_FEATURES)" > $(OUTPUT)PERF-FEATURES)
+ display_lib := 1
+endif
+
+feature_check = $(eval $(feature_check_code))
+define feature_check_code
+ ifneq ($(feature-$(1)), 1)
+ display_lib := 1
+ endif
+endef
+
+$(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_check,$(feat)))
+
+ifeq ($(VF),1)
+ display_lib := 1
+ display_vf := 1
+endif
+
+ifeq ($(display_lib),1)
+ $(info )
+ $(info Auto-detecting system features:)
+ $(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_print_status,$(feat),))
+
+ ifeq ($(dwarf-post-unwind),1)
+ $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
+ endif
+endif
+
+ifeq ($(display_vf),1)
+ $(foreach feat,$(VF_FEATURE_TESTS),$(call feature_print_status,$(feat),))
+ $(info )
+ $(call feature_print_var,prefix)
+ $(call feature_print_var,bindir)
+ $(call feature_print_var,libdir)
+ $(call feature_print_var,sysconfdir)
+ $(call feature_print_var,LIBUNWIND_DIR)
+ $(call feature_print_var,LIBDW_DIR)
+endif
+
+ifeq ($(display_lib),1)
+ $(info )
+endif
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 12e5513..2da103c 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -26,7 +26,8 @@
test-libunwind-debug-frame.bin \
test-on-exit.bin \
test-stackprotector-all.bin \
- test-timerfd.bin
+ test-timerfd.bin \
+ test-libdw-dwarf-unwind.bin
CC := $(CROSS_COMPILE)gcc -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -121,7 +122,7 @@
$(BUILD) $(FLAGS_PYTHON_EMBED)
test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
test-liberty.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
@@ -141,6 +142,9 @@
test-timerfd.bin:
$(BUILD)
+test-libdw-dwarf-unwind.bin:
+ $(BUILD)
+
-include *.d
###############################
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index 9b8a544..fc37eb3 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -89,6 +89,10 @@
# include "test-stackprotector-all.c"
#undef main
+#define main main_test_libdw_dwarf_unwind
+# include "test-libdw-dwarf-unwind.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -111,6 +115,7 @@
main_test_libnuma();
main_test_timerfd();
main_test_stackprotector_all();
+ main_test_libdw_dwarf_unwind();
return 0;
}
diff --git a/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c b/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c
new file mode 100644
index 0000000..f676a3f
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c
@@ -0,0 +1,13 @@
+
+#include <elfutils/libdwfl.h>
+
+int main(void)
+{
+ /*
+ * This function is guarded via __nonnull_attribute__ (1, 2),
+ * hence the '1' passed for the pointer arguments. This code is
+ * never executed, only compiled.
+ */
+ dwfl_thread_getframes((void *) 1, (void *) 1, NULL);
+ return 0;
+}
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 63a0e6f..a28dca2 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -18,7 +18,7 @@
Performance counters are accessed via special file descriptors.
There's one file descriptor per virtual counter used.
-The special file descriptor is opened via the perf_event_open()
+The special file descriptor is opened via the sys_perf_event_open()
system call:
int sys_perf_event_open(struct perf_event_attr *hw_event_uptr,
@@ -82,7 +82,7 @@
If 'raw_type' is 0, then the 'type' field says what kind of counter
this is, with the following encoding:
-enum perf_event_types {
+enum perf_type_id {
PERF_TYPE_HARDWARE = 0,
PERF_TYPE_SOFTWARE = 1,
PERF_TYPE_TRACEPOINT = 2,
@@ -95,7 +95,7 @@
* Generalized performance counter event types, used by the hw_event.event_id
* parameter of the sys_perf_event_open() syscall:
*/
-enum hw_event_ids {
+enum perf_hw_id {
/*
* Common hardware events, generalized by the kernel:
*/
@@ -129,7 +129,7 @@
* physical and sw events of the kernel (and allow the profiling of them as
* well):
*/
-enum sw_event_ids {
+enum perf_sw_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
@@ -230,7 +230,7 @@
The 'comm' bit allows tracking of process comm data on process creation.
This too is recorded in the ring-buffer (see below).
-The 'pid' parameter to the perf_event_open() system call allows the
+The 'pid' parameter to the sys_perf_event_open() system call allows the
counter to be specific to a task:
pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -260,7 +260,7 @@
The 'group_fd' parameter allows counter "groups" to be set up. A
counter group has one counter which is the group "leader". The leader
-is created first, with group_fd = -1 in the perf_event_open call
+is created first, with group_fd = -1 in the sys_perf_event_open call
that creates it. The rest of the group members are created
subsequently, with group_fd giving the fd of the group leader.
(A single counter on its own is created with group_fd = -1 and is
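
A minimal, self-contained sketch of the call sequence design.txt describes
(illustrative, not from the document; assumes reasonably recent kernel
headers, with error handling trimmed) -- count this task's cycles across a
busy loop:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long count = 0;
		volatile long i;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* pid == 0, cpu == -1: this task, any CPU; group_fd == -1: leader */
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		for (i = 0; i < 1000000; i++)
			; /* something to measure */

		read(fd, &count, sizeof(count));
		printf("cycles: %llu\n", count);
		return 0;
	}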
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 496e2ab..ae3a576 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -123,7 +123,7 @@
__perfcomp_colon "$evts" "$cur"
# List subcommands for 'perf kvm'
elif [[ $prev == "kvm" ]]; then
- subcmds="top record report diff buildid-list stat"
+ subcmds=$($cmd $prev --list-cmds)
__perfcomp_colon "$subcmds" "$cur"
# List long option names
elif [[ $cur == --* ]]; then
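
This pairs with the parse_options_subcommand() conversion in builtin-kvm.c
above: instead of hardcoding the subcommand list, the completion script now
asks the tool itself, so the output of 'perf kvm --list-cmds' (handled by the
option parser in this series) and the completion can no longer drift apart.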
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index e84fa26..e18a8b5 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -12,6 +12,9 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
+#ifndef __NR_futex
+# define __NR_futex 240
+#endif
#endif
#if defined(__x86_64__)
@@ -23,6 +26,9 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
+#ifndef __NR_futex
+# define __NR_futex 202
+#endif
#endif
#ifdef __powerpc__
@@ -251,12 +257,14 @@
enum perf_call_graph_mode {
CALLCHAIN_NONE,
CALLCHAIN_FP,
- CALLCHAIN_DWARF
+ CALLCHAIN_DWARF,
+ CALLCHAIN_MAX
};
struct record_opts {
struct target target;
int call_graph;
+ bool call_graph_enabled;
bool group;
bool inherit_stat;
bool no_buffering;
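
The new CALLCHAIN_MAX sentinel lets lookup tables indexed by the enum stay
sized in lockstep with it, as the str[] array in callchain_debug() above
does. A hypothetical hardening of the idiom (whether BUILD_BUG_ON and
ARRAY_SIZE are available here is an assumption): leave the array unsized so
the sentinel catches a forgotten string at compile time.

	static void callchain_debug_sketch(struct record_opts *opts)
	{
		/* unsized on purpose: adding a CALLCHAIN_* value without
		 * a matching string trips the build-time check below */
		static const char *str[] = { "NONE", "FP", "DWARF" };

		BUILD_BUG_ON(ARRAY_SIZE(str) != CALLCHAIN_MAX);
		pr_debug("callchain: type %s\n", str[opts->call_graph]);
	}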
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 1e67437..b11bf8a 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -115,6 +115,14 @@
.desc = "Test parsing with no sample_id_all bit set",
.func = test__parse_no_sample_id_all,
},
+#if defined(__x86_64__) || defined(__i386__)
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ {
+ .desc = "Test dwarf unwind",
+ .func = test__dwarf_unwind,
+ },
+#endif
+#endif
{
.func = NULL,
},
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
new file mode 100644
index 0000000..c059ee8
--- /dev/null
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -0,0 +1,144 @@
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "tests.h"
+#include "debug.h"
+#include "machine.h"
+#include "event.h"
+#include "unwind.h"
+#include "perf_regs.h"
+#include "map.h"
+#include "thread.h"
+
+static int mmap_handler(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_mmap_event(machine, event, NULL);
+}
+
+static int init_live_machine(struct machine *machine)
+{
+ union perf_event event;
+ pid_t pid = getpid();
+
+ return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
+ mmap_handler, machine, true);
+}
+
+#define MAX_STACK 6
+
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+ unsigned long *cnt = (unsigned long *) arg;
+ char *symbol = entry->sym ? entry->sym->name : NULL;
+ static const char *funcs[MAX_STACK] = {
+ "test__arch_unwind_sample",
+ "unwind_thread",
+ "krava_3",
+ "krava_2",
+ "krava_1",
+ "test__dwarf_unwind"
+ };
+
+ if (*cnt >= MAX_STACK) {
+ pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
+ return -1;
+ }
+
+ if (!symbol) {
+ pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
+ entry->ip);
+ return -1;
+ }
+
+ pr_debug("got: %s 0x%" PRIx64 "\n", symbol, entry->ip);
+ return strcmp((const char *) symbol, funcs[(*cnt)++]);
+}
+
+__attribute__ ((noinline))
+static int unwind_thread(struct thread *thread, struct machine *machine)
+{
+ struct perf_sample sample;
+ unsigned long cnt = 0;
+ int err = -1;
+
+ memset(&sample, 0, sizeof(sample));
+
+ if (test__arch_unwind_sample(&sample, thread)) {
+ pr_debug("failed to get unwind sample\n");
+ goto out;
+ }
+
+ err = unwind__get_entries(unwind_entry, &cnt, machine, thread,
+ &sample, MAX_STACK);
+ if (err)
+ pr_debug("unwind failed\n");
+ else if (cnt != MAX_STACK) {
+ pr_debug("got wrong number of stack entries %lu != %d\n",
+ cnt, MAX_STACK);
+ err = -1;
+ }
+
+ out:
+ free(sample.user_stack.data);
+ free(sample.user_regs.regs);
+ return err;
+}
+
+__attribute__ ((noinline))
+static int krava_3(struct thread *thread, struct machine *machine)
+{
+ return unwind_thread(thread, machine);
+}
+
+__attribute__ ((noinline))
+static int krava_2(struct thread *thread, struct machine *machine)
+{
+ return krava_3(thread, machine);
+}
+
+__attribute__ ((noinline))
+static int krava_1(struct thread *thread, struct machine *machine)
+{
+ return krava_2(thread, machine);
+}
+
+int test__dwarf_unwind(void)
+{
+ struct machines machines;
+ struct machine *machine;
+ struct thread *thread;
+ int err = -1;
+
+ machines__init(&machines);
+
+ machine = machines__find(&machines, HOST_KERNEL_ID);
+ if (!machine) {
+ pr_err("Could not get machine\n");
+ return -1;
+ }
+
+ if (init_live_machine(machine)) {
+ pr_err("Could not init machine\n");
+ goto out;
+ }
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ thread = machine__find_thread(machine, getpid(), getpid());
+ if (!thread) {
+ pr_err("Could not get thread\n");
+ goto out;
+ }
+
+ err = krava_1(thread, machine);
+
+ out:
+ machine__delete_threads(machine);
+ machine__exit(machine);
+ machines__exit(&machines);
+ return err;
+}
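
A note on the shape of this test (not from the patch): the krava_* chain and
the noinline attributes exist so the compiler cannot inline or merge the
frames, leaving the unwinder a known call stack to recover. unwind_entry()
then compares each resolved symbol, innermost first, against funcs[] --
test__arch_unwind_sample, unwind_thread, krava_3, krava_2, krava_1,
test__dwarf_unwind -- and any unresolved address or a stack deeper than
MAX_STACK fails the test.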
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 2b6519e..7ccbc7b 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -101,6 +101,7 @@
.mmap = {
.header = { .misc = PERF_RECORD_MISC_USER, },
.pid = fake_mmap_info[i].pid,
+ .tid = fake_mmap_info[i].pid,
.start = fake_mmap_info[i].start,
.len = 0x1000ULL,
.pgoff = 0ULL,
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 00544b8..5daeae1 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -27,6 +27,7 @@
make_no_demangle := NO_DEMANGLE=1
make_no_libelf := NO_LIBELF=1
make_no_libunwind := NO_LIBUNWIND=1
+make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
make_no_backtrace := NO_BACKTRACE=1
make_no_libnuma := NO_LIBNUMA=1
make_no_libaudit := NO_LIBAUDIT=1
@@ -35,8 +36,9 @@
make_cscope := cscope
make_help := help
make_doc := doc
-make_perf_o := perf.o
-make_util_map_o := util/map.o
+make_perf_o := perf.o
+make_util_map_o := util/map.o
+make_util_pmu_bison_o := util/pmu-bison.o
make_install := install
make_install_bin := install-bin
make_install_doc := install-doc
@@ -49,6 +51,7 @@
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
+make_minimal += NO_LIBDW_DWARF_UNWIND=1
# $(run) contains all available tests
run := make_pure
@@ -65,6 +68,7 @@
run += make_no_demangle
run += make_no_libelf
run += make_no_libunwind
+run += make_no_libdw_dwarf_unwind
run += make_no_backtrace
run += make_no_libnuma
run += make_no_libaudit
@@ -73,6 +77,7 @@
run += make_doc
run += make_perf_o
run += make_util_map_o
+run += make_util_pmu_bison_o
run += make_install
run += make_install_bin
# FIXME 'install-*' commented out till they're fixed
@@ -113,8 +118,9 @@
test_make_python_perf_so := test -f $(PERF)/python/perf.so
-test_make_perf_o := test -f $(PERF)/perf.o
-test_make_util_map_o := test -f $(PERF)/util/map.o
+test_make_perf_o := test -f $(PERF)/perf.o
+test_make_util_map_o := test -f $(PERF)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
define test_dest_files
for file in $(1); do \
@@ -167,13 +173,10 @@
test_make_install_pdf := $(test_ok)
test_make_install_pdf_O := $(test_ok)
-# Kbuild tests only
-#test_make_python_perf_so_O := test -f $$TMP/tools/perf/python/perf.so
-#test_make_perf_o_O := test -f $$TMP/tools/perf/perf.o
-#test_make_util_map_o_O := test -f $$TMP/tools/perf/util/map.o
-
-test_make_perf_o_O := true
-test_make_util_map_o_O := true
+test_make_python_perf_so_O := test -f $$TMP_O/python/perf.so
+test_make_perf_o_O := test -f $$TMP_O/perf.o
+test_make_util_map_o_O := test -f $$TMP_O/util/map.o
+test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
test_default = test -x $(PERF)/perf
test = $(if $(test_$1),$(test_$1),$(test_default))
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 4db0ae6..8605ff5 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -2,7 +2,7 @@
#include "parse-events.h"
#include "evsel.h"
#include "evlist.h"
-#include "fs.h"
+#include <api/fs/fs.h>
#include <api/fs/debugfs.h>
#include "tests.h"
#include <linux/hw_breakpoint.h>
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 1b67720..0014d3c 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -22,8 +22,8 @@
} while (0)
static bool samples_same(const struct perf_sample *s1,
- const struct perf_sample *s2, u64 type, u64 regs_user,
- u64 read_format)
+ const struct perf_sample *s2,
+ u64 type, u64 read_format)
{
size_t i;
@@ -95,8 +95,9 @@
}
if (type & PERF_SAMPLE_REGS_USER) {
- size_t sz = hweight_long(regs_user) * sizeof(u64);
+ size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
+ COMP(user_regs.mask);
COMP(user_regs.abi);
if (s1->user_regs.abi &&
(!s1->user_regs.regs || !s2->user_regs.regs ||
@@ -174,6 +175,7 @@
.branch_stack = &branch_stack.branch_stack,
.user_regs = {
.abi = PERF_SAMPLE_REGS_ABI_64,
+ .mask = sample_regs_user,
.regs = user_regs,
},
.user_stack = {
@@ -201,8 +203,7 @@
sample.read.one.id = 99;
}
- sz = perf_event__sample_event_size(&sample, sample_type,
- sample_regs_user, read_format);
+ sz = perf_event__sample_event_size(&sample, sample_type, read_format);
bufsz = sz + 4096; /* Add a bit for overrun checking */
event = malloc(bufsz);
if (!event) {
@@ -215,8 +216,7 @@
event->header.misc = 0;
event->header.size = sz;
- err = perf_event__synthesize_sample(event, sample_type,
- sample_regs_user, read_format,
+ err = perf_event__synthesize_sample(event, sample_type, read_format,
&sample, false);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
@@ -244,8 +244,7 @@
goto out_free;
}
- if (!samples_same(&sample, &sample_out, sample_type,
- sample_regs_user, read_format)) {
+ if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
pr_debug("parsing failed for sample_type %#"PRIx64"\n",
sample_type);
goto out_free;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index e0ac713..a24795c 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -40,5 +40,14 @@
int test__sample_parsing(void);
int test__keep_tracking(void);
int test__parse_no_sample_id_all(void);
+int test__dwarf_unwind(void);
+#if defined(__x86_64__) || defined(__i386__)
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+int test__arch_unwind_sample(struct perf_sample *sample,
+ struct thread *thread);
+#endif
+#endif
#endif /* TESTS_H */
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index b720b92..7ec871a 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -587,95 +587,52 @@
bool current_entry;
};
-static int __hpp__color_callchain(struct hpp_arg *arg)
+static int __hpp__overhead_callback(struct perf_hpp *hpp, bool front)
{
- if (!symbol_conf.use_callchain)
- return 0;
-
- slsmg_printf("%c ", arg->folded_sign);
- return 2;
-}
-
-static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
- u64 (*get_field)(struct hist_entry *),
- int (*callchain_cb)(struct hpp_arg *))
-{
- int ret = 0;
- double percent = 0.0;
- struct hists *hists = he->hists;
struct hpp_arg *arg = hpp->ptr;
- if (hists->stats.total_period)
- percent = 100.0 * get_field(he) / hists->stats.total_period;
+ if (arg->current_entry && arg->b->navkeypressed)
+ ui_browser__set_color(arg->b, HE_COLORSET_SELECTED);
+ else
+ ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
+
+ if (front) {
+ if (!symbol_conf.use_callchain)
+ return 0;
+
+ slsmg_printf("%c ", arg->folded_sign);
+ return 2;
+ }
+
+ return 0;
+}
+
+static int __hpp__color_callback(struct perf_hpp *hpp, bool front __maybe_unused)
+{
+ struct hpp_arg *arg = hpp->ptr;
+
+ if (!arg->current_entry || !arg->b->navkeypressed)
+ ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
+ return 0;
+}
+
+static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+ struct hpp_arg *arg = hpp->ptr;
+ int ret;
+ va_list args;
+ double percent;
+
+ va_start(args, fmt);
+ percent = va_arg(args, double);
+ va_end(args);
ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
- if (callchain_cb)
- ret += callchain_cb(arg);
-
- ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+ ret = scnprintf(hpp->buf, hpp->size, fmt, percent);
slsmg_printf("%s", hpp->buf);
- if (symbol_conf.event_group) {
- int prev_idx, idx_delta;
- struct perf_evsel *evsel = hists_to_evsel(hists);
- struct hist_entry *pair;
- int nr_members = evsel->nr_members;
-
- if (nr_members <= 1)
- goto out;
-
- prev_idx = perf_evsel__group_idx(evsel);
-
- list_for_each_entry(pair, &he->pairs.head, pairs.node) {
- u64 period = get_field(pair);
- u64 total = pair->hists->stats.total_period;
-
- if (!total)
- continue;
-
- evsel = hists_to_evsel(pair->hists);
- idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members in the middle which
- * have no sample
- */
- ui_browser__set_percent_color(arg->b, 0.0,
- arg->current_entry);
- ret += scnprintf(hpp->buf, hpp->size,
- " %6.2f%%", 0.0);
- slsmg_printf("%s", hpp->buf);
- }
-
- percent = 100.0 * period / total;
- ui_browser__set_percent_color(arg->b, percent,
- arg->current_entry);
- ret += scnprintf(hpp->buf, hpp->size,
- " %6.2f%%", percent);
- slsmg_printf("%s", hpp->buf);
-
- prev_idx = perf_evsel__group_idx(evsel);
- }
-
- idx_delta = nr_members - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members at last which have no sample
- */
- ui_browser__set_percent_color(arg->b, 0.0,
- arg->current_entry);
- ret += scnprintf(hpp->buf, hpp->size,
- " %6.2f%%", 0.0);
- slsmg_printf("%s", hpp->buf);
- }
- }
-out:
- if (!arg->current_entry || !arg->b->navkeypressed)
- ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
-
+ advance_hpp(hpp, ret);
return ret;
}
@@ -690,14 +647,15 @@
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
- return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \
+ return __hpp__fmt(hpp, he, __hpp_get_##_field, _cb, " %6.2f%%", \
+ __hpp__slsmg_color_printf, true); \
}
-__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
-__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
+__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__overhead_callback)
+__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_us, period_us, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, __hpp__color_callback)
#undef __HPP_COLOR_PERCENT_FN
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 5b95c44..e395ef9 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -8,16 +8,24 @@
#define MAX_COLUMNS 32
-static int __percent_color_snprintf(char *buf, size_t size, double percent)
+static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
int ret = 0;
+ va_list args;
+ double percent;
const char *markup;
+ char *buf = hpp->buf;
+ size_t size = hpp->size;
+
+ va_start(args, fmt);
+ percent = va_arg(args, double);
+ va_end(args);
markup = perf_gtk__get_percent_color(percent);
if (markup)
ret += scnprintf(buf, size, markup);
- ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
+ ret += scnprintf(buf + ret, size - ret, fmt, percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
@@ -25,66 +33,6 @@
return ret;
}
-
-static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
- u64 (*get_field)(struct hist_entry *))
-{
- int ret;
- double percent = 0.0;
- struct hists *hists = he->hists;
- struct perf_evsel *evsel = hists_to_evsel(hists);
-
- if (hists->stats.total_period)
- percent = 100.0 * get_field(he) / hists->stats.total_period;
-
- ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
-
- if (perf_evsel__is_group_event(evsel)) {
- int prev_idx, idx_delta;
- struct hist_entry *pair;
- int nr_members = evsel->nr_members;
-
- prev_idx = perf_evsel__group_idx(evsel);
-
- list_for_each_entry(pair, &he->pairs.head, pairs.node) {
- u64 period = get_field(pair);
- u64 total = pair->hists->stats.total_period;
-
- evsel = hists_to_evsel(pair->hists);
- idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members in the middle which
- * have no sample
- */
- ret += __percent_color_snprintf(hpp->buf + ret,
- hpp->size - ret,
- 0.0);
- }
-
- percent = 100.0 * period / total;
- ret += __percent_color_snprintf(hpp->buf + ret,
- hpp->size - ret,
- percent);
-
- prev_idx = perf_evsel__group_idx(evsel);
- }
-
- idx_delta = nr_members - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members at last which have no sample
- */
- ret += __percent_color_snprintf(hpp->buf + ret,
- hpp->size - ret,
- 0.0);
- }
- }
- return ret;
-}
-
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
@@ -95,7 +43,8 @@
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
- return __hpp__color_fmt(hpp, he, he_get_##_field); \
+ return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
+ __percent_color_snprintf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
@@ -216,7 +165,6 @@
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
- .ptr = hists_to_evsel(hists),
};
nr_cols = 0;
@@ -243,7 +191,7 @@
col_idx = 0;
perf_hpp__for_each_format(fmt) {
- fmt->header(fmt, &hpp);
+ fmt->header(fmt, &hpp, hists_to_evsel(hists));
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, ltrim(s),
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 78f4c92..0f403b83 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -8,16 +8,27 @@
/* hist period print (hpp) functions */
-typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
+#define hpp__call_print_fn(hpp, fn, fmt, ...) \
+({ \
+ int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
+ advance_hpp(hpp, __ret); \
+ __ret; \
+})
-static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
- u64 (*get_field)(struct hist_entry *),
- const char *fmt, hpp_snprint_fn print_fn,
- bool fmt_percent)
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, hpp_callback_fn callback,
+ const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
{
- int ret;
+ int ret = 0;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
+ char *buf = hpp->buf;
+ size_t size = hpp->size;
+
+ if (callback) {
+ ret = callback(hpp, true);
+ advance_hpp(hpp, ret);
+ }
if (fmt_percent) {
double percent = 0.0;
@@ -26,9 +37,9 @@
percent = 100.0 * get_field(he) /
hists->stats.total_period;
- ret = print_fn(hpp->buf, hpp->size, fmt, percent);
+ ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
} else
- ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
+ ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
@@ -52,16 +63,22 @@
* zero-fill group members in the middle which
* have no sample
*/
- ret += print_fn(hpp->buf + ret, hpp->size - ret,
- fmt, 0);
+ if (fmt_percent) {
+ ret += hpp__call_print_fn(hpp, print_fn,
+ fmt, 0.0);
+ } else {
+ ret += hpp__call_print_fn(hpp, print_fn,
+ fmt, 0ULL);
+ }
}
- if (fmt_percent)
- ret += print_fn(hpp->buf + ret, hpp->size - ret,
- fmt, 100.0 * period / total);
- else
- ret += print_fn(hpp->buf + ret, hpp->size - ret,
- fmt, period);
+ if (fmt_percent) {
+ ret += hpp__call_print_fn(hpp, print_fn, fmt,
+ 100.0 * period / total);
+ } else {
+ ret += hpp__call_print_fn(hpp, print_fn, fmt,
+ period);
+ }
prev_idx = perf_evsel__group_idx(evsel);
}
@@ -72,41 +89,87 @@
/*
* zero-fill group members at last which have no sample
*/
- ret += print_fn(hpp->buf + ret, hpp->size - ret,
- fmt, 0);
+ if (fmt_percent) {
+ ret += hpp__call_print_fn(hpp, print_fn,
+ fmt, 0.0);
+ } else {
+ ret += hpp__call_print_fn(hpp, print_fn,
+ fmt, 0ULL);
+ }
}
}
+
+ if (callback) {
+ int __ret = callback(hpp, false);
+
+ advance_hpp(hpp, __ret);
+ ret += __ret;
+ }
+
+ /*
+ * Restore original buf and size as it's where caller expects
+ * the result will be saved.
+ */
+ hpp->buf = buf;
+ hpp->size = size;
+
return ret;
}
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
- struct perf_hpp *hpp) \
+ struct perf_hpp *hpp, \
+ struct perf_evsel *evsel) \
{ \
int len = _min_width; \
\
- if (symbol_conf.event_group) { \
- struct perf_evsel *evsel = hpp->ptr; \
- \
+ if (symbol_conf.event_group) \
len = max(len, evsel->nr_members * _unit_width); \
- } \
+ \
return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
}
#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
- struct perf_hpp *hpp __maybe_unused) \
+ struct perf_hpp *hpp __maybe_unused, \
+ struct perf_evsel *evsel) \
{ \
int len = _min_width; \
\
- if (symbol_conf.event_group) { \
- struct perf_evsel *evsel = hpp->ptr; \
- \
+ if (symbol_conf.event_group) \
len = max(len, evsel->nr_members * _unit_width); \
- } \
+ \
return len; \
}
+static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+ va_list args;
+ ssize_t ssize = hpp->size;
+ double percent;
+ int ret;
+
+ va_start(args, fmt);
+ percent = va_arg(args, double);
+ ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
+ va_end(args);
+
+ return (ret >= ssize) ? (ssize - 1) : ret;
+}
+
+static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+ va_list args;
+ ssize_t ssize = hpp->size;
+ int ret;
+
+ va_start(args, fmt);
+ ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
+ va_end(args);
+
+ return (ret >= ssize) ? (ssize - 1) : ret;
+}
+
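
The two helpers above clamp vsnprintf()'s return value, which reports the
length the output would have had, down to the number of bytes actually
stored on truncation. A minimal standalone sketch of the same clamping:

#include <stdarg.h>
#include <stdio.h>

/* returns bytes actually stored (excluding the NUL), not the
 * would-be length vsnprintf() reports on truncation */
static int clamped_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	return (ret >= (int)size) ? (int)size - 1 : ret;
}

int main(void)
{
	char small[4];

	printf("%d\n", clamped_snprintf(small, sizeof(small), "%s", "truncated"));
	/* prints 3: only "tru" plus the NUL fit */
	return 0;
}
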
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
@@ -116,8 +179,8 @@
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
- percent_color_snprintf, true); \
+ return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
+ hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
@@ -125,8 +188,8 @@
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
- return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
- scnprintf, true); \
+ return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt, \
+ hpp_entry_scnprintf, true); \
}
#define __HPP_ENTRY_RAW_FN(_type, _field) \
@@ -139,7 +202,8 @@
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
- return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
+ return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt, \
+ hpp_entry_scnprintf, false); \
}
#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
@@ -263,15 +327,13 @@
struct perf_hpp_fmt *fmt;
struct sort_entry *se;
int i = 0, ret = 0;
- struct perf_hpp dummy_hpp = {
- .ptr = hists_to_evsel(hists),
- };
+ struct perf_hpp dummy_hpp;
perf_hpp__for_each_format(fmt) {
if (i)
ret += 2;
- ret += fmt->width(fmt, &dummy_hpp);
+ ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
}
list_for_each_entry(se, &hist_entry__sort_list, list)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 831fbb7..d59893e 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -306,12 +306,6 @@
return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
-static inline void advance_hpp(struct perf_hpp *hpp, int inc)
-{
- hpp->buf += inc;
- hpp->size -= inc;
-}
-
static int hist_entry__period_snprintf(struct perf_hpp *hpp,
struct hist_entry *he)
{
@@ -385,7 +379,6 @@
struct perf_hpp dummy_hpp = {
.buf = bf,
.size = sizeof(bf),
- .ptr = hists_to_evsel(hists),
};
bool first = true;
size_t linesz;
@@ -404,7 +397,7 @@
else
first = false;
- fmt->header(fmt, &dummy_hpp);
+ fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
fprintf(fp, "%s", bf);
}
@@ -449,7 +442,7 @@
else
first = false;
- width = fmt->width(fmt, &dummy_hpp);
+ width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
for (i = 0; i < width; i++)
fprintf(fp, ".");
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 469eb67..809b4c5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -8,6 +8,8 @@
*/
#include "util.h"
+#include "ui/ui.h"
+#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
@@ -489,7 +491,7 @@
{
struct annotation *notes;
- if (sym == NULL || use_browser != 1 || !sort__has_sym)
+ if (sym == NULL)
return 0;
notes = symbol__annotation(sym);
@@ -1234,6 +1236,7 @@
struct dso *dso = map->dso;
char *filename;
const char *d_filename;
+ const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
@@ -1241,7 +1244,7 @@
int more = 0;
u64 len;
int width = 8;
- int namelen;
+ int namelen, evsel_name_len, graph_dotted_len;
filename = strdup(dso->long_name);
if (!filename)
@@ -1254,14 +1257,17 @@
len = symbol__size(sym);
namelen = strlen(d_filename);
+ evsel_name_len = strlen(evsel_name);
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
- printf(" %-*.*s| Source code & Disassembly of %s\n",
- width, width, "Percent", d_filename);
- printf("-%-*.*s-------------------------------------\n",
- width+namelen, width+namelen, graph_dotted_line);
+ printf(" %-*.*s| Source code & Disassembly of %s for %s\n",
+ width, width, "Percent", d_filename, evsel_name);
+
+ graph_dotted_len = width + namelen + evsel_name_len;
+ printf("-%-*.*s-----------------------------------------\n",
+ graph_dotted_len, graph_dotted_len, graph_dotted_line);
if (verbose)
symbol__annotate_hits(sym, evsel);
@@ -1399,3 +1405,8 @@
{
return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
+
+bool ui__has_annotation(void)
+{
+ return use_browser == 1 && sort__has_sym;
+}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index b2aef59..56ad4f5 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -151,6 +151,8 @@
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void disasm__purge(struct list_head *head);
+bool ui__has_annotation(void);
+
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines);
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index a9b48c4..7fe4994 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -1,5 +1,5 @@
#include "util.h"
-#include "fs.h"
+#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 4045d08..64453d6 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -45,8 +45,8 @@
debuglink--;
if (*debuglink == '/')
debuglink++;
- filename__read_debuglink(dso->long_name, debuglink,
- size - (debuglink - filename));
+ ret = filename__read_debuglink(dso->long_name, debuglink,
+ size - (debuglink - filename));
}
break;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index cd7d6f0..ab06f1c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -102,6 +102,16 @@
char name[0];
};
+/* dso__for_each_symbol - iterate over the symbols of given type
+ *
+ * @dso: the 'struct dso *' in which symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as temporary storage
+ * @type: the 'enum map_type' type of symbols
+ */
+#define dso__for_each_symbol(dso, pos, n, type) \
+ symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+
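
A heavily simplified, hypothetical sketch of the forwarding idea behind
dso__for_each_symbol(): the real macro walks the dso's symbol rbtree via
symbols__for_each_entry(); here an array-backed table and made-up
'fake_dso'/'symtab' types stand in for it:

#include <stdio.h>

struct symtab { const char *syms[8]; int nr; };
struct fake_dso { struct symtab symbols; };

/* low-level iterator over the container itself */
#define symtab__for_each(tab, pos, i) \
	for ((i) = 0; (i) < (tab)->nr && ((pos) = (tab)->syms[i], 1); (i)++)

/* thin wrapper, analogous to dso__for_each_symbol() forwarding to
 * symbols__for_each_entry() */
#define fake_dso__for_each_symbol(dso, pos, i) \
	symtab__for_each(&(dso)->symbols, pos, i)

int main(void)
{
	struct fake_dso dso = { { { "main", "foo", "bar" }, 3 } };
	const char *pos;
	int i;

	fake_dso__for_each_symbol(&dso, pos, i)
		printf("%s\n", pos);
	return 0;
}
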
static inline void dso__set_loaded(struct dso *dso, enum map_type type)
{
dso->loaded |= (1 << type);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index b0f3ca8..9d12aa6 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include "event.h"
#include "debug.h"
+#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
@@ -94,14 +95,10 @@
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
- int full,
perf_event__handler_t process,
struct machine *machine)
{
- char filename[PATH_MAX];
size_t size;
- DIR *tasks;
- struct dirent dirent, *next;
pid_t tgid;
memset(&event->comm, 0, sizeof(event->comm));
@@ -124,57 +121,37 @@
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
machine->id_hdr_size);
- if (!full) {
- event->comm.tid = pid;
+ event->comm.tid = pid;
- if (process(tool, event, &synth_sample, machine) != 0)
- return -1;
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
- goto out;
- }
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(filename, sizeof(filename), "%s/proc/%d/task",
- machine->root_dir, pid);
-
- tasks = opendir(filename);
- if (tasks == NULL) {
- pr_debug("couldn't open %s\n", filename);
- return 0;
- }
-
- while (!readdir_r(tasks, &dirent, &next) && next) {
- char *end;
- pid = strtol(dirent.d_name, &end, 10);
- if (*end)
- continue;
-
- /* already have tgid; jut want to update the comm */
- (void) perf_event__get_comm_tgid(pid, event->comm.comm,
- sizeof(event->comm.comm));
-
- size = strlen(event->comm.comm) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- memset(event->comm.comm + size, 0, machine->id_hdr_size);
- event->comm.header.size = (sizeof(event->comm) -
- (sizeof(event->comm.comm) - size) +
- machine->id_hdr_size);
-
- event->comm.tid = pid;
-
- if (process(tool, event, &synth_sample, machine) != 0) {
- tgid = -1;
- break;
- }
- }
-
- closedir(tasks);
out:
return tgid;
}
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ pid_t tgid, perf_event__handler_t process,
+ struct machine *machine)
+{
+ memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
+
+ /* this is really a clone event but we use fork to synthesize it */
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ event->fork.pid = tgid;
+ event->fork.tid = pid;
+ event->fork.header.type = PERF_RECORD_FORK;
+
+ event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
+
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
+
+ return 0;
+}
+
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
@@ -324,17 +301,71 @@
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
+ union perf_event *fork_event,
pid_t pid, int full,
perf_event__handler_t process,
struct perf_tool *tool,
struct machine *machine, bool mmap_data)
{
- pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+ char filename[PATH_MAX];
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid;
+
+ /* special case: only send one comm event using passed in pid */
+ if (!full) {
+ tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+ process, machine);
+
+ if (tgid == -1)
+ return -1;
+
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ }
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while (!readdir_r(tasks, &dirent, &next) && next) {
+ char *end;
+ int rc = 0;
+ pid_t _pid;
+
+ _pid = strtol(dirent.d_name, &end, 10);
+ if (*end)
+ continue;
+
+ tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
+ process, machine);
+ if (tgid == -1)
+ return -1;
+
+ if (_pid == pid) {
+ /* process the parent's maps too */
+ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ } else {
+ /* only fork the tid's map, to save time */
+ rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
process, machine);
- if (tgid == -1)
- return -1;
- return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data);
+ }
+
+ if (rc)
+ return rc;
+ }
+
+ closedir(tasks);
+ return 0;
}
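
A standalone sketch of the /proc/<pid>/task walk used above, with the same
strtol()/endptr filter for non-numeric directory entries (plain readdir()
stands in here for the readdir_r() the patch keeps):

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char path[64];
	DIR *tasks;
	struct dirent *ent;

	snprintf(path, sizeof(path), "/proc/self/task");
	tasks = opendir(path);
	if (!tasks) {
		perror("opendir");
		return 1;
	}
	while ((ent = readdir(tasks)) != NULL) {
		char *end;
		long tid = strtol(ent->d_name, &end, 10);

		if (*end)	/* "." and ".." are not tids */
			continue;
		printf("tid %ld\n", tid);
	}
	closedir(tasks);
	return 0;
}
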
int perf_event__synthesize_thread_map(struct perf_tool *tool,
@@ -343,7 +374,7 @@
struct machine *machine,
bool mmap_data)
{
- union perf_event *comm_event, *mmap_event;
+ union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1, thread, j;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
@@ -354,9 +385,14 @@
if (mmap_event == NULL)
goto out_free_comm;
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
threads->map[thread], 0,
process, tool, machine,
mmap_data)) {
@@ -382,6 +418,7 @@
/* if not, generate events for it */
if (need_leader &&
__event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
comm_event->comm.pid, 0,
process, tool, machine,
mmap_data)) {
@@ -390,6 +427,8 @@
}
}
}
+ free(fork_event);
+out_free_mmap:
free(mmap_event);
out_free_comm:
free(comm_event);
@@ -404,9 +443,12 @@
DIR *proc;
char proc_path[PATH_MAX];
struct dirent dirent, *next;
- union perf_event *comm_event, *mmap_event;
+ union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1;
+ if (machine__is_default_guest(machine))
+ return 0;
+
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
@@ -415,14 +457,15 @@
if (mmap_event == NULL)
goto out_free_comm;
- if (machine__is_default_guest(machine))
- return 0;
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
proc = opendir(proc_path);
if (proc == NULL)
- goto out_free_mmap;
+ goto out_free_fork;
while (!readdir_r(proc, &dirent, &next) && next) {
char *end;
@@ -434,12 +477,14 @@
* We may race with exiting thread, so don't stop just because
* one thread couldn't be synthesized.
*/
- __event__synthesize_thread(comm_event, mmap_event, pid, 1,
- process, tool, machine, mmap_data);
+ __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
+ 1, process, tool, machine, mmap_data);
}
err = 0;
closedir(proc);
+out_free_fork:
+ free(fork_event);
out_free_mmap:
free(mmap_event);
out_free_comm:
@@ -661,7 +706,7 @@
al->thread = thread;
al->addr = addr;
al->cpumode = cpumode;
- al->filtered = false;
+ al->filtered = 0;
if (machine == NULL) {
al->map = NULL;
@@ -687,11 +732,11 @@
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
!perf_guest)
- al->filtered = true;
+ al->filtered |= (1 << HIST_FILTER__GUEST);
if ((cpumode == PERF_RECORD_MISC_USER ||
cpumode == PERF_RECORD_MISC_KERNEL) &&
!perf_host)
- al->filtered = true;
+ al->filtered |= (1 << HIST_FILTER__HOST);
return;
}
@@ -748,9 +793,6 @@
if (thread == NULL)
return -1;
- if (thread__is_filtered(thread))
- goto out_filtered;
-
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
/*
* Have we already created the kernel maps for this machine?
@@ -768,6 +810,10 @@
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
+
+ if (thread__is_filtered(thread))
+ al->filtered |= (1 << HIST_FILTER__THREAD);
+
al->sym = NULL;
al->cpu = sample->cpu;
@@ -779,8 +825,9 @@
dso->short_name) ||
(dso->short_name != dso->long_name &&
strlist__has_entry(symbol_conf.dso_list,
- dso->long_name)))))
- goto out_filtered;
+ dso->long_name))))) {
+ al->filtered |= (1 << HIST_FILTER__DSO);
+ }
al->sym = map__find_symbol(al->map, al->addr,
machine->symbol_filter);
@@ -788,12 +835,9 @@
if (symbol_conf.sym_list &&
(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
- al->sym->name)))
- goto out_filtered;
+ al->sym->name))) {
+ al->filtered |= (1 << HIST_FILTER__SYMBOL);
+ }
return 0;
-
-out_filtered:
- al->filtered = true;
- return 0;
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 851fa06..38457d4 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -85,6 +85,7 @@
struct regs_dump {
u64 abi;
+ u64 mask;
u64 *regs;
};
@@ -259,9 +260,9 @@
const char *perf_event__name(unsigned int id);
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 sample_regs_user, u64 read_format);
+ u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 sample_regs_user, u64 read_format,
+ u64 read_format,
const struct perf_sample *sample,
bool swapped);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 55407c5..5c28d82 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -500,6 +500,34 @@
return ret;
}
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+ struct record_opts *opts)
+{
+ bool function = perf_evsel__is_function_event(evsel);
+ struct perf_event_attr *attr = &evsel->attr;
+
+ perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ if (!function) {
+ perf_evsel__set_sample_bit(evsel, REGS_USER);
+ perf_evsel__set_sample_bit(evsel, STACK_USER);
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ } else {
+ pr_info("Cannot use DWARF unwind for function trace event,"
+ " falling back to framepointers.\n");
+ }
+ }
+
+ if (function) {
+ pr_info("Disabling user space callchains for function trace event.\n");
+ attr->exclude_callchain_user = 1;
+ }
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -595,17 +623,8 @@
attr->mmap_data = track;
}
- if (opts->call_graph) {
- perf_evsel__set_sample_bit(evsel, CALLCHAIN);
-
- if (opts->call_graph == CALLCHAIN_DWARF) {
- perf_evsel__set_sample_bit(evsel, REGS_USER);
- perf_evsel__set_sample_bit(evsel, STACK_USER);
- attr->sample_regs_user = PERF_REGS_MASK;
- attr->sample_stack_user = opts->stack_dump_size;
- attr->exclude_callchain_user = 1;
- }
- }
+ if (opts->call_graph_enabled)
+ perf_evsel__config_callgraph(evsel, opts);
if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
@@ -1004,7 +1023,7 @@
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
- pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
pid, cpus->map[cpu], group_fd, flags);
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
@@ -1013,7 +1032,7 @@
group_fd, flags);
if (FD(evsel, cpu, thread) < 0) {
err = -errno;
- pr_debug2("perf_event_open failed, error %d\n",
+ pr_debug2("sys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
@@ -1220,7 +1239,7 @@
memset(data, 0, sizeof(*data));
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
- data->period = 1;
+ data->period = evsel->attr.sample_period;
data->weight = 0;
if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -1396,10 +1415,11 @@
array++;
if (data->user_regs.abi) {
- u64 regs_user = evsel->attr.sample_regs_user;
+ u64 mask = evsel->attr.sample_regs_user;
- sz = hweight_long(regs_user) * sizeof(u64);
+ sz = hweight_long(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
+ data->user_regs.mask = mask;
data->user_regs.regs = (u64 *)array;
array = (void *)array + sz;
}
@@ -1451,7 +1471,7 @@
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 sample_regs_user, u64 read_format)
+ u64 read_format)
{
size_t sz, result = sizeof(struct sample_event);
@@ -1517,7 +1537,7 @@
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample_regs_user) * sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -1546,7 +1566,7 @@
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 sample_regs_user, u64 read_format,
+ u64 read_format,
const struct perf_sample *sample,
bool swapped)
{
@@ -1687,7 +1707,7 @@
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
*array++ = sample->user_regs.abi;
- sz = hweight_long(sample_regs_user) * sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
memcpy(array, sample->user_regs.regs, sz);
array = (void *)array + sz;
} else {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index f1b3256..0c9926c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -315,6 +315,24 @@
return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+ return evsel->name &&
+ !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
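
Worth noting: sizeof(FUNCTION_EVENT) includes the terminating NUL, so the
strncmp() above compares the terminator too and only an exact name can
match. A standalone sketch:

#include <stdio.h>
#include <string.h>

#define FUNCTION_EVENT "ftrace:function"

static int is_function_event(const char *name)
{
	/* sizeof() counts the NUL, so "ftrace:function_graph" is rejected */
	return name && !strncmp(FUNCTION_EVENT, name, sizeof(FUNCTION_EVENT));
}

int main(void)
{
	printf("%d\n", is_function_event("ftrace:function"));		/* 1 */
	printf("%d\n", is_function_event("ftrace:function_graph"));	/* 0 */
	return 0;
}
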
struct perf_attr_details {
bool freq;
bool verbose;
diff --git a/tools/perf/util/fs.h b/tools/perf/util/fs.h
deleted file mode 100644
index 5e09ce1..0000000
--- a/tools/perf/util/fs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __PERF_FS
-#define __PERF_FS
-
-const char *sysfs__mountpoint(void);
-const char *procfs__mountpoint(void);
-
-#endif /* __PERF_FS */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index e4e6249..f38590d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -13,13 +13,6 @@
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he);
-enum hist_filter {
- HIST_FILTER__DSO,
- HIST_FILTER__THREAD,
- HIST_FILTER__PARENT,
- HIST_FILTER__SYMBOL,
-};
-
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
.min_percent = 0.5,
@@ -290,7 +283,7 @@
if (he->branch_info) {
/*
* This branch info is (a part of) allocated from
- * machine__resolve_bstack() and will be freed after
+ * sample__resolve_bstack() and will be freed after
* adding new entries. So we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
@@ -369,7 +362,7 @@
he_stat__add_period(&he->stat, period, weight);
/*
- * This mem info was allocated from machine__resolve_mem
+ * This mem info was allocated from sample__resolve_mem
* and will not be used anymore.
*/
zfree(&entry->mem_info);
@@ -429,7 +422,7 @@
.weight = weight,
},
.parent = sym_parent,
- .filtered = symbol__parent_filter(sym_parent),
+ .filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
.mem_info = mi,
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a59743f..1f1f513 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -14,6 +14,15 @@
struct addr_location;
struct symbol;
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+ HIST_FILTER__PARENT,
+ HIST_FILTER__SYMBOL,
+ HIST_FILTER__GUEST,
+ HIST_FILTER__HOST,
+};
+
/*
* The kernel collects the number of events it couldn't send in a stretch and
* when possible sends this number in a PERF_RECORD_LOST event. The number of
@@ -132,8 +141,10 @@
};
struct perf_hpp_fmt {
- int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
- int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+ int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
+ int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -166,6 +177,20 @@
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
+typedef u64 (*hpp_field_fn)(struct hist_entry *he);
+typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
+typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
+
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, hpp_callback_fn callback,
+ const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent);
+
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+ hpp->buf += inc;
+ hpp->size -= inc;
+}
+
static inline size_t perf_hpp__use_color(void)
{
return !symbol_conf.field_sep;
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 45cf10a..dadfa7e 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -87,13 +87,15 @@
return num;
}
+typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
+
/*
* Find the first set bit in a memory region.
*/
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
- const unsigned long *p = addr;
+ long_alias_t *p = (long_alias_t *) addr;
unsigned long result = 0;
unsigned long tmp;
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
deleted file mode 100644
index 201f573..0000000
--- a/tools/perf/util/include/linux/hash.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "../../../../include/linux/hash.h"
-
-#ifndef PERF_HASH_H
-#define PERF_HASH_H
-#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index d8c927c..9844c31 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -94,12 +94,6 @@
return (i >= ssize) ? (ssize - 1) : i;
}
-static inline unsigned long
-simple_strtoul(const char *nptr, char **endptr, int base)
-{
- return strtoul(nptr, endptr, base);
-}
-
int eprintf(int level,
const char *fmt, ...) __attribute__((format(printf, 2, 3)));
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
index 1d928a0..bfe0a2a 100644
--- a/tools/perf/util/include/linux/list.h
+++ b/tools/perf/util/include/linux/list.h
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/prefetch.h>
#include "../../../../include/linux/list.h"
diff --git a/tools/perf/util/include/linux/magic.h b/tools/perf/util/include/linux/magic.h
deleted file mode 100644
index 07d63cf..0000000
--- a/tools/perf/util/include/linux/magic.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _PERF_LINUX_MAGIC_H_
-#define _PERF_LINUX_MAGIC_H_
-
-#ifndef DEBUGFS_MAGIC
-#define DEBUGFS_MAGIC 0x64626720
-#endif
-
-#ifndef SYSFS_MAGIC
-#define SYSFS_MAGIC 0x62656572
-#endif
-
-#ifndef PROC_SUPER_MAGIC
-#define PROC_SUPER_MAGIC 0x9fa0
-#endif
-
-#endif
diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h
deleted file mode 100644
index 7841e48..0000000
--- a/tools/perf/util/include/linux/prefetch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef PERF_LINUX_PREFETCH_H
-#define PERF_LINUX_PREFETCH_H
-
-static inline void prefetch(void *a __attribute__((unused))) { }
-
-#endif
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c872991..a53cd0b8 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -327,9 +327,10 @@
return __machine__findnew_thread(machine, pid, tid, true);
}
-struct thread *machine__find_thread(struct machine *machine, pid_t tid)
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
{
- return __machine__findnew_thread(machine, 0, tid, false);
+ return __machine__findnew_thread(machine, pid, tid, false);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
@@ -1026,7 +1027,7 @@
}
thread = machine__findnew_thread(machine, event->mmap2.pid,
- event->mmap2.pid);
+ event->mmap2.tid);
if (thread == NULL)
goto out_problem;
@@ -1074,7 +1075,7 @@
}
thread = machine__findnew_thread(machine, event->mmap.pid,
- event->mmap.pid);
+ event->mmap.tid);
if (thread == NULL)
goto out_problem;
@@ -1114,7 +1115,9 @@
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
- struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
@@ -1140,7 +1143,9 @@
int machine__process_exit_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
- struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
@@ -1184,39 +1189,22 @@
return 0;
}
-static const u8 cpumodes[] = {
- PERF_RECORD_MISC_USER,
- PERF_RECORD_MISC_KERNEL,
- PERF_RECORD_MISC_GUEST_USER,
- PERF_RECORD_MISC_GUEST_KERNEL
-};
-#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
-
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
- size_t i;
- u8 m;
memset(&al, 0, sizeof(al));
+ /*
+ * We cannot use the header.misc hint to determine whether a
+ * branch stack address is user, kernel, guest, hypervisor.
+ * Branches may straddle the kernel/user/hypervisor boundaries.
+ * Thus, we have to try consecutively until we find a match
+ * or else, the symbol is unknown
+ */
+ thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
- for (i = 0; i < NCPUMODES; i++) {
- m = cpumodes[i];
- /*
- * We cannot use the header.misc hint to determine whether a
- * branch stack address is user, kernel, guest, hypervisor.
- * Branches may straddle the kernel/user/hypervisor boundaries.
- * Thus, we have to try consecutively until we find a match
- * or else, the symbol is unknown
- */
- thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
- ip, &al);
- if (al.sym)
- goto found;
- }
-found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;
@@ -1238,37 +1226,35 @@
ams->map = al.map;
}
-struct mem_info *machine__resolve_mem(struct machine *machine,
- struct thread *thr,
- struct perf_sample *sample,
- u8 cpumode)
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+ struct addr_location *al)
{
struct mem_info *mi = zalloc(sizeof(*mi));
if (!mi)
return NULL;
- ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
- ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
+ ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
+ ip__resolve_data(al->machine, al->thread, al->cpumode,
+ &mi->daddr, sample->addr);
mi->data_src.val = sample->data_src;
return mi;
}
-struct branch_info *machine__resolve_bstack(struct machine *machine,
- struct thread *thr,
- struct branch_stack *bs)
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+ struct addr_location *al)
{
- struct branch_info *bi;
unsigned int i;
+ const struct branch_stack *bs = sample->branch_stack;
+ struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
- bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
- ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
- ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
+ ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
+ ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
bi[i].flags = bs->entries[i].flags;
}
return bi;
@@ -1326,7 +1312,7 @@
continue;
}
- al.filtered = false;
+ al.filtered = 0;
thread__find_addr_location(thread, machine, cpumode,
MAP__FUNCTION, ip, &al);
if (al.sym != NULL) {
@@ -1385,8 +1371,7 @@
return 0;
return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
- thread, evsel->attr.sample_regs_user,
- sample, max_stack);
+ thread, sample, max_stack);
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index f77e91e..c8c74a1 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -41,7 +41,8 @@
return machine->vmlinux_maps[type];
}
-struct thread *machine__find_thread(struct machine *machine, pid_t tid);
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+ pid_t tid);
int machine__process_comm_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -91,12 +92,10 @@
void machine__delete_threads(struct machine *machine);
void machine__delete(struct machine *machine);
-struct branch_info *machine__resolve_bstack(struct machine *machine,
- struct thread *thread,
- struct branch_stack *bs);
-struct mem_info *machine__resolve_mem(struct machine *machine,
- struct thread *thread,
- struct perf_sample *sample, u8 cpumode);
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+ struct addr_location *al);
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+ struct addr_location *al);
int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 257e513..f00f058 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -90,6 +90,16 @@
struct symbol;
+/* map__for_each_symbol - iterate over the symbols in the given map
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as temporary storage
+ * Note: caller must ensure map->dso is not NULL (map is loaded).
+ */
+#define map__for_each_symbol(map, pos, n) \
+ dso__for_each_symbol(map->dso, pos, n, map->type)
+
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *map, enum map_type type,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d248fca..1e15df1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1091,12 +1091,12 @@
static bool is_event_supported(u8 type, unsigned config)
{
bool ret = true;
+ int open_return;
struct perf_evsel *evsel;
struct perf_event_attr attr = {
.type = type,
.config = config,
.disabled = 1,
- .exclude_kernel = 1,
};
struct {
struct thread_map map;
@@ -1108,7 +1108,20 @@
evsel = perf_evsel__new(&attr);
if (evsel) {
- ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ open_return = perf_evsel__open(evsel, NULL, &tmap.map);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->attr.exclude_kernel = 1;
+ ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ }
perf_evsel__delete(evsel);
}
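
A standalone sketch of the fallback logic above, with a hypothetical
try_open() standing in for perf_evsel__open() and simulating a system where
/proc/sys/kernel/perf_event_paranoid is 2:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct attr { bool exclude_kernel; };

/* hypothetical stand-in for perf_evsel__open(): pretends kernel
 * sampling is denied unless exclude_kernel is set */
static int try_open(const struct attr *attr)
{
	return attr->exclude_kernel ? 0 : -EACCES;
}

static bool event_supported(void)
{
	struct attr attr = { .exclude_kernel = false };
	int ret = try_open(&attr);

	if (ret == -EACCES) {
		/* retry excluding the kernel, as the patch does */
		attr.exclude_kernel = true;
		ret = try_open(&attr);
	}
	return ret >= 0;
}

int main(void)
{
	printf("%s\n", event_supported() ? "supported" : "unsupported");
	return 0;
}
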
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index d22e3f80..bf48092 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -407,7 +407,9 @@
if (internal_help && !strcmp(arg + 2, "help"))
return usage_with_options_internal(usagestr, options, 0);
if (!strcmp(arg + 2, "list-opts"))
- return PARSE_OPT_LIST;
+ return PARSE_OPT_LIST_OPTS;
+ if (!strcmp(arg + 2, "list-cmds"))
+ return PARSE_OPT_LIST_SUBCMDS;
switch (parse_long_opt(ctx, arg + 2, options)) {
case -1:
return parse_options_usage(usagestr, options, arg + 2, 0);
@@ -433,25 +435,45 @@
return ctx->cpidx + ctx->argc;
}
-int parse_options(int argc, const char **argv, const struct option *options,
- const char * const usagestr[], int flags)
+int parse_options_subcommand(int argc, const char **argv, const struct option *options,
+ const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
perf_header__set_cmdline(argc, argv);
+ /* build usage string if it's not provided */
+ if (subcommands && !usagestr[0]) {
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "perf %s [<options>] {", argv[0]);
+ for (int i = 0; subcommands[i]; i++) {
+ if (i)
+ strbuf_addstr(&buf, "|");
+ strbuf_addstr(&buf, subcommands[i]);
+ }
+ strbuf_addstr(&buf, "}");
+
+ usagestr[0] = strdup(buf.buf);
+ strbuf_release(&buf);
+ }
+
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
exit(129);
case PARSE_OPT_DONE:
break;
- case PARSE_OPT_LIST:
+ case PARSE_OPT_LIST_OPTS:
while (options->type != OPTION_END) {
printf("--%s ", options->long_name);
options++;
}
exit(130);
+ case PARSE_OPT_LIST_SUBCMDS:
+ for (int i = 0; subcommands[i]; i++)
+ printf("%s ", subcommands[i]);
+ exit(130);
default: /* PARSE_OPT_UNKNOWN */
if (ctx.argv[0][1] == '-') {
error("unknown option `%s'", ctx.argv[0] + 2);
@@ -464,6 +486,13 @@
return parse_options_end(&ctx);
}
+int parse_options(int argc, const char **argv, const struct option *options,
+ const char * const usagestr[], int flags)
+{
+ return parse_options_subcommand(argc, argv, options, NULL,
+ (const char **) usagestr, flags);
+}
+
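
A hypothetical standalone sketch of the usage string built above when
usagestr[0] is empty; a fixed buffer (assumed large enough) stands in for
the strbuf, and the subcommand list is illustrative:

#include <stdio.h>

static void build_usage(char *buf, int size, const char *cmd,
			const char * const subcommands[])
{
	int i, len = snprintf(buf, size, "perf %s [<options>] {", cmd);

	for (i = 0; subcommands[i]; i++)
		len += snprintf(buf + len, size - len, "%s%s",
				i ? "|" : "", subcommands[i]);
	snprintf(buf + len, size - len, "}");
}

int main(void)
{
	static const char * const subs[] = { "record", "report", "top", NULL };
	char usage[128];

	build_usage(usage, sizeof(usage), "kvm", subs);
	printf("%s\n", usage);	/* perf kvm [<options>] {record|report|top} */
	return 0;
}
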
#define USAGE_OPTS_WIDTH 24
#define USAGE_GAP 2
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index cbf0149..d8dac8a 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -140,6 +140,11 @@
const struct option *options,
const char * const usagestr[], int flags);
+extern int parse_options_subcommand(int argc, const char **argv,
+ const struct option *options,
+ const char *const subcommands[],
+ const char *usagestr[], int flags);
+
extern NORETURN void usage_with_options(const char * const *usagestr,
const struct option *options);
@@ -148,7 +153,8 @@
enum {
PARSE_OPT_HELP = -1,
PARSE_OPT_DONE,
- PARSE_OPT_LIST,
+ PARSE_OPT_LIST_OPTS,
+ PARSE_OPT_LIST_SUBCMDS,
PARSE_OPT_UNKNOWN,
};
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
new file mode 100644
index 0000000..a3539ef
--- /dev/null
+++ b/tools/perf/util/perf_regs.c
@@ -0,0 +1,19 @@
+#include <errno.h>
+#include "perf_regs.h"
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
+{
+ int i, idx = 0;
+ u64 mask = regs->mask;
+
+	if (!(mask & (1ULL << id)))
+ return -EINVAL;
+
+ for (i = 0; i < id; i++) {
+		if (mask & (1ULL << i))
+ idx++;
+ }
+
+ *valp = regs->regs[idx];
+ return 0;
+}
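
A standalone sketch of the mask-to-index mapping perf_reg_value() relies
on: sampled registers are stored densely in sample order, so a register's
array index is the count of mask bits set below its id:

#include <stdint.h>
#include <stdio.h>

static int reg_index(uint64_t mask, int id)
{
	int i, idx = 0;

	if (!(mask & (1ULL << id)))
		return -1;	/* register was not sampled */

	for (i = 0; i < id; i++)
		if (mask & (1ULL << i))
			idx++;
	return idx;
}

int main(void)
{
	uint64_t mask = 0xb;	/* 0b1011: registers 0, 1 and 3 sampled */

	printf("%d %d %d\n", reg_index(mask, 0), reg_index(mask, 1),
	       reg_index(mask, 3));	/* prints: 0 1 2 */
	return 0;
}
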
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index a3d42cd..d6e8b6a8 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -1,8 +1,14 @@
#ifndef __PERF_REGS_H
#define __PERF_REGS_H
+#include "types.h"
+#include "event.h"
+
#ifdef HAVE_PERF_REGS_SUPPORT
#include <perf_regs.h>
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+
#else
#define PERF_REGS_MASK 0
@@ -10,5 +16,12 @@
{
return NULL;
}
+
+static inline int perf_reg_value(u64 *valp __maybe_unused,
+ struct regs_dump *regs __maybe_unused,
+ int id __maybe_unused)
+{
+ return 0;
+}
#endif /* HAVE_PERF_REGS_SUPPORT */
#endif /* __PERF_REGS_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b752ecb..00a7dcb 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -3,7 +3,7 @@
#include <unistd.h>
#include <stdio.h>
#include <dirent.h>
-#include "fs.h"
+#include <api/fs/fs.h>
#include <locale.h>
#include "util.h"
#include "pmu.h"
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a8a9b6c..0d1542f 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -70,34 +70,32 @@
}
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
-static int convert_name_to_addr(struct perf_probe_event *pev,
- const char *exec);
static void clear_probe_trace_event(struct probe_trace_event *tev);
-static struct machine machine;
+static struct machine *host_machine;
/* Initialize symbol maps and path of vmlinux/modules */
-static int init_vmlinux(void)
+static int init_symbol_maps(bool user_only)
{
int ret;
symbol_conf.sort_by_name = true;
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
- else
- pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
ret = symbol__init();
if (ret < 0) {
pr_debug("Failed to init symbol map.\n");
goto out;
}
- ret = machine__init(&machine, "", HOST_KERNEL_ID);
- if (ret < 0)
- goto out;
+ if (host_machine || user_only) /* already initialized */
+ return 0;
- if (machine__create_kernel_maps(&machine) < 0) {
- pr_debug("machine__create_kernel_maps() failed.\n");
- goto out;
+ if (symbol_conf.vmlinux_name)
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+
+ host_machine = machine__new_host();
+ if (!host_machine) {
+ pr_debug("machine__new_host() failed.\n");
+ symbol__exit();
+ ret = -1;
}
out:
if (ret < 0)
@@ -105,21 +103,66 @@
return ret;
}
+static void exit_symbol_maps(void)
+{
+ if (host_machine) {
+ machine__delete(host_machine);
+ host_machine = NULL;
+ }
+ symbol__exit();
+}
+
static struct symbol *__find_kernel_function_by_name(const char *name,
struct map **mapp)
{
- return machine__find_kernel_function_by_name(&machine, name, mapp,
+ return machine__find_kernel_function_by_name(host_machine, name, mapp,
NULL);
}
+static struct symbol *__find_kernel_function(u64 addr, struct map **mapp)
+{
+ return machine__find_kernel_function(host_machine, addr, mapp, NULL);
+}
+
+static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
+{
+ /* kmap->ref_reloc_sym should be set if host_machine is initialized */
+ struct kmap *kmap;
+
+ if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0)
+ return NULL;
+
+ kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+ return kmap->ref_reloc_sym;
+}
+
+static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
+{
+ struct ref_reloc_sym *reloc_sym;
+ struct symbol *sym;
+ struct map *map;
+
+	/* ref_reloc_sym is just a label. Need a special fix */
+ reloc_sym = kernel_get_ref_reloc_sym();
+ if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
+ return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
+ else {
+ sym = __find_kernel_function_by_name(name, &map);
+ if (sym)
+			return map->unmap_ip(map, sym->start) -
+				((reloc) ? 0 : map->reloc);
+ }
+ return 0;
+}
+
static struct map *kernel_get_module_map(const char *module)
{
struct rb_node *nd;
- struct map_groups *grp = &machine.kmaps;
+ struct map_groups *grp = &host_machine->kmaps;
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
- return machine__new_module(&machine, 0, module);
+ return machine__new_module(host_machine, 0, module);
if (!module)
module = "kernel";
@@ -141,7 +184,7 @@
const char *vmlinux_name;
if (module) {
- list_for_each_entry(dso, &machine.kernel_dsos, node) {
+ list_for_each_entry(dso, &host_machine->kernel_dsos, node) {
if (strncmp(dso->short_name + 1, module,
dso->short_name_len - 2) == 0)
goto found;
@@ -150,7 +193,7 @@
return NULL;
}
- map = machine.vmlinux_maps[MAP__FUNCTION];
+ map = host_machine->vmlinux_maps[MAP__FUNCTION];
dso = map->dso;
vmlinux_name = symbol_conf.vmlinux_name;
@@ -173,20 +216,6 @@
return (dso) ? dso->long_name : NULL;
}
-static int init_user_exec(void)
-{
- int ret = 0;
-
- symbol_conf.try_vmlinux_path = false;
- symbol_conf.sort_by_name = true;
- ret = symbol__init();
-
- if (ret < 0)
- pr_debug("Failed to init symbol map.\n");
-
- return ret;
-}
-
static int convert_exec_to_group(const char *exec, char **result)
{
char *ptr1, *ptr2, *exec_copy;
@@ -218,32 +247,23 @@
return ret;
}
-static int convert_to_perf_probe_point(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
{
- pp->function = strdup(tp->symbol);
+ int i;
- if (pp->function == NULL)
- return -ENOMEM;
-
- pp->offset = tp->offset;
- pp->retprobe = tp->retprobe;
-
- return 0;
+ for (i = 0; i < ntevs; i++)
+ clear_probe_trace_event(tevs + i);
}
#ifdef HAVE_DWARF_SUPPORT
+
/* Open new debuginfo of given module */
static struct debuginfo *open_debuginfo(const char *module)
{
- const char *path;
+ const char *path = module;
- /* A file path -- this is an offline module */
- if (module && strchr(module, '/'))
- path = module;
- else {
+ if (!module || !strchr(module, '/')) {
path = kernel_get_module_path(module);
-
if (!path) {
pr_err("Failed to find path of %s module.\n",
module ?: "kernel");
@@ -253,46 +273,6 @@
return debuginfo__new(path);
}
-/*
- * Convert trace point to probe point with debuginfo
- * Currently only handles kprobes.
- */
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
-{
- struct symbol *sym;
- struct map *map;
- u64 addr;
- int ret = -ENOENT;
- struct debuginfo *dinfo;
-
- sym = __find_kernel_function_by_name(tp->symbol, &map);
- if (sym) {
- addr = map->unmap_ip(map, sym->start + tp->offset);
- pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
- tp->offset, addr);
-
- dinfo = debuginfo__new_online_kernel(addr);
- if (dinfo) {
- ret = debuginfo__find_probe_point(dinfo,
- (unsigned long)addr, pp);
- debuginfo__delete(dinfo);
- } else {
- pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n",
- addr);
- ret = -ENOENT;
- }
- }
- if (ret <= 0) {
- pr_debug("Failed to find corresponding probes from "
- "debuginfo. Use kprobe event information.\n");
- return convert_to_perf_probe_point(tp, pp);
- }
- pp->retprobe = tp->retprobe;
-
- return 0;
-}
-
static int get_text_start_address(const char *exec, unsigned long *address)
{
Elf *elf;
@@ -321,12 +301,62 @@
return ret;
}
+/*
+ * Convert trace point to probe point with debuginfo
+ */
+static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
+{
+ struct debuginfo *dinfo = NULL;
+ unsigned long stext = 0;
+ u64 addr = tp->address;
+ int ret = -ENOENT;
+
+ /* convert the address to dwarf address */
+ if (!is_kprobe) {
+ if (!addr) {
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = get_text_start_address(tp->module, &stext);
+ if (ret < 0)
+ goto error;
+ addr += stext;
+ } else {
+ addr = kernel_get_symbol_address_by_name(tp->symbol, false);
+ if (addr == 0)
+ goto error;
+ addr += tp->offset;
+ }
+
+ pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
+ tp->module ? : "kernel");
+
+ dinfo = open_debuginfo(tp->module);
+ if (dinfo) {
+ ret = debuginfo__find_probe_point(dinfo,
+ (unsigned long)addr, pp);
+ debuginfo__delete(dinfo);
+ } else {
+ pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr);
+ ret = -ENOENT;
+ }
+
+ if (ret > 0) {
+ pp->retprobe = tp->retprobe;
+ return 0;
+ }
+error:
+ pr_debug("Failed to find corresponding probes from debuginfo.\n");
+ return ret ? : -ENOENT;
+}
+
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec)
{
int i, ret = 0;
- unsigned long offset, stext = 0;
- char buf[32];
+ unsigned long stext = 0;
if (!exec)
return 0;
@@ -336,16 +366,10 @@
return ret;
for (i = 0; i < ntevs && ret >= 0; i++) {
- offset = tevs[i].point.address - stext;
- offset += tevs[i].point.offset;
- tevs[i].point.offset = 0;
- zfree(&tevs[i].point.symbol);
- ret = e_snprintf(buf, 32, "0x%lx", offset);
- if (ret < 0)
- break;
+		/* point.address is the address of point.symbol + point.offset */
+ tevs[i].point.address -= stext;
tevs[i].point.module = strdup(exec);
- tevs[i].point.symbol = strdup(buf);
- if (!tevs[i].point.symbol || !tevs[i].point.module) {
+ if (!tevs[i].point.module) {
ret = -ENOMEM;
break;
}
@@ -388,12 +412,40 @@
return ret;
}
-static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ bool uprobe)
{
+ struct ref_reloc_sym *reloc_sym;
+ char *tmp;
int i;
- for (i = 0; i < ntevs; i++)
- clear_probe_trace_event(tevs + i);
+ if (uprobe)
+ return add_exec_to_probe_trace_events(tevs, ntevs, module);
+
+ /* Note that currently ref_reloc_sym based probe is not for drivers */
+ if (module)
+ return add_module_to_probe_trace_events(tevs, ntevs, module);
+
+ reloc_sym = kernel_get_ref_reloc_sym();
+ if (!reloc_sym) {
+ pr_warning("Relocated base symbol is not found!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ if (tevs[i].point.address) {
+ tmp = strdup(reloc_sym->name);
+ if (!tmp)
+ return -ENOMEM;
+ free(tevs[i].point.symbol);
+ tevs[i].point.symbol = tmp;
+ tevs[i].point.offset = tevs[i].point.address -
+ reloc_sym->unrelocated_addr;
+ }
+ }
+ return 0;
}
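
A standalone sketch of the address rewrite above: a resolved probe address
is re-expressed as ref_reloc_sym + offset so it stays valid if the kernel
is relocated between analysis time and insertion time (all addresses below
are made up, for illustration only):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t unrelocated_stext = 0xffffffff81000000ULL;
	uint64_t probe_address     = 0xffffffff81012345ULL;

	/* emit the probe relative to the reference symbol */
	printf("p:probe/example _stext+%" PRIu64 "\n",
	       probe_address - unrelocated_stext);
	return 0;
}
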
/* Try to find perf_probe_event with debuginfo */
@@ -416,21 +468,16 @@
return 0;
}
+ pr_debug("Try to find probe point from debuginfo.\n");
/* Searching trace events corresponding to a probe event */
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
debuginfo__delete(dinfo);
if (ntevs > 0) { /* Succeeded to find trace events */
- pr_debug("find %d probe_trace_events.\n", ntevs);
- if (target) {
- if (pev->uprobes)
- ret = add_exec_to_probe_trace_events(*tevs,
- ntevs, target);
- else
- ret = add_module_to_probe_trace_events(*tevs,
- ntevs, target);
- }
+ pr_debug("Found %d probe_trace_events.\n", ntevs);
+ ret = post_process_probe_trace_events(*tevs, ntevs,
+ target, pev->uprobes);
if (ret < 0) {
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
@@ -563,20 +610,16 @@
* Show line-range always requires debuginfo to find source file and
* line number.
*/
-int show_line_range(struct line_range *lr, const char *module)
+static int __show_line_range(struct line_range *lr, const char *module)
{
int l = 1;
- struct line_node *ln;
+ struct int_node *ln;
struct debuginfo *dinfo;
FILE *fp;
int ret;
char *tmp;
/* Search a line range */
- ret = init_vmlinux();
- if (ret < 0)
- return ret;
-
dinfo = open_debuginfo(module);
if (!dinfo) {
pr_warning("Failed to open debuginfo file.\n");
@@ -623,8 +666,8 @@
goto end;
}
- list_for_each_entry(ln, &lr->line_list, list) {
- for (; ln->line > l; l++) {
+ intlist__for_each(ln, lr->line_list) {
+ for (; ln->i > l; l++) {
ret = show_one_line(fp, l - lr->offset);
if (ret < 0)
goto end;
@@ -646,6 +689,19 @@
return ret;
}
+int show_line_range(struct line_range *lr, const char *module)
+{
+ int ret;
+
+ ret = init_symbol_maps(false);
+ if (ret < 0)
+ return ret;
+ ret = __show_line_range(lr, module);
+ exit_symbol_maps();
+
+ return ret;
+}
+
static int show_available_vars_at(struct debuginfo *dinfo,
struct perf_probe_event *pev,
int max_vls, struct strfilter *_filter,
@@ -707,14 +763,15 @@
int i, ret = 0;
struct debuginfo *dinfo;
- ret = init_vmlinux();
+ ret = init_symbol_maps(false);
if (ret < 0)
return ret;
dinfo = open_debuginfo(module);
if (!dinfo) {
pr_warning("Failed to open debuginfo file.\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
setup_pager();
@@ -724,23 +781,19 @@
externs);
debuginfo__delete(dinfo);
+out:
+ exit_symbol_maps();
return ret;
}
#else /* !HAVE_DWARF_SUPPORT */
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+static int
+find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
+ struct perf_probe_point *pp __maybe_unused,
+ bool is_kprobe __maybe_unused)
{
- struct symbol *sym;
-
- sym = __find_kernel_function_by_name(tp->symbol, NULL);
- if (!sym) {
- pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
- return -ENOENT;
- }
-
- return convert_to_perf_probe_point(tp, pp);
+ return -ENOSYS;
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
@@ -776,24 +829,22 @@
void line_range__clear(struct line_range *lr)
{
- struct line_node *ln;
-
free(lr->function);
free(lr->file);
free(lr->path);
free(lr->comp_dir);
- while (!list_empty(&lr->line_list)) {
- ln = list_first_entry(&lr->line_list, struct line_node, list);
- list_del(&ln->list);
- free(ln);
- }
+ intlist__delete(lr->line_list);
memset(lr, 0, sizeof(*lr));
}
-void line_range__init(struct line_range *lr)
+int line_range__init(struct line_range *lr)
{
memset(lr, 0, sizeof(*lr));
- INIT_LIST_HEAD(&lr->line_list);
+ lr->line_list = intlist__new(NULL);
+ if (!lr->line_list)
+ return -ENOMEM;
+ else
+ return 0;
}
static int parse_line_num(char **ptr, int *val, const char *what)
@@ -1267,16 +1318,21 @@
} else
p = argv[1];
fmt1_str = strtok_r(p, "+", &fmt);
- tp->symbol = strdup(fmt1_str);
- if (tp->symbol == NULL) {
- ret = -ENOMEM;
- goto out;
+	if (fmt1_str[0] == '0')	/* only an address starts with 0x */
+ tp->address = strtoul(fmt1_str, NULL, 0);
+ else {
+ /* Only the symbol-based probe has offset */
+ tp->symbol = strdup(fmt1_str);
+ if (tp->symbol == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fmt2_str = strtok_r(NULL, "", &fmt);
+ if (fmt2_str == NULL)
+ tp->offset = 0;
+ else
+ tp->offset = strtoul(fmt2_str, NULL, 10);
}
- fmt2_str = strtok_r(NULL, "", &fmt);
- if (fmt2_str == NULL)
- tp->offset = 0;
- else
- tp->offset = strtoul(fmt2_str, NULL, 10);
tev->nargs = argc - 2;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
@@ -1518,20 +1574,27 @@
if (buf == NULL)
return NULL;
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event);
+ if (len <= 0)
+ goto error;
+
+ /* Uprobes must have tp->address and tp->module */
+ if (tev->uprobes && (!tp->address || !tp->module))
+ goto error;
+
+ /* Use the tp->address for uprobes */
if (tev->uprobes)
- len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s:%s",
- tp->retprobe ? 'r' : 'p',
- tev->group, tev->event,
- tp->module, tp->symbol);
+ ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
+ tp->module, tp->address);
else
- len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu",
- tp->retprobe ? 'r' : 'p',
- tev->group, tev->event,
+ ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
tp->module ?: "", tp->module ? ":" : "",
tp->symbol, tp->offset);
- if (len <= 0)
+ if (ret <= 0)
goto error;
+ len += ret;
for (i = 0; i < tev->nargs; i++) {
ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
@@ -1547,6 +1610,79 @@
return NULL;
}
+static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
+{
+ struct symbol *sym = NULL;
+ struct map *map;
+ u64 addr;
+ int ret = -ENOENT;
+
+ if (!is_kprobe) {
+ map = dso__new_map(tp->module);
+ if (!map)
+ goto out;
+ addr = tp->address;
+ sym = map__find_symbol(map, addr, NULL);
+ } else {
+ addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+ if (addr) {
+ addr += tp->offset;
+ sym = __find_kernel_function(addr, &map);
+ }
+ }
+ if (!sym)
+ goto out;
+
+ pp->retprobe = tp->retprobe;
+ pp->offset = addr - map->unmap_ip(map, sym->start);
+ pp->function = strdup(sym->name);
+ ret = pp->function ? 0 : -ENOMEM;
+
+out:
+ if (map && !is_kprobe) {
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+
+ return ret;
+}
+
+static int convert_to_perf_probe_point(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
+{
+ char buf[128];
+ int ret;
+
+ ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe);
+ if (!ret)
+ return 0;
+ ret = find_perf_probe_point_from_map(tp, pp, is_kprobe);
+ if (!ret)
+ return 0;
+
+	pr_debug("Failed to find probe point from both dwarf and map.\n");
+
+ if (tp->symbol) {
+ pp->function = strdup(tp->symbol);
+ pp->offset = tp->offset;
+ } else if (!tp->module && !is_kprobe) {
+ ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+ if (ret < 0)
+ return ret;
+ pp->function = strdup(buf);
+ pp->offset = 0;
+ }
+ if (pp->function == NULL)
+ return -ENOMEM;
+
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
struct perf_probe_event *pev, bool is_kprobe)
{
@@ -1560,11 +1696,7 @@
return -ENOMEM;
/* Convert trace_point to probe_point */
- if (is_kprobe)
- ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
- else
- ret = convert_to_perf_probe_point(&tev->point, &pev->point);
-
+ ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe);
if (ret < 0)
return ret;
@@ -1731,7 +1863,8 @@
}
/* Show an event */
-static int show_perf_probe_event(struct perf_probe_event *pev)
+static int show_perf_probe_event(struct perf_probe_event *pev,
+ const char *module)
{
int i, ret;
char buf[128];
@@ -1747,6 +1880,8 @@
return ret;
printf(" %-20s (on %s", buf, place);
+ if (module)
+ printf(" in %s", module);
if (pev->nargs > 0) {
printf(" with");
@@ -1784,7 +1919,8 @@
ret = convert_to_perf_probe_event(&tev, &pev,
is_kprobe);
if (ret >= 0)
- ret = show_perf_probe_event(&pev);
+ ret = show_perf_probe_event(&pev,
+ tev.point.module);
}
clear_perf_probe_event(&pev);
clear_probe_trace_event(&tev);
@@ -1807,7 +1943,7 @@
if (fd < 0)
return fd;
- ret = init_vmlinux();
+ ret = init_symbol_maps(false);
if (ret < 0)
return ret;
@@ -1820,6 +1956,7 @@
close(fd);
}
+ exit_symbol_maps();
return ret;
}
@@ -1982,7 +2119,7 @@
group = pev->group;
pev->event = tev->event;
pev->group = tev->group;
- show_perf_probe_event(pev);
+ show_perf_probe_event(pev, tev->point.module);
/* Trick here - restore current event/group */
pev->event = (char *)event;
pev->group = (char *)group;
@@ -2008,13 +2145,159 @@
return ret;
}
+static char *looking_function_name;
+static int num_matched_functions;
+
+static int probe_function_filter(struct map *map __maybe_unused,
+ struct symbol *sym)
+{
+ if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
+ strcmp(looking_function_name, sym->name) == 0) {
+ num_matched_functions++;
+ return 0;
+ }
+ return 1;
+}
+
+#define strdup_or_goto(str, label) \
+ ({ char *__p = strdup(str); if (!__p) goto label; __p; })
+
+/*
+ * Find probe function addresses from map.
+ * Return an error or the number of found probe_trace_events.
+ */
+static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
+{
+ struct map *map = NULL;
+ struct kmap *kmap = NULL;
+ struct ref_reloc_sym *reloc_sym = NULL;
+ struct symbol *sym;
+ struct rb_node *nd;
+ struct probe_trace_event *tev;
+ struct perf_probe_point *pp = &pev->point;
+ struct probe_trace_point *tp;
+ int ret, i;
+
+ /* Init maps of given executable or kernel */
+ if (pev->uprobes)
+ map = dso__new_map(target);
+ else
+ map = kernel_get_module_map(target);
+ if (!map) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * Load matched symbols: since different local symbols may have the
+	 * same name but different addresses, list all matching symbols.
+ */
+ num_matched_functions = 0;
+ looking_function_name = pp->function;
+ ret = map__load(map, probe_function_filter);
+ if (ret || num_matched_functions == 0) {
+ pr_err("Failed to find symbol %s in %s\n", pp->function,
+ target ? : "kernel");
+ ret = -ENOENT;
+ goto out;
+ } else if (num_matched_functions > max_tevs) {
+ pr_err("Too many functions matched in %s\n",
+ target ? : "kernel");
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (!pev->uprobes) {
+ kmap = map__kmap(map);
+ reloc_sym = kmap->ref_reloc_sym;
+ if (!reloc_sym) {
+ pr_warning("Relocated base symbol is not found!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Setup result trace-probe-events */
+ *tevs = zalloc(sizeof(*tev) * num_matched_functions);
+ if (!*tevs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = 0;
+ map__for_each_symbol(map, sym, nd) {
+ tev = (*tevs) + ret;
+ tp = &tev->point;
+ if (ret == num_matched_functions) {
+			pr_warning("Too many symbols are listed. Skipping the rest.\n");
+ break;
+ }
+ ret++;
+
+ if (pp->offset > sym->end - sym->start) {
+ pr_warning("Offset %ld is bigger than the size of %s\n",
+ pp->offset, sym->name);
+ ret = -ENOENT;
+ goto err_out;
+ }
+ /* Add one probe point */
+ tp->address = map->unmap_ip(map, sym->start) + pp->offset;
+ if (reloc_sym) {
+ tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out);
+ tp->offset = tp->address - reloc_sym->addr;
+ } else {
+ tp->symbol = strdup_or_goto(sym->name, nomem_out);
+ tp->offset = pp->offset;
+ }
+ tp->retprobe = pp->retprobe;
+ if (target)
+ tev->point.module = strdup_or_goto(target, nomem_out);
+ tev->uprobes = pev->uprobes;
+ tev->nargs = pev->nargs;
+ if (tev->nargs) {
+ tev->args = zalloc(sizeof(struct probe_trace_arg) *
+ tev->nargs);
+ if (tev->args == NULL)
+ goto nomem_out;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ if (pev->args[i].name)
+ tev->args[i].name =
+ strdup_or_goto(pev->args[i].name,
+ nomem_out);
+
+ tev->args[i].value = strdup_or_goto(pev->args[i].var,
+ nomem_out);
+ if (pev->args[i].type)
+ tev->args[i].type =
+ strdup_or_goto(pev->args[i].type,
+ nomem_out);
+ }
+ }
+
+out:
+ if (map && pev->uprobes) {
+		/* The map needs to be released only when using uprobes (exec) */
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+ return ret;
+
+nomem_out:
+ ret = -ENOMEM;
+err_out:
+ clear_probe_trace_events(*tevs, num_matched_functions);
+ zfree(tevs);
+ goto out;
+}
+
static int convert_to_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs, const char *target)
{
- struct symbol *sym;
- int ret, i;
- struct probe_trace_event *tev;
+ int ret;
if (pev->uprobes && !pev->group) {
/* Replace group name if not given */
@@ -2030,91 +2313,7 @@
if (ret != 0)
return ret; /* Found in debuginfo or got an error */
- if (pev->uprobes) {
- ret = convert_name_to_addr(pev, target);
- if (ret < 0)
- return ret;
- }
-
- /* Allocate trace event buffer */
- tev = *tevs = zalloc(sizeof(struct probe_trace_event));
- if (tev == NULL)
- return -ENOMEM;
-
- /* Copy parameters */
- tev->point.symbol = strdup(pev->point.function);
- if (tev->point.symbol == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (target) {
- tev->point.module = strdup(target);
- if (tev->point.module == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
- tev->point.offset = pev->point.offset;
- tev->point.retprobe = pev->point.retprobe;
- tev->nargs = pev->nargs;
- tev->uprobes = pev->uprobes;
-
- if (tev->nargs) {
- tev->args = zalloc(sizeof(struct probe_trace_arg)
- * tev->nargs);
- if (tev->args == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- for (i = 0; i < tev->nargs; i++) {
- if (pev->args[i].name) {
- tev->args[i].name = strdup(pev->args[i].name);
- if (tev->args[i].name == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- }
- tev->args[i].value = strdup(pev->args[i].var);
- if (tev->args[i].value == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- if (pev->args[i].type) {
- tev->args[i].type = strdup(pev->args[i].type);
- if (tev->args[i].type == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- }
- }
- }
-
- if (pev->uprobes)
- return 1;
-
- /* Currently just checking function name from symbol map */
- sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
- if (!sym) {
- pr_warning("Kernel symbol \'%s\' not found.\n",
- tev->point.symbol);
- ret = -ENOENT;
- goto error;
- } else if (tev->point.offset > sym->end - sym->start) {
- pr_warning("Offset specified is greater than size of %s\n",
- tev->point.symbol);
- ret = -ENOENT;
- goto error;
-
- }
-
- return 1;
-error:
- clear_probe_trace_event(tev);
- free(tev);
- *tevs = NULL;
- return ret;
+ return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
}
struct __event_package {
@@ -2135,12 +2334,7 @@
if (pkgs == NULL)
return -ENOMEM;
- if (!pevs->uprobes)
- /* Init vmlinux path */
- ret = init_vmlinux();
- else
- ret = init_user_exec();
-
+ ret = init_symbol_maps(pevs->uprobes);
if (ret < 0) {
free(pkgs);
return ret;
@@ -2174,6 +2368,7 @@
zfree(&pkgs[i].tevs);
}
free(pkgs);
+ exit_symbol_maps();
return ret;
}
@@ -2323,159 +2518,51 @@
static int filter_available_functions(struct map *map __maybe_unused,
struct symbol *sym)
{
- if (sym->binding == STB_GLOBAL &&
+ if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
strfilter__compare(available_func_filter, sym->name))
return 0;
return 1;
}
-static int __show_available_funcs(struct map *map)
+int show_available_funcs(const char *target, struct strfilter *_filter,
+ bool user)
{
- if (map__load(map, filter_available_functions)) {
- pr_err("Failed to load map.\n");
+ struct map *map;
+ int ret;
+
+ ret = init_symbol_maps(user);
+ if (ret < 0)
+ return ret;
+
+ /* Get a symbol map */
+ if (user)
+ map = dso__new_map(target);
+ else
+ map = kernel_get_module_map(target);
+ if (!map) {
+ pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
return -EINVAL;
}
+
+ /* Load symbols with given filter */
+ available_func_filter = _filter;
+ if (map__load(map, filter_available_functions)) {
+ pr_err("Failed to load symbols in %s\n", (target) ? : "kernel");
+ goto end;
+ }
if (!dso__sorted_by_name(map->dso, map->type))
dso__sort_by_name(map->dso, map->type);
- dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
- return 0;
-}
-
-static int available_kernel_funcs(const char *module)
-{
- struct map *map;
- int ret;
-
- ret = init_vmlinux();
- if (ret < 0)
- return ret;
-
- map = kernel_get_module_map(module);
- if (!map) {
- pr_err("Failed to find %s map.\n", (module) ? : "kernel");
- return -EINVAL;
- }
- return __show_available_funcs(map);
-}
-
-static int available_user_funcs(const char *target)
-{
- struct map *map;
- int ret;
-
- ret = init_user_exec();
- if (ret < 0)
- return ret;
-
- map = dso__new_map(target);
- ret = __show_available_funcs(map);
- dso__delete(map->dso);
- map__delete(map);
- return ret;
-}
-
-int show_available_funcs(const char *target, struct strfilter *_filter,
- bool user)
-{
+ /* Show all (filtered) symbols */
setup_pager();
- available_func_filter = _filter;
-
- if (!user)
- return available_kernel_funcs(target);
-
- return available_user_funcs(target);
-}
-
-/*
- * uprobe_events only accepts address:
- * Convert function and any offset to address
- */
-static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
-{
- struct perf_probe_point *pp = &pev->point;
- struct symbol *sym;
- struct map *map = NULL;
- char *function = NULL;
- int ret = -EINVAL;
- unsigned long long vaddr = 0;
-
- if (!pp->function) {
- pr_warning("No function specified for uprobes");
- goto out;
- }
-
- function = strdup(pp->function);
- if (!function) {
- pr_warning("Failed to allocate memory by strdup.\n");
- ret = -ENOMEM;
- goto out;
- }
-
- map = dso__new_map(exec);
- if (!map) {
- pr_warning("Cannot find appropriate DSO for %s.\n", exec);
- goto out;
- }
- available_func_filter = strfilter__new(function, NULL);
- if (map__load(map, filter_available_functions)) {
- pr_err("Failed to load map.\n");
- goto out;
- }
-
- sym = map__find_symbol_by_name(map, function, NULL);
- if (!sym) {
- pr_warning("Cannot find %s in DSO %s\n", function, exec);
- goto out;
- }
-
- if (map->start > sym->start)
- vaddr = map->start;
- vaddr += sym->start + pp->offset + map->pgoff;
- pp->offset = 0;
-
- if (!pev->event) {
- pev->event = function;
- function = NULL;
- }
- if (!pev->group) {
- char *ptr1, *ptr2, *exec_copy;
-
- pev->group = zalloc(sizeof(char *) * 64);
- exec_copy = strdup(exec);
- if (!exec_copy) {
- ret = -ENOMEM;
- pr_warning("Failed to copy exec string.\n");
- goto out;
- }
-
- ptr1 = strdup(basename(exec_copy));
- if (ptr1) {
- ptr2 = strpbrk(ptr1, "-._");
- if (ptr2)
- *ptr2 = '\0';
- e_snprintf(pev->group, 64, "%s_%s", PERFPROBE_GROUP,
- ptr1);
- free(ptr1);
- }
- free(exec_copy);
- }
- free(pp->function);
- pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS);
- if (!pp->function) {
- ret = -ENOMEM;
- pr_warning("Failed to allocate memory by zalloc.\n");
- goto out;
- }
- e_snprintf(pp->function, MAX_PROBE_ARGS, "0x%llx", vaddr);
- ret = 0;
-
-out:
- if (map) {
+ dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+end:
+ if (user) {
dso__delete(map->dso);
map__delete(map);
}
- if (function)
- free(function);
+ exit_symbol_maps();
+
return ret;
}
+
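
For reference, the strdup_or_goto() helper added in probe-event.c above leans on the GCC statement-expression extension: the ({ ... }) block evaluates to its final expression, so a failing strdup() can jump straight to the function's cleanup label while a successful one yields the duplicated string. A minimal standalone sketch of the same pattern (GCC/clang only; copy_pair() is an illustrative name, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define strdup_or_goto(str, label) \
	({ char *__p = strdup(str); if (!__p) goto label; __p; })

/* Illustrative caller: duplicate two strings, unwinding on failure. */
static int copy_pair(const char *a, const char *b, char **ca, char **cb)
{
	*ca = *cb = NULL;
	*ca = strdup_or_goto(a, nomem);	/* the goto fires before assignment */
	*cb = strdup_or_goto(b, nomem);
	return 0;
nomem:
	free(*ca);			/* free(NULL) is a no-op */
	*ca = NULL;
	return -1;			/* the perf code returns -ENOMEM */
}

int main(void)
{
	char *x, *y;

	if (!copy_pair("hello", "world", &x, &y))
		printf("%s %s\n", x, y);
	free(x);
	free(y);
	return 0;
}
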
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index fcaf727..776c934 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -2,6 +2,7 @@
#define _PROBE_EVENT_H
#include <stdbool.h>
+#include "intlist.h"
#include "strlist.h"
#include "strfilter.h"
@@ -76,13 +77,6 @@
struct perf_probe_arg *args; /* Arguments */
};
-
-/* Line number container */
-struct line_node {
- struct list_head list;
- int line;
-};
-
/* Line range */
struct line_range {
char *file; /* File name */
@@ -92,7 +86,7 @@
int offset; /* Start line offset */
char *path; /* Real path name */
char *comp_dir; /* Compile directory */
- struct list_head line_list; /* Visible lines */
+ struct intlist *line_list; /* Visible lines */
};
/* List of variables */
@@ -124,7 +118,7 @@
extern void line_range__clear(struct line_range *lr);
/* Initialize line range */
-extern void line_range__init(struct line_range *lr);
+extern int line_range__init(struct line_range *lr);
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 061edb1..df02386 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -34,7 +34,9 @@
#include <linux/bitops.h>
#include "event.h"
+#include "dso.h"
#include "debug.h"
+#include "intlist.h"
#include "util.h"
#include "symbol.h"
#include "probe-finder.h"
@@ -42,65 +44,6 @@
/* Kprobe tracer basic type is up to u64 */
#define MAX_BASIC_TYPE_BITS 64
-/* Line number list operations */
-
-/* Add a line to line number list */
-static int line_list__add_line(struct list_head *head, int line)
-{
- struct line_node *ln;
- struct list_head *p;
-
- /* Reverse search, because new line will be the last one */
- list_for_each_entry_reverse(ln, head, list) {
- if (ln->line < line) {
- p = &ln->list;
- goto found;
- } else if (ln->line == line) /* Already exist */
- return 1;
- }
- /* List is empty, or the smallest entry */
- p = head;
-found:
- pr_debug("line list: add a line %u\n", line);
- ln = zalloc(sizeof(struct line_node));
- if (ln == NULL)
- return -ENOMEM;
- ln->line = line;
- INIT_LIST_HEAD(&ln->list);
- list_add(&ln->list, p);
- return 0;
-}
-
-/* Check if the line in line number list */
-static int line_list__has_line(struct list_head *head, int line)
-{
- struct line_node *ln;
-
- /* Reverse search, because new line will be the last one */
- list_for_each_entry(ln, head, list)
- if (ln->line == line)
- return 1;
-
- return 0;
-}
-
-/* Init line number list */
-static void line_list__init(struct list_head *head)
-{
- INIT_LIST_HEAD(head);
-}
-
-/* Free line number list */
-static void line_list__free(struct list_head *head)
-{
- struct line_node *ln;
- while (!list_empty(head)) {
- ln = list_first_entry(head, struct line_node, list);
- list_del(&ln->list);
- free(ln);
- }
-}
-
/* Dwarf FL wrappers */
static char *debuginfo_path; /* Currently dummy */
@@ -147,80 +90,7 @@
return -ENOENT;
}
-#if _ELFUTILS_PREREQ(0, 148)
-/* This method is buggy if elfutils is older than 0.148 */
-static int __linux_kernel_find_elf(Dwfl_Module *mod,
- void **userdata,
- const char *module_name,
- Dwarf_Addr base,
- char **file_name, Elf **elfp)
-{
- int fd;
- const char *path = kernel_get_module_path(module_name);
-
- pr_debug2("Use file %s for %s\n", path, module_name);
- if (path) {
- fd = open(path, O_RDONLY);
- if (fd >= 0) {
- *file_name = strdup(path);
- return fd;
- }
- }
- /* If failed, try to call standard method */
- return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
- file_name, elfp);
-}
-
-static const Dwfl_Callbacks kernel_callbacks = {
- .find_debuginfo = dwfl_standard_find_debuginfo,
- .debuginfo_path = &debuginfo_path,
-
- .find_elf = __linux_kernel_find_elf,
- .section_address = dwfl_linux_kernel_module_section_address,
-};
-
-/* Get a Dwarf from live kernel image */
-static int debuginfo__init_online_kernel_dwarf(struct debuginfo *dbg,
- Dwarf_Addr addr)
-{
- dbg->dwfl = dwfl_begin(&kernel_callbacks);
- if (!dbg->dwfl)
- return -EINVAL;
-
- /* Load the kernel dwarves: Don't care the result here */
- dwfl_linux_kernel_report_kernel(dbg->dwfl);
- dwfl_linux_kernel_report_modules(dbg->dwfl);
-
- dbg->dbg = dwfl_addrdwarf(dbg->dwfl, addr, &dbg->bias);
- /* Here, check whether we could get a real dwarf */
- if (!dbg->dbg) {
- pr_debug("Failed to find kernel dwarf at %lx\n",
- (unsigned long)addr);
- dwfl_end(dbg->dwfl);
- memset(dbg, 0, sizeof(*dbg));
- return -ENOENT;
- }
-
- return 0;
-}
-#else
-/* With older elfutils, this just support kernel module... */
-static int debuginfo__init_online_kernel_dwarf(struct debuginfo *dbg,
- Dwarf_Addr addr __maybe_unused)
-{
- const char *path = kernel_get_module_path("kernel");
-
- if (!path) {
- pr_err("Failed to find vmlinux path\n");
- return -ENOENT;
- }
-
- pr_debug2("Use file %s for debuginfo\n", path);
- return debuginfo__init_offline_dwarf(dbg, path);
-}
-#endif
-
-struct debuginfo *debuginfo__new(const char *path)
+static struct debuginfo *__debuginfo__new(const char *path)
{
struct debuginfo *dbg = zalloc(sizeof(*dbg));
if (!dbg)
@@ -228,21 +98,44 @@
if (debuginfo__init_offline_dwarf(dbg, path) < 0)
zfree(&dbg);
-
+ if (dbg)
+ pr_debug("Open Debuginfo file: %s\n", path);
return dbg;
}
-struct debuginfo *debuginfo__new_online_kernel(unsigned long addr)
+enum dso_binary_type distro_dwarf_types[] = {
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+struct debuginfo *debuginfo__new(const char *path)
{
- struct debuginfo *dbg = zalloc(sizeof(*dbg));
+ enum dso_binary_type *type;
+ char buf[PATH_MAX], nil = '\0';
+ struct dso *dso;
+ struct debuginfo *dinfo = NULL;
- if (!dbg)
- return NULL;
+ /* Try to open distro debuginfo files */
+ dso = dso__new(path);
+ if (!dso)
+ goto out;
- if (debuginfo__init_online_kernel_dwarf(dbg, (Dwarf_Addr)addr) < 0)
- zfree(&dbg);
+ for (type = distro_dwarf_types;
+ !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
+ type++) {
+ if (dso__read_binary_type_filename(dso, *type, &nil,
+ buf, PATH_MAX) < 0)
+ continue;
+ dinfo = __debuginfo__new(buf);
+ }
+ dso__delete(dso);
- return dbg;
+out:
+	/* If no distro debuginfo could be opened, fall back to the given binary */
+ return dinfo ? : __debuginfo__new(path);
}
void debuginfo__delete(struct debuginfo *dbg)
@@ -880,7 +773,7 @@
}
/* Find lines which match lazy pattern */
-static int find_lazy_match_lines(struct list_head *head,
+static int find_lazy_match_lines(struct intlist *list,
const char *fname, const char *pat)
{
FILE *fp;
@@ -901,7 +794,7 @@
line[len - 1] = '\0';
if (strlazymatch(line, pat)) {
- line_list__add_line(head, linenum);
+ intlist__add(list, linenum);
count++;
}
linenum++;
@@ -924,7 +817,7 @@
Dwarf_Die *sc_die, die_mem;
int ret;
- if (!line_list__has_line(&pf->lcache, lineno) ||
+ if (!intlist__has_entry(pf->lcache, lineno) ||
strtailcmp(fname, pf->fname) != 0)
return 0;
@@ -952,9 +845,9 @@
{
int ret = 0;
- if (list_empty(&pf->lcache)) {
+ if (intlist__empty(pf->lcache)) {
/* Matching lazy line pattern */
- ret = find_lazy_match_lines(&pf->lcache, pf->fname,
+ ret = find_lazy_match_lines(pf->lcache, pf->fname,
pf->pev->point.lazy_line);
if (ret <= 0)
return ret;
@@ -1096,7 +989,9 @@
#endif
off = 0;
- line_list__init(&pf->lcache);
+ pf->lcache = intlist__new(NULL);
+ if (!pf->lcache)
+ return -ENOMEM;
/* Fastpath: lookup by function name from .debug_pubnames section */
if (pp->function) {
@@ -1149,7 +1044,8 @@
}
found:
- line_list__free(&pf->lcache);
+ intlist__delete(pf->lcache);
+ pf->lcache = NULL;
return ret;
}
@@ -1537,7 +1433,7 @@
if (lr->path == NULL)
return -ENOMEM;
}
- return line_list__add_line(&lr->line_list, lineno);
+ return intlist__add(lr->line_list, lineno);
}
static int line_range_walk_cb(const char *fname, int lineno,
@@ -1565,7 +1461,7 @@
/* Update status */
if (ret >= 0)
- if (!list_empty(&lf->lr->line_list))
+ if (!intlist__empty(lf->lr->line_list))
ret = lf->found = 1;
else
ret = 0; /* Lines are not found */
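
The probe-finder changes above retire the hand-rolled line_node list in favor of the generic intlist. A rough sketch of the lifecycle as this patch uses it — this is perf-internal API, so it only builds inside tools/perf:

#include <errno.h>
#include "intlist.h"			/* tools/perf/util */

static int line_cache_example(void)
{
	struct intlist *lines;
	int hits = 0;

	lines = intlist__new(NULL);	/* start with an empty list */
	if (!lines)
		return -ENOMEM;
	if (intlist__empty(lines))	/* populate lazily, as the lazy-line walker does */
		intlist__add(lines, 42);
	if (intlist__has_entry(lines, 42))
		hits++;
	intlist__delete(lines);		/* frees the list and all of its nodes */
	return hits;
}
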
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index ffc33cd..92590b2 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -3,6 +3,7 @@
#include <stdbool.h>
#include "util.h"
+#include "intlist.h"
#include "probe-event.h"
#define MAX_PROBE_BUFFER 1024
@@ -29,8 +30,8 @@
Dwarf_Addr bias;
};
+/* This also tries to open distro debuginfo */
extern struct debuginfo *debuginfo__new(const char *path);
-extern struct debuginfo *debuginfo__new_online_kernel(unsigned long addr);
extern void debuginfo__delete(struct debuginfo *dbg);
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
@@ -66,7 +67,7 @@
const char *fname; /* Real file name */
Dwarf_Die cu_die; /* Current CU */
Dwarf_Die sp_die;
- struct list_head lcache; /* Line cache for lazy match */
+ struct intlist *lcache; /* Line cache for lazy match */
/* For variable searching */
#if _ELFUTILS_PREREQ(0, 142)
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 595bfc7..16a475a 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -17,6 +17,6 @@
util/cgroup.c
util/rblist.c
util/strlist.c
-util/fs.c
+../lib/api/fs/fs.c
util/trace-event.c
../../lib/rbtree.c
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 3737625..049e0a0 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -2,7 +2,7 @@
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
-#include "fs.h"
+#include <api/fs/fs.h>
#include "util.h"
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0b39a48..55960f2 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -702,11 +702,12 @@
}
}
-static void regs_user__printf(struct perf_sample *sample, u64 mask)
+static void regs_user__printf(struct perf_sample *sample)
{
struct regs_dump *user_regs = &sample->user_regs;
if (user_regs->regs) {
+ u64 mask = user_regs->mask;
printf("... user regs: mask 0x%" PRIx64 "\n", mask);
regs_dump__printf(mask, user_regs->regs);
}
@@ -793,7 +794,7 @@
if (!dump_trace)
return;
- printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
+ printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
@@ -806,7 +807,7 @@
branch_stack__printf(sample);
if (sample_type & PERF_SAMPLE_REGS_USER)
- regs_user__printf(sample, evsel->attr.sample_regs_user);
+ regs_user__printf(sample);
if (sample_type & PERF_SAMPLE_STACK_USER)
stack_user__printf(&sample->user_stack);
@@ -1008,6 +1009,12 @@
if (err == 0)
perf_session__set_id_hdr_size(session);
return err;
+ case PERF_RECORD_HEADER_EVENT_TYPE:
+ /*
+	 * Deprecated, but we need to handle it for the sake
+	 * of old data files created in pipe mode.
+ */
+ return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(fd, file_offset, SEEK_SET);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 3e9f336..3b7dbf5 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -151,15 +151,15 @@
gelf_getshdr(sec, shp);
str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
- if (!strcmp(name, str)) {
+ if (str && !strcmp(name, str)) {
if (idx)
*idx = cnt;
- break;
+ return sec;
}
++cnt;
}
- return sec;
+ return NULL;
}
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
@@ -506,6 +506,8 @@
/* the start of this section is a zero-terminated string */
strncpy(debuglink, data->d_buf, size);
+ err = 0;
+
out_elf_end:
elf_end(elf);
out_close:
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a9d758a..95e2497 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -410,7 +410,7 @@
return symbols__find(&dso->symbols[type], addr);
}
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
return symbols__first(&dso->symbols[type]);
}
@@ -1251,6 +1251,46 @@
return -1;
}
+static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
+ enum dso_binary_type type)
+{
+ switch (type) {
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__DEBUGLINK:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ return !kmod && dso->kernel == DSO_TYPE_USER;
+
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__KCORE:
+ return dso->kernel == DSO_TYPE_KERNEL;
+
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ return dso->kernel == DSO_TYPE_GUEST_KERNEL;
+
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ /*
+ * kernel modules know their symtab type - it's set when
+ * creating a module dso in machine__new_module().
+ */
+ return kmod && dso->symtab_type == type;
+
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ return true;
+
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ default:
+ return false;
+ }
+}
+
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
char *name;
@@ -1261,6 +1301,7 @@
int ss_pos = 0;
struct symsrc ss_[2];
struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
+ bool kmod;
dso__set_loaded(dso, map->type);
@@ -1301,7 +1342,11 @@
if (!name)
return -1;
- /* Iterate over candidate debug images.
+ kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+
+ /*
+ * Iterate over candidate debug images.
* Keep track of "interesting" ones (those which have a symtab, dynsym,
* and/or opd section) for processing.
*/
@@ -1311,6 +1356,9 @@
enum dso_binary_type symtab_type = binary_type_symtab[i];
+ if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
+ continue;
+
if (dso__read_binary_type_filename(dso, symtab_type,
root_dir, name, PATH_MAX))
continue;
@@ -1336,6 +1384,8 @@
if (syms_ss && runtime_ss)
break;
+ } else {
+ symsrc__destroy(ss);
}
}
@@ -1351,15 +1401,10 @@
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
- if (syms_ss) {
- int km;
-
- km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
- ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
- } else {
+ if (syms_ss)
+ ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
+ else
ret = -1;
- }
if (ret > 0) {
int nr_plt;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index fffe288..501e4e7 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -79,6 +79,17 @@
void symbol__delete(struct symbol *sym);
void symbols__delete(struct rb_root *symbols);
+/* symbols__for_each_entry - iterate over symbols (rb_root)
+ *
+ * @symbols: the rb_root of symbols
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @nd: the 'struct rb_node *' to use as a temporary storage
+ */
+#define symbols__for_each_entry(symbols, pos, nd) \
+ for (nd = rb_first(symbols); \
+ nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
+ nd = rb_next(nd))
+
static inline size_t symbol__size(const struct symbol *sym)
{
return sym->end - sym->start + 1;
@@ -175,7 +186,7 @@
struct symbol *sym;
u64 addr;
char level;
- bool filtered;
+ u8 filtered;
u8 cpumode;
s32 cpu;
};
@@ -223,7 +234,6 @@
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
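
As a usage sketch, the new symbols__for_each_entry() iterator walks the per-type rb-tree of symbols in address order; dump_function_symbols() below is illustrative only and assumes the tools/perf headers:

#include <inttypes.h>
#include <stdio.h>
#include "symbol.h"			/* tools/perf/util */

static void dump_function_symbols(struct dso *dso)
{
	struct rb_node *nd;
	struct symbol *pos;

	/* Visit every function symbol of the dso, lowest address first. */
	symbols__for_each_entry(&dso->symbols[MAP__FUNCTION], pos, nd)
		printf("%#" PRIx64 " %s\n", pos->start, pos->name);
}
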
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0358882..3ce0498 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -142,3 +142,24 @@
return 0;
}
+
+void thread__find_cpumode_addr_location(struct thread *thread,
+ struct machine *machine,
+ enum map_type type, u64 addr,
+ struct addr_location *al)
+{
+ size_t i;
+	const u8 cpumodes[] = {
+ PERF_RECORD_MISC_USER,
+ PERF_RECORD_MISC_KERNEL,
+ PERF_RECORD_MISC_GUEST_USER,
+ PERF_RECORD_MISC_GUEST_KERNEL
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
+ thread__find_addr_location(thread, machine, cpumodes[i], type,
+ addr, al);
+ if (al->map)
+ break;
+ }
+}
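
The helper added above hides the common "cpumode unknown" dance: try user space first, then the kernel, then the guest modes, stopping at the first map hit. A hedged caller sketch (resolve_any_mode() is an invented name, again assuming the tools/perf headers):

#include "thread.h"			/* tools/perf/util */

static struct symbol *resolve_any_mode(struct thread *thread,
				       struct machine *machine, u64 addr)
{
	struct addr_location al;

	/* Probes each PERF_RECORD_MISC_* mode until al.map is set. */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION,
					   addr, &al);
	return al.map ? al.sym : NULL;
}
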
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 5b856bf..9b29f08 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -44,12 +44,6 @@
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
size_t thread__fprintf(struct thread *thread, FILE *fp);
-static inline struct map *thread__find_map(struct thread *thread,
- enum map_type type, u64 addr)
-{
- return thread ? map_groups__find(&thread->mg, type, addr) : NULL;
-}
-
void thread__find_addr_map(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
@@ -58,6 +52,11 @@
u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
+void thread__find_cpumode_addr_location(struct thread *thread,
+ struct machine *machine,
+ enum map_type type, u64 addr,
+ struct addr_location *al);
+
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index e0d6d07f..c36636f 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -126,6 +126,7 @@
trace_seq_init(&s);
pevent_event_info(&s, event, &record);
trace_seq_do_printf(&s);
+ trace_seq_destroy(&s);
}
void parse_proc_kallsyms(struct pevent *pevent,
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
new file mode 100644
index 0000000..67db73e
--- /dev/null
+++ b/tools/perf/util/unwind-libdw.c
@@ -0,0 +1,210 @@
+#include <linux/compiler.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include "unwind.h"
+#include "unwind-libdw.h"
+#include "machine.h"
+#include "thread.h"
+#include "types.h"
+#include "event.h"
+#include "perf_regs.h"
+
+static char *debuginfo_path;
+
+static const Dwfl_Callbacks offline_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+ .section_address = dwfl_offline_section_address,
+};
+
+static int __report_module(struct addr_location *al, u64 ip,
+ struct unwind_info *ui)
+{
+ Dwfl_Module *mod;
+ struct dso *dso = NULL;
+
+ thread__find_addr_location(ui->thread, ui->machine,
+ PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, al);
+
+ if (al->map)
+ dso = al->map->dso;
+
+ if (!dso)
+ return 0;
+
+ mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (!mod)
+ mod = dwfl_report_elf(ui->dwfl, dso->short_name,
+ dso->long_name, -1, al->map->start,
+ false);
+
+ return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
+}
+
+static int report_module(u64 ip, struct unwind_info *ui)
+{
+ struct addr_location al;
+
+ return __report_module(&al, ip, ui);
+}
+
+static int entry(u64 ip, struct unwind_info *ui)
+{
+ struct unwind_entry e;
+ struct addr_location al;
+
+ if (__report_module(&al, ip, ui))
+ return -1;
+
+ e.ip = ip;
+ e.map = al.map;
+ e.sym = al.sym;
+
+ pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+ al.sym ? al.sym->name : "''",
+ ip,
+ al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+ return ui->cb(&e, ui->arg);
+}
+
+static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
+{
+	/* We want only a single thread to be processed. */
+ if (*thread_argp != NULL)
+ return 0;
+
+ *thread_argp = arg;
+ return dwfl_pid(dwfl);
+}
+
+static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
+ Dwarf_Word *data)
+{
+ struct addr_location al;
+ ssize_t size;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, addr, &al);
+ if (!al.map) {
+ pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+ return -1;
+ }
+
+ if (!al.map->dso)
+ return -1;
+
+ size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+ addr, (u8 *) data, sizeof(*data));
+
+ return !(size == sizeof(*data));
+}
+
+static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr,
+			Dwarf_Word *result, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct stack_dump *stack = &ui->sample->user_stack;
+ u64 start, end;
+ int offset;
+ int ret;
+
+ ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
+ if (ret)
+ return false;
+
+ end = start + stack->size;
+
+ /* Check overflow. */
+ if (addr + sizeof(Dwarf_Word) < addr)
+ return false;
+
+ if (addr < start || addr + sizeof(Dwarf_Word) > end) {
+ ret = access_dso_mem(ui, addr, result);
+ if (ret) {
+ pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range"
+ " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ addr, start, end);
+ return false;
+ }
+ return true;
+ }
+
+ offset = addr - start;
+ *result = *(Dwarf_Word *)&stack->data[offset];
+ pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n",
+ addr, (unsigned long)*result, offset);
+ return true;
+}
+
+static const Dwfl_Thread_Callbacks callbacks = {
+ .next_thread = next_thread,
+ .memory_read = memory_read,
+ .set_initial_registers = libdw__arch_set_initial_registers,
+};
+
+static int
+frame_callback(Dwfl_Frame *state, void *arg)
+{
+ struct unwind_info *ui = arg;
+ Dwarf_Addr pc;
+
+ if (!dwfl_frame_pc(state, &pc, NULL)) {
+ pr_err("%s", dwfl_errmsg(-1));
+ return DWARF_CB_ABORT;
+ }
+
+ return entry(pc, ui) || !(--ui->max_stack) ?
+ DWARF_CB_ABORT : DWARF_CB_OK;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine, struct thread *thread,
+ struct perf_sample *data,
+ int max_stack)
+{
+ struct unwind_info ui = {
+ .sample = data,
+ .thread = thread,
+ .machine = machine,
+ .cb = cb,
+ .arg = arg,
+ .max_stack = max_stack,
+ };
+ Dwarf_Word ip;
+ int err = -EINVAL;
+
+ if (!data->user_regs.regs)
+ return -EINVAL;
+
+ ui.dwfl = dwfl_begin(&offline_callbacks);
+ if (!ui.dwfl)
+ goto out;
+
+ err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+ if (err)
+ goto out;
+
+ err = report_module(ip, &ui);
+ if (err)
+ goto out;
+
+ if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui))
+ goto out;
+
+ err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui);
+
+ if (err && !ui.max_stack)
+ err = 0;
+
+ out:
+ if (err)
+ pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
+
+ dwfl_end(ui.dwfl);
+ return 0;
+}
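
A caller of the new libdw unwinder supplies an unwind_entry_cb_t; the walk stops once the callback returns non-zero or max_stack frames have been visited. A hedged sketch of such a callback (print_frame() is illustrative, not part of the patch):

#include <inttypes.h>
#include <stdio.h>
#include <linux/compiler.h>
#include "unwind.h"			/* tools/perf/util */

/* Called once per unwound frame, innermost frame first. */
static int print_frame(struct unwind_entry *entry, void *arg __maybe_unused)
{
	printf("  %#" PRIx64 " %s\n", entry->ip,
	       entry->sym ? entry->sym->name : "[unknown]");
	return 0;			/* non-zero would abort the walk */
}

/* With a resolved thread and its perf_sample:
 *	unwind__get_entries(print_frame, NULL, machine, thread, sample, 16);
 */
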
diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
new file mode 100644
index 0000000..417a142
--- /dev/null
+++ b/tools/perf/util/unwind-libdw.h
@@ -0,0 +1,21 @@
+#ifndef __PERF_UNWIND_LIBDW_H
+#define __PERF_UNWIND_LIBDW_H
+
+#include <elfutils/libdwfl.h>
+#include "event.h"
+#include "thread.h"
+#include "unwind.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg);
+
+struct unwind_info {
+ Dwfl *dwfl;
+ struct perf_sample *sample;
+ struct machine *machine;
+ struct thread *thread;
+ unwind_entry_cb_t cb;
+ void *arg;
+ int max_stack;
+};
+
+#endif /* __PERF_UNWIND_LIBDW_H */
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind-libunwind.c
similarity index 92%
rename from tools/perf/util/unwind.c
rename to tools/perf/util/unwind-libunwind.c
index 742f23b..bd5768d 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -86,7 +86,6 @@
struct perf_sample *sample;
struct machine *machine;
struct thread *thread;
- u64 sample_uregs;
};
#define dw_read(ptr, type, end) ({ \
@@ -391,30 +390,13 @@
return !(size == sizeof(*data));
}
-static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id,
- u64 sample_regs)
-{
- int i, idx = 0;
-
- if (!(sample_regs & (1 << id)))
- return -EINVAL;
-
- for (i = 0; i < id; i++) {
- if (sample_regs & (1 << i))
- idx++;
- }
-
- *valp = regs->regs[idx];
- return 0;
-}
-
static int access_mem(unw_addr_space_t __maybe_unused as,
unw_word_t addr, unw_word_t *valp,
int __write, void *arg)
{
struct unwind_info *ui = arg;
struct stack_dump *stack = &ui->sample->user_stack;
- unw_word_t start, end;
+ u64 start, end;
int offset;
int ret;
@@ -424,8 +406,7 @@
return 0;
}
- ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP,
- ui->sample_uregs);
+ ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
if (ret)
return ret;
@@ -438,8 +419,9 @@
if (addr < start || addr + sizeof(unw_word_t) >= end) {
ret = access_dso_mem(ui, addr, valp);
if (ret) {
- pr_debug("unwind: access_mem %p not inside range %p-%p\n",
- (void *)addr, (void *)start, (void *)end);
+ pr_debug("unwind: access_mem %p not inside range"
+ " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ (void *) addr, start, end);
*valp = 0;
return ret;
}
@@ -448,8 +430,8 @@
offset = addr - start;
*valp = *(unw_word_t *)&stack->data[offset];
- pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n",
- (void *)addr, (unsigned long)*valp, offset);
+ pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
+ (void *) addr, (unsigned long)*valp, offset);
return 0;
}
@@ -459,6 +441,7 @@
{
struct unwind_info *ui = arg;
int id, ret;
+ u64 val;
/* Don't support write, I suspect we don't need it. */
if (__write) {
@@ -471,16 +454,17 @@
return 0;
}
- id = unwind__arch_reg_id(regnum);
+ id = libunwind__arch_reg_id(regnum);
if (id < 0)
return -EINVAL;
- ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs);
+ ret = perf_reg_value(&val, &ui->sample->user_regs, id);
if (ret) {
pr_err("unwind: can't read reg %d\n", regnum);
return ret;
}
+ *valp = (unw_word_t) val;
pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
return 0;
}
@@ -563,7 +547,7 @@
unw_word_t ip;
unw_get_reg(&c, UNW_REG_IP, &ip);
- ret = entry(ip, ui->thread, ui->machine, cb, arg);
+ ret = ip ? entry(ip, ui->thread, ui->machine, cb, arg) : 0;
}
unw_destroy_addr_space(addr_space);
@@ -572,13 +556,11 @@
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct machine *machine, struct thread *thread,
- u64 sample_uregs, struct perf_sample *data,
- int max_stack)
+ struct perf_sample *data, int max_stack)
{
- unw_word_t ip;
+ u64 ip;
struct unwind_info ui = {
.sample = data,
- .sample_uregs = sample_uregs,
.thread = thread,
.machine = machine,
};
@@ -587,7 +569,7 @@
if (!data->user_regs.regs)
return -EINVAL;
- ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs);
+ ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
if (ret)
return ret;
@@ -595,5 +577,5 @@
if (ret)
return -ENOMEM;
- return get_entries(&ui, cb, arg, max_stack);
+ return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
}
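
The removed reg_value() helper above documents how sampled user registers are laid out: only the registers whose bit is set in the sample mask are stored, densely packed in ascending bit order, so recovering register 'id' means counting the set bits below it. perf_reg_value() now does the same using the mask kept in the regs_dump itself. A standalone sketch of that lookup:

#include <errno.h>
#include <stdint.h>

static int reg_value_sketch(uint64_t *valp, const uint64_t *regs,
			    uint64_t mask, int id)
{
	int i, idx = 0;

	if (!(mask & (1ULL << id)))
		return -EINVAL;		/* this register was not sampled */
	for (i = 0; i < id; i++)	/* count set bits below 'id' */
		if (mask & (1ULL << i))
			idx++;
	*valp = regs[idx];
	return 0;
}

(Note that the removed code shifted a plain int, 1 << id, which would misbehave for register ids above 31; the sketch uses 1ULL.)
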
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index d5966f49..b031316 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -13,24 +13,25 @@
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct machine *machine,
struct thread *thread,
- u64 sample_uregs,
struct perf_sample *data, int max_stack);
-int unwind__arch_reg_id(int regnum);
+/* libunwind specific */
+#ifdef HAVE_LIBUNWIND_SUPPORT
+int libunwind__arch_reg_id(int regnum);
+#endif
#else
static inline int
unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
void *arg __maybe_unused,
struct machine *machine __maybe_unused,
struct thread *thread __maybe_unused,
- u64 sample_uregs __maybe_unused,
struct perf_sample *data __maybe_unused,
int max_stack __maybe_unused)
{
return 0;
}
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 42ad667b..9f66549 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,6 +1,6 @@
#include "../perf.h"
#include "util.h"
-#include "fs.h"
+#include <api/fs/fs.h>
#include <sys/mman.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index d664182..aa290c0d 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -201,6 +201,7 @@
msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
if (msgque.msq_id == -1) {
+ err = -errno;
printf("Can't create queue\n");
goto err_out;
}
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
index 587561d..9b17e81 100644
--- a/tools/testing/selftests/rcutorture/bin/functions.sh
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -96,6 +96,7 @@
echo qemu-system-ppc64
else
echo Cannot figure out what qemu command to use! 1>&2
+ echo file $1 output: $u
# Usually this will be one of /usr/bin/qemu-system-*
# Use RCU_QEMU_CMD environment variable or appropriate
# argument to top-level script.
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
new file mode 100755
index 0000000..829186e
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Analyze a given results directory for locktorture progress.
+#
+# Usage: sh kvm-recheck-lock.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+ :
+else
+ echo Unreadable results directory: $i
+ exit 1
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+ncs=`grep "Writes: Total:" $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* Total: //' -e 's/ .*$//'`
+if test -z "$ncs"
+then
+ echo $configfile
+else
+ title="$configfile ------- $ncs acquisitions/releases"
+ dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+ if test -z "$dur"
+ then
+ :
+ else
+ ncsps=`awk -v ncs=$ncs -v dur=$dur '
+ BEGIN { print ncs / dur }' < /dev/null`
+ title="$title ($ncsps per second)"
+ fi
+ echo $title
+fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
new file mode 100755
index 0000000..d75b1dc5
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcutorture progress.
+#
+# Usage: sh kvm-recheck-rcu.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+ :
+else
+ echo Unreadable results directory: $i
+ exit 1
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
+if test -z "$ngps"
+then
+ echo $configfile
+else
+ title="$configfile ------- $ngps grace periods"
+ dur=`sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+ if test -z "$dur"
+ then
+ :
+ else
+ ngpsps=`awk -v ngps=$ngps -v dur=$dur '
+ BEGIN { print ngps / dur }' < /dev/null`
+ title="$title ($ngpsps per second)"
+ fi
+ echo $title
+fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index baef09f..a44daaa 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Given the results directories for previous KVM runs of rcutorture,
+# Given the results directories for previous KVM-based torture runs,
# check the build and console output for errors. Given a directory
# containing results directories, this recursively checks them all.
#
@@ -27,11 +27,18 @@
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
for rd in "$@"
do
+ firsttime=1
dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u`
for i in $dirs
do
- configfile=`echo $i | sed -e 's/^.*\///'`
- echo $configfile
+ if test -n "$firsttime"
+ then
+ firsttime=""
+ resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
+ head -1 $resdir/log
+ fi
+ TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+ kvm-recheck-${TORTURE_SUITE}.sh $i
configcheck.sh $i/.config $i/ConfigFragment
parse-build.sh $i/Make.out $configfile
parse-rcutorture.sh $i/console.log $configfile
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
similarity index 79%
rename from tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh
rename to tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 151b237..94b28bb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -6,15 +6,15 @@
# Execute this in the source tree. Do not run it as a background task
# because qemu does not seem to like that much.
#
-# Usage: sh kvm-test-1-rcu.sh config builddir resdir minutes qemu-args bootargs
+# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
#
-# qemu-args defaults to "" -- you will want "-nographic" if running headless.
-# bootargs defaults to "root=/dev/sda noapic selinux=0 console=ttyS0"
-# "initcall_debug debug rcutorture.stat_interval=15"
-# "rcutorture.shutdown_secs=$((minutes * 60))"
-# "rcutorture.rcutorture_runnable=1"
+# qemu-args defaults to "-nographic", along with arguments specifying the
+# number of CPUs and other options generated from
+# the underlying CPU architecture.
+# boot_args defaults to value returned by the per_version_boot_params
+# shell function.
#
-# Anything you specify for either qemu-args or bootargs is appended to
+# Anything you specify for either qemu-args or boot_args is appended to
# the default values. The "-smp" value is deduced from the contents of
# the config fragment.
#
@@ -40,32 +40,34 @@
grace=120
-T=/tmp/kvm-test-1-rcu.sh.$$
+T=/tmp/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
. $KVM/bin/functions.sh
. $KVPATH/ver_functions.sh
config_template=${1}
+config_dir=`echo $config_template | sed -e 's,/[^/]*$,,'`
title=`echo $config_template | sed -e 's/^.*\///'`
builddir=${2}
if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
then
- echo "kvm-test-1-rcu.sh :$builddir: Not a writable directory, cannot build into it"
+ echo "kvm-test-1-run.sh :$builddir: Not a writable directory, cannot build into it"
exit 1
fi
resdir=${3}
if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir"
then
- echo "kvm-test-1-rcu.sh :$resdir: Not a writable directory, cannot build into it"
+ echo "kvm-test-1-run.sh :$resdir: Not a writable directory, cannot store results into it"
exit 1
fi
cp $config_template $resdir/ConfigFragment
echo ' ---' `date`: Starting build
echo ' ---' Kconfig fragment at: $config_template >> $resdir/log
-cat << '___EOF___' >> $T
-CONFIG_RCU_TORTURE_TEST=y
-___EOF___
+if test -r "$config_dir/CFcommon"
+then
+ cat < $config_dir/CFcommon >> $T
+fi
# Optimizations below this point
# CONFIG_USB=n
# CONFIG_SECURITY=n
@@ -96,11 +98,23 @@
cp $builddir/.config $resdir
cp $builddir/arch/x86/boot/bzImage $resdir
parse-build.sh $resdir/Make.out $title
+ if test -f $builddir.wait
+ then
+ mv $builddir.wait $builddir.ready
+ fi
else
cp $builddir/Make*.out $resdir
echo Build failed, not running KVM, see $resdir.
+ if test -f $builddir.wait
+ then
+ mv $builddir.wait $builddir.ready
+ fi
exit 1
fi
+while test -f $builddir.ready
+do
+ sleep 1
+done
minutes=$4
seconds=$(($minutes * 60))
qemu_args=$5
@@ -111,9 +125,10 @@
echo ' ---' `date`: Starting kernel
# Determine the appropriate flavor of qemu command.
-QEMU="`identify_qemu $builddir/vmlinux.o`"
+QEMU="`identify_qemu $builddir/vmlinux`"
# Generate -smp qemu argument.
+qemu_args="-nographic $qemu_args"
cpu_count=`configNR_CPUS.sh $config_template`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
@@ -133,12 +148,8 @@
# Pull in Kconfig-fragment boot parameters
boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
-# Generate CPU-hotplug boot parameters
-boot_args="`rcutorture_param_onoff "$boot_args" $builddir/.config`"
-# Generate rcu_barrier() boot parameter
-boot_args="`rcutorture_param_n_barrier_cbs "$boot_args"`"
-# Pull in standard rcutorture boot arguments
-boot_args="$boot_args rcutorture.stat_interval=15 rcutorture.shutdown_secs=$seconds rcutorture.rcutorture_runnable=1"
+# Generate kernel-version-specific boot parameters
+boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
echo $QEMU $qemu_args -m 512 -kernel $builddir/arch/x86/boot/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
if test -n "$RCU_BUILDONLY"
@@ -188,5 +199,5 @@
fi
cp $builddir/console.log $resdir
-parse-rcutorture.sh $resdir/console.log $title
+parse-${TORTURE_SUITE}torture.sh $resdir/console.log $title
parse-console.sh $resdir/console.log $title
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 1b7923b..5a78cbf 100644
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -30,14 +30,21 @@
scriptname=$0
args="$*"
+T=/tmp/kvm.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
dur=30
+dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
PATH=${KVM}/bin:$PATH; export PATH
builddir="${KVM}/b1"
RCU_INITRD="$KVM/initrd"; export RCU_INITRD
RCU_KMAKE_ARG=""; export RCU_KMAKE_ARG
+TORTURE_SUITE=rcu
resdir=""
configs=""
+cpus=0
ds=`date +%Y.%m.%d-%H:%M:%S`
kversion=""
@@ -49,7 +56,9 @@
echo " --builddir absolute-pathname"
echo " --buildonly"
echo " --configs \"config-file list\""
+ echo " --cpus N"
echo " --datestamp string"
+ echo " --dryrun sched|script"
echo " --duration minutes"
echo " --interactive"
echo " --kmake-arg kernel-make-arguments"
@@ -58,8 +67,9 @@
echo " --no-initrd"
echo " --qemu-args qemu-system-..."
echo " --qemu-cmd qemu-system-..."
- echo " --results absolute-pathname"
echo " --relbuilddir relative-pathname"
+ echo " --results absolute-pathname"
+ echo " --torture rcu"
exit 1
}
@@ -85,11 +95,21 @@
configs="$2"
shift
;;
+ --cpus)
+ checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
+ cpus=$2
+ shift
+ ;;
--datestamp)
checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--'
ds=$2
shift
;;
+ --dryrun)
+ checkarg --dryrun "sched|script" $# "$2" 'sched\|script' '^--'
+ dryrun=$2
+ shift
+ ;;
--duration)
checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
dur=$2
@@ -138,6 +158,11 @@
resdir=$2
shift
;;
+ --torture)
+ checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
+ TORTURE_SUITE=$2
+ shift
+ ;;
*)
echo Unknown argument $1
usage
@@ -146,7 +171,7 @@
shift
done
-CONFIGFRAG=${KVM}/configs; export CONFIGFRAG
+CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
KVPATH=${CONFIGFRAG}/$kversion; export KVPATH
if test -z "$configs"
@@ -157,54 +182,231 @@
if test -z "$resdir"
then
resdir=$KVM/res
- if ! test -e $resdir
- then
- mkdir $resdir || :
- fi
-else
+fi
+
+if test "$dryrun" = ""
+then
if ! test -e $resdir
then
mkdir -p "$resdir" || :
fi
-fi
-mkdir $resdir/$ds
-touch $resdir/$ds/log
-echo $scriptname $args >> $resdir/$ds/log
+ mkdir $resdir/$ds
-pwd > $resdir/$ds/testid.txt
-if test -d .git
-then
- git status >> $resdir/$ds/testid.txt
- git rev-parse HEAD >> $resdir/$ds/testid.txt
-fi
-builddir=$KVM/b1
-if ! test -e $builddir
-then
- mkdir $builddir || :
+ # Be noisy only if running the script.
+ echo Results directory: $resdir/$ds
+ echo $scriptname $args
+
+ touch $resdir/$ds/log
+ echo $scriptname $args >> $resdir/$ds/log
+ echo ${TORTURE_SUITE} > $resdir/$ds/TORTURE_SUITE
+
+ pwd > $resdir/$ds/testid.txt
+ if test -d .git
+ then
+ git status >> $resdir/$ds/testid.txt
+ git rev-parse HEAD >> $resdir/$ds/testid.txt
+ fi
fi
+# Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
+touch $T/cfgcpu
for CF in $configs
do
- # Running TREE01 multiple times creates TREE01, TREE01.2, TREE01.3, ...
- rd=$resdir/$ds/$CF
- if test -d "${rd}"
+ if test -f "$CONFIGFRAG/$kversion/$CF"
then
- n="`ls -d "${rd}"* | grep '\.[0-9]\+$' |
- sed -e 's/^.*\.\([0-9]\+\)/\1/' |
- sort -k1n | tail -1`"
- if test -z "$n"
- then
- rd="${rd}.2"
- else
- n="`expr $n + 1`"
- rd="${rd}.${n}"
- fi
+ echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu
+ else
+ echo "The --configs file $CF does not exist, terminating."
+ exit 1
fi
- mkdir "${rd}"
- echo Results directory: $rd
- kvm-test-1-rcu.sh $CONFIGFRAG/$kversion/$CF $builddir $rd $dur "-nographic $RCU_QEMU_ARG" "rcutorture.test_no_idle_hz=1 rcutorture.verbose=1 $RCU_BOOTARGS"
done
+sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
+
+# Use a greedy bin-packing algorithm, sorting the list accordingly.
+awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
+BEGIN {
+ njobs = 0;
+}
+
+{
+ # Read file of tests and corresponding required numbers of CPUs.
+ cf[njobs] = $1;
+ cpus[njobs] = $2;
+ njobs++;
+}
+
+END {
+ alldone = 0;
+ batch = 0;
+ nc = -1;
+
+	# Each pass through the following loop creates one test batch
+	# that can be executed concurrently given ncpus. Note that a
+	# given test that requires more than the available CPUs will run in
+	# its own batch. Such tests just have to make do with what
+	# is available.
+ while (nc != ncpus) {
+ batch++;
+ nc = ncpus;
+
+ # Each pass through the following loop considers one
+ # test for inclusion in the current batch.
+ for (i = 0; i < njobs; i++) {
+ if (done[i])
+ continue; # Already part of a batch.
+ if (nc >= cpus[i] || nc == ncpus) {
+
+ # This test fits into the current batch.
+ done[i] = batch;
+ nc -= cpus[i];
+ if (nc <= 0)
+ break; # Too-big test in its own batch.
+ }
+ }
+ }
+
+ # Dump out the tests in batch order.
+ for (b = 1; b <= batch; b++)
+ for (i = 0; i < njobs; i++)
+ if (done[i] == b)
+ print cf[i], cpus[i];
+}'
+
+# Generate a script to execute the tests in appropriate batches.
+cat << ___EOF___ > $T/script
+TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
+___EOF___
+awk < $T/cfgcpu.pack \
+ -v CONFIGDIR="$CONFIGFRAG/$kversion/" \
+ -v KVM="$KVM" \
+ -v ncpus=$cpus \
+ -v rd=$resdir/$ds/ \
+ -v dur=$dur \
+ -v RCU_QEMU_ARG=$RCU_QEMU_ARG \
+ -v RCU_BOOTARGS=$RCU_BOOTARGS \
+'BEGIN {
+ i = 0;
+}
+
+{
+ cf[i] = $1;
+ cpus[i] = $2;
+ i++;
+}
+
+# Dump out the scripting required to run one test batch.
+function dump(first, pastlast)
+{
+ print "echo ----Start batch: `date`";
+ print "echo ----Start batch: `date` >> " rd "/log";
+ jn=1
+ for (j = first; j < pastlast; j++) {
+ builddir=KVM "/b" jn
+ cpusr[jn] = cpus[j];
+ if (cfrep[cf[j]] == "") {
+ cfr[jn] = cf[j];
+ cfrep[cf[j]] = 1;
+ } else {
+ cfrep[cf[j]]++;
+ cfr[jn] = cf[j] "." cfrep[cf[j]];
+ }
+ if (cpusr[jn] > ncpus && ncpus != 0)
+ ovf = "(!)";
+ else
+ ovf = "";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
+ print "rm -f " builddir ".*";
+ print "touch " builddir ".wait";
+ print "mkdir " builddir " > /dev/null 2>&1 || :";
+ print "mkdir " rd cfr[jn] " || :";
+ print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" RCU_QEMU_ARG "\" \"" RCU_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
+ print "while test -f " builddir ".wait"
+ print "do"
+ print "\tsleep 1"
+ print "done"
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
+ jn++;
+ }
+ for (j = 1; j < jn; j++) {
+ builddir=KVM "/b" j
+ print "rm -f " builddir ".ready"
+ print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
+ print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+ }
+ print "wait"
+ print "echo ---- All kernel runs complete. `date`";
+ print "echo ---- All kernel runs complete. `date` >> " rd "/log";
+ for (j = 1; j < jn; j++) {
+ builddir=KVM "/b" j
+ print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
+ print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
+ print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
+ print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
+ }
+}
+
+END {
+ njobs = i;
+ nc = ncpus;
+ first = 0;
+
+ # Each pass through the following loop considers one test.
+ for (i = 0; i < njobs; i++) {
+ if (ncpus == 0) {
+			# Sequential test specified, each test gets its own batch.
+ dump(i, i + 1);
+ first = i;
+ } else if (nc < cpus[i] && i != 0) {
+ # Out of CPUs, dump out a batch.
+ dump(first, i);
+ first = i;
+ nc = ncpus;
+ }
+ # Account for the CPUs needed by the current test.
+ nc -= cpus[i];
+ }
+ # Dump the last batch.
+ if (ncpus != 0)
+ dump(first, i);
+}' >> $T/script
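To make the generated script concrete: with hypothetical values
KVM=/tmp/kvm and rd=/res/2014/, a one-test batch stanza emitted by
dump() for an 8-CPU TREE01 run would look roughly like this (the
duplicate progress echoes into the log file are omitted):

	echo ----Start batch: `date`
	echo  TREE01 8: Starting build. `date`
	rm -f /tmp/kvm/b1.*
	touch /tmp/kvm/b1.wait
	mkdir /tmp/kvm/b1 > /dev/null 2>&1 || :
	mkdir /res/2014/TREE01 || :
	kvm-test-1-run.sh CONFIGS/TREE01 /tmp/kvm/b1 /res/2014/TREE01 ... &
	while test -f /tmp/kvm/b1.wait
	do
		sleep 1
	done
	rm -f /tmp/kvm/b1.ready
	wait
	cat /res/2014/TREE01/kvm-test-1-run.sh.out

The .wait files serialize the builds, while the backgrounded
kvm-test-1-run.sh invocations presumably block on the .ready files,
letting the kernel runs themselves proceed in parallel until the
final "wait".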
+
+if test "$dryrun" = script
+then
+ # Dump out the script, but define the environment variables that
+ # it needs to run standalone.
+ echo CONFIGFRAG="$CONFIGFRAG; export CONFIGFRAG"
+ echo KVM="$KVM; export KVM"
+ echo KVPATH="$KVPATH; export KVPATH"
+ echo PATH="$PATH; export PATH"
+ echo RCU_BUILDONLY="$RCU_BUILDONLY; export RCU_BUILDONLY"
+ echo RCU_INITRD="$RCU_INITRD; export RCU_INITRD"
+ echo RCU_KMAKE_ARG="$RCU_KMAKE_ARG; export RCU_KMAKE_ARG"
+ echo RCU_QEMU_CMD="$RCU_QEMU_CMD; export RCU_QEMU_CMD"
+ echo RCU_QEMU_INTERACTIVE="$RCU_QEMU_INTERACTIVE; export RCU_QEMU_INTERACTIVE"
+ echo RCU_QEMU_MAC="$RCU_QEMU_MAC; export RCU_QEMU_MAC"
+ echo "mkdir -p "$resdir" || :"
+ echo "mkdir $resdir/$ds"
+ cat $T/script
+ exit 0
+elif test "$dryrun" = sched
+then
+ # Extract the test run schedule from the script.
+	egrep 'Start batch|Starting build\.' $T/script |
+ sed -e 's/:.*$//' -e 's/^echo //'
+ exit 0
+else
+	# Not a dryrun, so run the script.
+ sh $T/script
+fi
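Hypothetical invocations exercising the three branches above,
assuming the surrounding kvm.sh maps a --dryrun argument onto the
$dryrun variable used here:

	kvm.sh --dryrun script > /tmp/batches.sh  # standalone batch script
	kvm.sh --dryrun sched                     # batch schedule summary
	kvm.sh                                    # actually run the tests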
+
# Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier
+echo
+echo
echo " --- `date` Test summary:"
+echo Results directory: $resdir/$ds
kvm-recheck.sh $resdir/$ds
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P b/tools/testing/selftests/rcutorture/configs/lock/BUSTED
similarity index 63%
copy from tools/testing/selftests/rcutorture/configs/SRCU-P
copy to tools/testing/selftests/rcutorture/configs/lock/BUSTED
index 6650e00..1d1da14 100644
--- a/tools/testing/selftests/rcutorture/configs/SRCU-P
+++ b/tools/testing/selftests/rcutorture/configs/lock/BUSTED
@@ -1,8 +1,6 @@
-CONFIG_RCU_TRACE=n
CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot
new file mode 100644
index 0000000..6386c15
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot
@@ -0,0 +1 @@
+locktorture.torture_type=lock_busted
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
new file mode 100644
index 0000000..a061b22
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -0,0 +1 @@
+LOCK01
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFcommon b/tools/testing/selftests/rcutorture/configs/lock/CFcommon
new file mode 100644
index 0000000..e372dc2
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFcommon
@@ -0,0 +1,2 @@
+CONFIG_LOCK_TORTURE_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P b/tools/testing/selftests/rcutorture/configs/lock/LOCK01
similarity index 73%
copy from tools/testing/selftests/rcutorture/configs/SRCU-P
copy to tools/testing/selftests/rcutorture/configs/lock/LOCK01
index 6650e00..a9625e3 100644
--- a/tools/testing/selftests/rcutorture/configs/SRCU-P
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK01
@@ -1,8 +1,6 @@
-CONFIG_RCU_TRACE=n
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
new file mode 100644
index 0000000..9746ea1
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# locktorture_param_onoff bootparam-string config-file
+#
+# Adds onoff locktorture module parameters to kernels having it.
+locktorture_param_onoff () {
+ if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
+ then
+ echo CPU-hotplug kernel, adding locktorture onoff. 1>&2
+ echo locktorture.onoff_interval=3 locktorture.onoff_holdoff=30
+ fi
+}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+ echo $1 `locktorture_param_onoff "$1" "$2"` \
+ locktorture.stat_interval=15 \
+ locktorture.shutdown_secs=$3 \
+ locktorture.locktorture_runnable=1 \
+ locktorture.verbose=1
+}
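As a worked example (the boot-parameter string, config-fragment path,
and 120-second duration are invented), calling the function above
against a fragment that sets CONFIG_HOTPLUG_CPU=y:

	per_version_boot_params "locktorture.nwriters_stress=4" \
		configs/lock/LOCK01 120

emits the diagnostic on stderr plus, as a single line:

	locktorture.nwriters_stress=4 locktorture.onoff_interval=3
	locktorture.onoff_holdoff=30 locktorture.stat_interval=15
	locktorture.shutdown_secs=120 locktorture.locktorture_runnable=1
	locktorture.verbose=1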
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
similarity index 75%
copy from tools/testing/selftests/rcutorture/configs/SRCU-P
copy to tools/testing/selftests/rcutorture/configs/rcu/BUSTED
index 6650e00..48d8a24 100644
--- a/tools/testing/selftests/rcutorture/configs/SRCU-P
+++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
@@ -1,8 +1,7 @@
CONFIG_RCU_TRACE=n
CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot
new file mode 100644
index 0000000..6804f9d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=rcu_busted
diff --git a/tools/testing/selftests/rcutorture/configs/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/CFLIST
rename to tools/testing/selftests/rcutorture/configs/rcu/CFLIST
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
new file mode 100644
index 0000000..d2d2a86
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
@@ -0,0 +1,2 @@
+CONFIG_RCU_TORTURE_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-N b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
similarity index 75%
rename from tools/testing/selftests/rcutorture/configs/SRCU-N
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
index 10a0e27..9fbb41b 100644
--- a/tools/testing/selftests/rcutorture/configs/SRCU-N
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
@@ -1,8 +1,7 @@
CONFIG_RCU_TRACE=n
CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-N.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/SRCU-N.boot
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
similarity index 86%
rename from tools/testing/selftests/rcutorture/configs/SRCU-P
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
index 6650e00..4b6f272 100644
--- a/tools/testing/selftests/rcutorture/configs/SRCU-P
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
@@ -5,4 +5,3 @@
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/SRCU-P.boot
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
diff --git a/tools/testing/selftests/rcutorture/configs/TINY01 b/tools/testing/selftests/rcutorture/configs/rcu/TINY01
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TINY01
rename to tools/testing/selftests/rcutorture/configs/rcu/TINY01
index 0c2823f..0a63e07 100644
--- a/tools/testing/selftests/rcutorture/configs/TINY01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY01
@@ -10,4 +10,3 @@
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PREEMPT_COUNT=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TINY02 b/tools/testing/selftests/rcutorture/configs/rcu/TINY02
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TINY02
rename to tools/testing/selftests/rcutorture/configs/rcu/TINY02
index e5072d7..f4feaee 100644
--- a/tools/testing/selftests/rcutorture/configs/TINY02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02
@@ -10,4 +10,3 @@
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PREEMPT_COUNT=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
similarity index 95%
rename from tools/testing/selftests/rcutorture/configs/TREE01
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 141119a..9c827ec 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -20,4 +20,3 @@
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/TREE01.boot
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
diff --git a/tools/testing/selftests/rcutorture/configs/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TREE02
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE02
index 2d4d096..1a777b5 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -7,7 +7,7 @@
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
-CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n
CONFIG_HOTPLUG_CPU=n
CONFIG_SUSPEND=n
@@ -23,4 +23,3 @@
CONFIG_RCU_CPU_STALL_VERBOSE=y
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
similarity index 95%
rename from tools/testing/selftests/rcutorture/configs/TREE03
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE03
index a47de5b..c1f111c 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
@@ -20,4 +20,3 @@
CONFIG_RCU_BOOST=y
CONFIG_RCU_BOOST_PRIO=2
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE04
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 8d839b8..7dbd27c 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -22,4 +22,3 @@
CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_RCU_CPU_STALL_VERBOSE=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/TREE04.boot
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
diff --git a/tools/testing/selftests/rcutorture/configs/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE05
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE05
index b5ba72e..d0f32e5 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE05
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
@@ -22,4 +22,3 @@
CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/TREE05.boot
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
diff --git a/tools/testing/selftests/rcutorture/configs/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE06
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 7c95ab4..2e477df 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -23,4 +23,3 @@
CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
similarity index 95%
rename from tools/testing/selftests/rcutorture/configs/TREE07
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE07
index 1467404..042f86e 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -21,4 +21,3 @@
CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE08
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE08
index 7d097a6..3438cee 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -23,4 +23,3 @@
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE08-T
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
index 442c4e4..bf4523d 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE08-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
@@ -23,4 +23,3 @@
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
similarity index 95%
rename from tools/testing/selftests/rcutorture/configs/TREE09
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE09
index 0d1ec0d..81e4f7c 100644
--- a/tools/testing/selftests/rcutorture/configs/TREE09
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
@@ -18,4 +18,3 @@
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/CFLIST
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh
similarity index 70%
rename from tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh
index e805253..5ace37a 100644
--- a/tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh
@@ -20,16 +20,14 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-# rcutorture_param_n_barrier_cbs bootparam-string
+# per_version_boot_params bootparam-string config-file seconds
#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
- echo $1
-}
-
-# rcutorture_param_onoff bootparam-string config-file
-#
-# Adds onoff rcutorture module parameters to kernels having it.
-rcutorture_param_onoff () {
- echo $1
+# Adds per-version torture-module parameters to kernels supporting them,
+# which old kernels do not.
+per_version_boot_params () {
+ echo rcutorture.stat_interval=15 \
+ rcutorture.shutdown_secs=$3 \
+ rcutorture.rcutorture_runnable=1 \
+ rcutorture.test_no_idle_hz=1 \
+ rcutorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/CFLIST
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/CFLIST
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH
diff --git a/tools/testing/selftests/rcutorture/configs/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh
similarity index 72%
rename from tools/testing/selftests/rcutorture/configs/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh
index 5e40ead..bae5569 100644
--- a/tools/testing/selftests/rcutorture/configs/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh
@@ -20,18 +20,6 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-# rcutorture_param_n_barrier_cbs bootparam-string
-#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
- if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
- then
- echo $1
- else
- echo $1 rcutorture.n_barrier_cbs=4
- fi
-}
-
# rcutorture_param_onoff bootparam-string config-file
#
# Adds onoff rcutorture module parameters to kernels having it.
@@ -39,8 +27,18 @@
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
- echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
- else
- echo $1
+ echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+ echo $1 `rcutorture_param_onoff "$1" "$2"` \
+ rcutorture.stat_interval=15 \
+ rcutorture.shutdown_secs=$3 \
+ rcutorture.rcutorture_runnable=1 \
+ rcutorture.test_no_idle_hz=1 \
+ rcutorture.verbose=1
+}
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/CFLIST
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH
similarity index 100%
rename from tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH
diff --git a/tools/testing/selftests/rcutorture/configs/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh
similarity index 72%
copy from tools/testing/selftests/rcutorture/configs/ver_functions.sh
copy to tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh
index 5e40ead..8977d8d 100644
--- a/tools/testing/selftests/rcutorture/configs/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh
@@ -26,9 +26,9 @@
rcutorture_param_n_barrier_cbs () {
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
then
- echo $1
+ :
else
- echo $1 rcutorture.n_barrier_cbs=4
+ echo rcutorture.n_barrier_cbs=4
fi
}
@@ -39,8 +39,19 @@
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
- echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
- else
- echo $1
+ echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+ echo $1 `rcutorture_param_onoff "$1" "$2"` \
+ `rcutorture_param_n_barrier_cbs "$1"` \
+ rcutorture.stat_interval=15 \
+ rcutorture.shutdown_secs=$3 \
+ rcutorture.rcutorture_runnable=1 \
+ rcutorture.test_no_idle_hz=1 \
+ rcutorture.verbose=1
+}
diff --git a/tools/testing/selftests/rcutorture/configs/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
similarity index 72%
copy from tools/testing/selftests/rcutorture/configs/ver_functions.sh
copy to tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 5e40ead..8977d8d 100644
--- a/tools/testing/selftests/rcutorture/configs/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -26,9 +26,9 @@
rcutorture_param_n_barrier_cbs () {
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
then
- echo $1
+ :
else
- echo $1 rcutorture.n_barrier_cbs=4
+ echo rcutorture.n_barrier_cbs=4
fi
}
@@ -39,8 +39,19 @@
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
- echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
- else
- echo $1
+ echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+ echo $1 `rcutorture_param_onoff "$1" "$2"` \
+ `rcutorture_param_n_barrier_cbs "$1"` \
+ rcutorture.stat_interval=15 \
+ rcutorture.shutdown_secs=$3 \
+ rcutorture.rcutorture_runnable=1 \
+ rcutorture.test_no_idle_hz=1 \
+ rcutorture.verbose=1
+}
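The change from "echo $1" to ":" keeps each helper from repeating the
caller's boot-parameter string, which per_version_boot_params now
echoes exactly once itself. For instance, after sourcing these
helpers (argument strings invented):

	rcutorture_param_n_barrier_cbs "rcutorture.n_barrier_cbs=8"
	# prints nothing: the caller already chose a value

	rcutorture_param_n_barrier_cbs "rcutorture.verbose=1"
	# prints: rcutorture.n_barrier_cbs=4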
diff --git a/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh
deleted file mode 100644
index c37432f..0000000
--- a/tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#
-# Kernel-version-dependent shell functions for the rest of the scripts.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (C) IBM Corporation, 2013
-#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-
-# rcutorture_param_n_barrier_cbs bootparam-string
-#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
- echo $1
-}
-
-# rcutorture_param_onoff bootparam-string config-file
-#
-# Adds onoff rcutorture module parameters to kernels having it.
-rcutorture_param_onoff () {
- if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
- then
- echo CPU-hotplug kernel, adding rcutorture onoff.
- echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
- else
- echo $1
- fi
-}
diff --git a/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh
deleted file mode 100644
index 6a5f13a..0000000
--- a/tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#
-# Kernel-version-dependent shell functions for the rest of the scripts.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (C) IBM Corporation, 2013
-#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-
-# rcutorture_param_n_barrier_cbs bootparam-string
-#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
- if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
- then
- echo $1
- else
- echo $1 rcutorture.n_barrier_cbs=4
- fi
-}
-
-# rcutorture_param_onoff bootparam-string config-file
-#
-# Adds onoff rcutorture module parameters to kernels having it.
-rcutorture_param_onoff () {
- if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
- then
- echo CPU-hotplug kernel, adding rcutorture onoff.
- echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
- else
- echo $1
- fi
-}
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
similarity index 100%
rename from tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt
rename to tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03a0381..b5ec7fb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -102,7 +102,7 @@
static void mark_page_dirty_in_slot(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn);
-bool kvm_rebooting;
+__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
static bool largepages_enabled = true;