mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Lots of overlapping changes and parallel additions, stuff like that. Signed-off-by: David S. Miller <davem@davemloft.net>
commit 19b7e21c55
.mailmap (4 additions)

@@ -108,6 +108,10 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
 Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
+Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
 Jean Tourrilhes <jt@hpl.hp.com>
 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -486,6 +486,8 @@ What:		/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 		/sys/devices/system/cpu/vulnerabilities/l1tf
 		/sys/devices/system/cpu/vulnerabilities/mds
+		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -12,3 +12,5 @@ are configurable at compile, boot or run time.
    spectre
    l1tf
    mds
+   tsx_async_abort
+   multihit.rst
@@ -0,0 +1,163 @@
iTLB multihit
=============

iTLB multihit is an erratum where some processors may incur a machine check
error, possibly resulting in an unrecoverable CPU lockup, when an
instruction fetch hits multiple entries in the instruction TLB. This can
occur when the page size is changed along with either the physical address
or cache type. A malicious guest running on a virtualized system can
exploit this erratum to perform a denial of service attack.


Affected processors
-------------------

Variations of this erratum are present on most Intel Core and Xeon processor
models. The erratum is not present on:

   - non-Intel processors

   - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)

   - Intel processors that have the PSCHANGE_MC_NO bit set in the
     IA32_ARCH_CAPABILITIES MSR.


Related CVEs
------------

The following CVE entry is related to this issue:

   ==============  =================================================
   CVE-2018-12207  Machine Check Error Avoidance on Page Size Change
   ==============  =================================================


Problem
-------

Privileged software, including the OS and virtual machine managers (VMM), is in
charge of memory management. A key component in memory management is the control
of the page tables. Modern processors use virtual memory, a technique that creates
the illusion of a very large memory for processors. This virtual space is split
into pages of a given size. Page tables translate virtual addresses to physical
addresses.

To reduce latency when performing a virtual to physical address translation,
processors include a structure, called TLB, that caches recent translations.
There are separate TLBs for instruction (iTLB) and data (dTLB).

Under this erratum, instructions are fetched from a linear address translated
using a 4 KB translation cached in the iTLB. Privileged software modifies the
paging structure so that the same linear address is mapped using a large page
size (2 MB, 4 MB, 1 GB) with a different physical address or memory type. After
the page structure modification but before the software invalidates any iTLB
entries for the linear address, a code fetch that happens on the same linear
address may cause a machine-check error which can result in a system hang or
shutdown.


Attack scenarios
----------------

Attacks against the iTLB multihit erratum can be mounted from malicious
guests in a virtualized system.


iTLB multihit system information
--------------------------------

The Linux kernel provides a sysfs interface to enumerate the current iTLB
multihit status of the system: whether the system is vulnerable and which
mitigations are active. The relevant sysfs file is:

   /sys/devices/system/cpu/vulnerabilities/itlb_multihit

The possible values in this file are:

   .. list-table::

      * - Not affected
        - The processor is not vulnerable.
      * - KVM: Mitigation: Split huge pages
        - Software changes mitigate this issue.
      * - KVM: Vulnerable
        - The processor is vulnerable, but no mitigation is enabled.
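As a purely illustrative sketch (not part of this patch), a small userspace
program can read that sysfs file directly; only the path above is taken from
the document, everything else is an assumption about a convenient way to query
it::

    #include <stdio.h>

    int main(void)
    {
            /* Path documented above; present on x86 kernels carrying this fix. */
            const char *path = "/sys/devices/system/cpu/vulnerabilities/itlb_multihit";
            char status[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);   /* older kernel or non-x86 system */
                    return 1;
            }
            if (fgets(status, sizeof(status), f))
                    printf("iTLB multihit: %s", status);
            fclose(f);
            return 0;
    }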


Enumeration of the erratum
--------------------------

A new bit has been allocated in the IA32_ARCH_CAPABILITIES MSR (PSCHANGE_MC_NO)
and will be set on CPUs which are mitigated against this issue.

   =======================================  ===========  ================================
   IA32_ARCH_CAPABILITIES MSR               Not present  Possibly vulnerable, check model
   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]   '0'          Likely vulnerable, check model
   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]   '1'          Not vulnerable
   =======================================  ===========  ================================
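For illustration only (this snippet is not from the patch), the bit can also be
inspected from userspace through the x86 "msr" driver; the sketch assumes the
msr module is loaded and root privileges, and the read simply fails on CPUs
without IA32_ARCH_CAPABILITIES::

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_ARCH_CAPABILITIES  0x10a
    #define ARCH_CAP_PSCHANGE_MC_NO     (1ULL << 6)

    int main(void)
    {
            uint64_t val;
            /* Requires the "msr" kernel module and root privileges. */
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 ||
                pread(fd, &val, sizeof(val), MSR_IA32_ARCH_CAPABILITIES) != sizeof(val)) {
                    perror("rdmsr");    /* MSR not present on older CPUs */
                    return 1;
            }
            printf("PSCHANGE_MC_NO: %s\n",
                   (val & ARCH_CAP_PSCHANGE_MC_NO) ? "set (not vulnerable)" : "clear");
            close(fd);
            return 0;
    }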


Mitigation mechanism
--------------------

This erratum can be mitigated by restricting the use of large page sizes to
non-executable pages. This forces all iTLB entries to be 4K, and removes
the possibility of multiple hits.

In order to mitigate the vulnerability, KVM initially marks all huge pages
as non-executable. If the guest attempts to execute in one of those pages,
the page is broken down into 4K pages, which are then marked executable.

If EPT is disabled or not available on the host, KVM is in control of TLB
flushes and the problematic situation cannot happen. However, the shadow
EPT paging mechanism used by nested virtualization is vulnerable, because
the nested guest can trigger multiple iTLB hits by modifying its own
(non-nested) page tables. For simplicity, KVM will make large pages
non-executable in all shadow paging modes.

Mitigation control on the kernel command line and KVM - module parameter
-------------------------------------------------------------------------

The KVM hypervisor mitigation mechanism for marking huge pages as
non-executable can be controlled with a module parameter "nx_huge_pages=".
The kernel command line allows control of the iTLB multihit mitigations at
boot time with the option "kvm.nx_huge_pages=".

The valid arguments for these options are:

   ==========  ================================================================
   force       Mitigation is enabled. In this case, the mitigation implements
               non-executable huge pages in the Linux kernel KVM module. All
               huge pages in the EPT are marked as non-executable.
               If a guest attempts to execute in one of those pages, the page
               is broken down into 4K pages, which are then marked executable.

   off         Mitigation is disabled.

   auto        Enable mitigation only if the platform is affected and the
               kernel was not booted with the "mitigations=off" command line
               parameter. This is the default option.
   ==========  ================================================================


Mitigation selection guide
--------------------------

1. No virtualization in use
^^^^^^^^^^^^^^^^^^^^^^^^^^^

   The system is protected by the kernel unconditionally and no further
   action is required.

2. Virtualization with trusted guests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

   If the guest comes from a trusted source, you may assume that the guest
   will not attempt to maliciously exploit this erratum and no further
   action is required.

3. Virtualization with untrusted guests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

   If the guest comes from an untrusted source, the host kernel will need
   to apply the iTLB multihit mitigation via the kernel command line or the
   kvm module parameter.

@@ -0,0 +1,276 @@
.. SPDX-License-Identifier: GPL-2.0

TAA - TSX Asynchronous Abort
============================

TAA is a hardware vulnerability that allows unprivileged speculative access to
data which is available in various CPU internal buffers by using asynchronous
aborts within an Intel TSX transactional region.

Affected processors
-------------------

This vulnerability only affects Intel processors that support Intel
Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit
(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
also mitigate against TAA.

Whether a processor is affected or not can be read out from the TAA
vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.

Related CVEs
------------

The following CVE entry is related to this TAA issue:

   ==============  =====  ===================================================
   CVE-2019-11135  TAA    TSX Asynchronous Abort (TAA) condition on some
                          microprocessors utilizing speculative execution may
                          allow an authenticated user to potentially enable
                          information disclosure via a side channel with
                          local access.
   ==============  =====  ===================================================

Problem
-------

When performing store, load or L1 refill operations, processors write
data into temporary microarchitectural structures (buffers). The data in
those buffers can be forwarded to load operations as an optimization.

Intel TSX is an extension to the x86 instruction set architecture that adds
hardware transactional memory support to improve performance of multi-threaded
software. TSX lets the processor expose and exploit concurrency hidden in an
application by dynamically avoiding unnecessary synchronization.

TSX supports atomic memory transactions that are either committed (success) or
aborted. During an abort, operations that happened within the transactional region
are rolled back. An asynchronous abort takes place, among other options, when a
different thread accesses a cache line that is also used within the transactional
region and that access might lead to a data race.
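To make the "transactional region" terminology concrete, here is a hedged
sketch (not part of this document) using the GCC/Clang RTM intrinsics. It only
shows what beginning, committing and aborting a region looks like; on CPUs
without RTM the XBEGIN instruction faults, so real code must check CPUID
first::

    /* Build with: gcc -O2 -mrtm tsx_region.c  (illustrative sketch only). */
    #include <immintrin.h>
    #include <stdio.h>

    static int shared_counter;

    int main(void)
    {
            unsigned int status = _xbegin();        /* start transactional region */

            if (status == _XBEGIN_STARTED) {
                    shared_counter++;               /* tracked by the transaction */
                    _xend();                        /* commit */
                    puts("transaction committed");
            } else {
                    /* Any abort, including an asynchronous one, lands here and
                     * all memory effects of the region are rolled back. */
                    printf("transaction aborted, status 0x%x\n", status);
            }
            return 0;
    }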

Immediately after an uncompleted asynchronous abort, certain speculatively
executed loads may read data from those internal buffers and pass it to dependent
operations. This can then be used to infer the value via a cache side channel
attack.

Because the buffers are potentially shared between Hyper-Threads, cross
Hyper-Thread attacks are possible.

The victim of a malicious actor does not need to make use of TSX. Only the
attacker needs to begin a TSX transaction and raise an asynchronous abort
which in turn potentially leaks data stored in the buffers.

More detailed technical information is available in the TAA specific x86
architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.


Attack scenarios
----------------

Attacks against the TAA vulnerability can be implemented from unprivileged
applications running on hosts or guests.

As for MDS, the attacker has no control over the memory addresses that can
be leaked. Only the victim is responsible for bringing data to the CPU. As
a result, the malicious actor has to sample as much data as possible and
then postprocess it to try to infer any useful information from it.

A potential attacker only has read access to the data. Also, there is no direct
privilege escalation by using this technique.


.. _tsx_async_abort_sys_info:

TAA system information
----------------------

The Linux kernel provides a sysfs interface to enumerate the current TAA status
of mitigated systems. The relevant sysfs file is:

   /sys/devices/system/cpu/vulnerabilities/tsx_async_abort

The possible values in this file are:

   .. list-table::

      * - 'Vulnerable'
        - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
      * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
        - The system tries to clear the buffers but the microcode might not support the operation.
      * - 'Mitigation: Clear CPU buffers'
        - The microcode has been updated to clear the buffers. TSX is still enabled.
      * - 'Mitigation: TSX disabled'
        - TSX is disabled.
      * - 'Not affected'
        - The CPU is not affected by this issue.
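A small illustrative sketch (not from this patch) that dumps every file in
this directory, tsx_async_abort and itlb_multihit included, might look like
the following; the exact set of files depends on the running kernel::

    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            const char *dir = "/sys/devices/system/cpu/vulnerabilities";
            char path[512], line[256];
            struct dirent *de;
            DIR *d = opendir(dir);

            if (!d) {
                    perror(dir);
                    return 1;
            }
            while ((de = readdir(d)) != NULL) {
                    FILE *f;

                    if (de->d_name[0] == '.')
                            continue;
                    snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
                    f = fopen(path, "r");
                    if (f && fgets(line, sizeof(line), f))
                            printf("%-20s %s", de->d_name, line);
                    if (f)
                            fclose(f);
            }
            closedir(d);
            return 0;
    }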


.. _ucode_needed:

Best effort mitigation mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^

If the processor is vulnerable, but the availability of the microcode-based
mitigation mechanism is not advertised via CPUID, the kernel selects a best
effort mitigation mode. This mode invokes the mitigation instructions
without a guarantee that they clear the CPU buffers.

This is done to address virtualization scenarios where the host has the
microcode update applied, but the hypervisor is not yet updated to expose the
CPUID to the guest. If the host has updated microcode the protection takes
effect; otherwise a few CPU cycles are wasted pointlessly.

The state in the tsx_async_abort sysfs file reflects this situation
accordingly.


Mitigation mechanism
--------------------

The kernel detects the affected CPUs and the presence of the microcode which is
required. If a CPU is affected and the microcode is available, then the kernel
enables the mitigation by default.

The mitigation can be controlled at boot time via a kernel command line option.
See :ref:`taa_mitigation_control_command_line`.

.. _virt_mechanism:

Virtualization mitigation
^^^^^^^^^^^^^^^^^^^^^^^^^

Affected systems where the host has the TAA microcode and TAA is mitigated by
having disabled TSX previously are not vulnerable regardless of the status
of the VMs.

In all other cases, if the host either does not have the TAA microcode or
the kernel is not mitigated, the system might be vulnerable.


.. _taa_mitigation_control_command_line:

Mitigation control on the kernel command line
---------------------------------------------

The kernel command line allows control of the TAA mitigations at boot time with
the option "tsx_async_abort=". The valid arguments for this option are:

   ============  =============================================================
   off           This option disables the TAA mitigation on affected
                 platforms. If the system has TSX enabled (see next parameter)
                 and the CPU is affected, the system is vulnerable.

   full          TAA mitigation is enabled. If TSX is enabled, on an affected
                 system it will clear CPU buffers on ring transitions. On
                 systems which are MDS-affected and deploy MDS mitigation,
                 TAA is also mitigated. Specifying this option on those
                 systems will have no effect.

   full,nosmt    The same as tsx_async_abort=full, with SMT disabled on
                 vulnerable CPUs that have TSX enabled. This is the complete
                 mitigation. When TSX is disabled, SMT is not disabled because
                 the CPU is not vulnerable to cross-thread TAA attacks.
   ============  =============================================================

Not specifying this option is equivalent to "tsx_async_abort=full".

The kernel command line also allows control of the TSX feature using the
parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
to control the TSX feature and the enumeration of the TSX feature bits (RTM
and HLE) in CPUID.

The valid options are:

   ============  =============================================================
   off           Disables TSX on the system.

                 Note that this option takes effect only on newer CPUs which
                 are not vulnerable to MDS, i.e., have
                 MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get the new
                 IA32_TSX_CTRL MSR through a microcode update. This new MSR
                 allows for the reliable deactivation of the TSX
                 functionality.

   on            Enables TSX.

                 Although there are mitigations for all known security
                 vulnerabilities, TSX has been known to be an accelerator for
                 several previous speculation-related CVEs, and so there may
                 be unknown security risks associated with leaving it enabled.

   auto          Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
                 on the system.
   ============  =============================================================

Not specifying this option is equivalent to "tsx=off".

The following combinations of the "tsx_async_abort" and "tsx" options are
possible. For affected platforms tsx=auto is equivalent to tsx=off and the
result will be:

   =========  ==========================  =========================================
   tsx=on     tsx_async_abort=full        The system will use VERW to clear CPU
                                          buffers. Cross-thread attacks are still
                                          possible on SMT machines.
   tsx=on     tsx_async_abort=full,nosmt  As above, cross-thread attacks on SMT
                                          mitigated.
   tsx=on     tsx_async_abort=off         The system is vulnerable.
   tsx=off    tsx_async_abort=full        TSX might be disabled if microcode
                                          provides a TSX control MSR. If so,
                                          the system is not vulnerable.
   tsx=off    tsx_async_abort=full,nosmt  Ditto
   tsx=off    tsx_async_abort=off         Ditto
   =========  ==========================  =========================================


For unaffected platforms "tsx=on" and "tsx_async_abort=full" does not clear CPU
buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
the "tsx" command line argument has no effect.

For affected platforms, the table below indicates the mitigation status for the
combinations of CPUID bit MD_CLEAR and IA32_ARCH_CAPABILITIES MSR bits MDS_NO
and TSX_CTRL_MSR.

   =======  =========  =============  ========================================
   MDS_NO   MD_CLEAR   TSX_CTRL_MSR   Status
   =======  =========  =============  ========================================
   0        0          0              Vulnerable (needs microcode)
   0        1          0              MDS and TAA mitigated via VERW
   1        1          0              MDS fixed, TAA vulnerable if TSX enabled
                                      because MD_CLEAR has no meaning and
                                      VERW is not guaranteed to clear buffers
   1        X          1              MDS fixed, TAA can be mitigated by
                                      VERW or TSX_CTRL_MSR
   =======  =========  =============  ========================================

Mitigation selection guide
--------------------------

1. Trusted userspace and guests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If all user space applications are from a trusted source and do not execute
untrusted code which is supplied externally, then the mitigation can be
disabled. The same applies to virtualized environments with trusted guests.


2. Untrusted userspace and guests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If there are untrusted applications or guests on the system, enabling TSX
might allow a malicious actor to leak data from the host or from other
processes running on the same physical core.

If the microcode is available and TSX is disabled on the host, attacks
are prevented in a virtualized environment as well, even if the VMs do not
explicitly enable the mitigation.


.. _taa_default_mitigations:

Default mitigations
-------------------

The kernel's default action for vulnerable processors is:

  - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).

@@ -2055,6 +2055,25 @@
 			KVM MMU at runtime.
 			Default is 0 (off)
 
+	kvm.nx_huge_pages=
+			[KVM] Controls the software workaround for the
+			X86_BUG_ITLB_MULTIHIT bug.
+			force	: Always deploy workaround.
+			off	: Never deploy workaround.
+			auto	: Deploy workaround based on the presence of
+				  X86_BUG_ITLB_MULTIHIT.
+
+			Default is 'auto'.
+
+			If the software workaround is enabled for the host,
+			guests need not enable it for nested guests.
+
+	kvm.nx_huge_pages_recovery_ratio=
+			[KVM] Controls how many 4KiB pages are periodically zapped
+			back to huge pages. 0 disables the recovery, otherwise if
+			the value is N KVM will zap 1/Nth of the 4KiB pages every
+			minute. The default is 60.
+
 	kvm-amd.nested=	[KVM,AMD] Allow nested virtualization in KVM/SVM.
 			Default is 1 (enabled)
 
@@ -2636,6 +2655,13 @@
 				ssbd=force-off [ARM64]
 				l1tf=off [X86]
 				mds=off [X86]
+				tsx_async_abort=off [X86]
+				kvm.nx_huge_pages=off [X86]
+
+				Exceptions:
+					This does not have any effect on
+					kvm.nx_huge_pages when
+					kvm.nx_huge_pages=force.
 
 			auto (default)
 				Mitigate all CPU vulnerabilities, but leave SMT
@@ -2651,6 +2677,7 @@
 				be fully mitigated, even if it means losing SMT.
 				Equivalent to: l1tf=flush,nosmt [X86]
 					       mds=full,nosmt [X86]
+					       tsx_async_abort=full,nosmt [X86]
 
 	mminit_loglevel=
 			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -4848,6 +4875,71 @@
 			interruptions from clocksource watchdog are not
 			acceptable).
 
+	tsx=		[X86] Control Transactional Synchronization
+			Extensions (TSX) feature in Intel processors that
+			support TSX control.
+
+			This parameter controls the TSX feature. The options are:
+
+			on	- Enable TSX on the system. Although there are
+				mitigations for all known security vulnerabilities,
+				TSX has been known to be an accelerator for
+				several previous speculation-related CVEs, and
+				so there may be unknown security risks associated
+				with leaving it enabled.
+
+			off	- Disable TSX on the system. (Note that this
+				option takes effect only on newer CPUs which are
+				not vulnerable to MDS, i.e., have
+				MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+				the new IA32_TSX_CTRL MSR through a microcode
+				update. This new MSR allows for the reliable
+				deactivation of the TSX functionality.)
+
+			auto	- Disable TSX if X86_BUG_TAA is present,
+				otherwise enable TSX on the system.
+
+			Not specifying this option is equivalent to tsx=off.
+
+			See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+			for more details.
+
+	tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+			Abort (TAA) vulnerability.
+
+			Similar to Micro-architectural Data Sampling (MDS)
+			certain CPUs that support Transactional
+			Synchronization Extensions (TSX) are vulnerable to an
+			exploit against CPU internal buffers which can forward
+			information to a disclosure gadget under certain
+			conditions.
+
+			In vulnerable processors, the speculatively forwarded
+			data can be used in a cache side channel attack, to
+			access data to which the attacker does not have direct
+			access.
+
+			This parameter controls the TAA mitigation. The
+			options are:
+
+			full       - Enable TAA mitigation on vulnerable CPUs
+				     if TSX is enabled.
+
+			full,nosmt - Enable TAA mitigation and disable SMT on
+				     vulnerable CPUs. If TSX is disabled, SMT
+				     is not disabled because the CPU is not
+				     vulnerable to cross-thread TAA attacks.
+			off        - Unconditionally disable TAA mitigation
+
+			Not specifying this option is equivalent to
+			tsx_async_abort=full. On CPUs which are MDS affected
+			and deploy MDS mitigation, TAA mitigation is not
+			required and doesn't provide any additional
+			mitigation.
+
+			For details see:
+			Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+
 	turbografx.map[2|3]=	[HW,JOY]
 			TurboGraFX parallel port interface
 			Format:
@@ -27,6 +27,7 @@ x86-specific Documentation
    mds
    microcode
    resctrl_ui
+   tsx_async_abort
    usb-legacy-support
    i386/index
    x86_64/index
@@ -0,0 +1,117 @@
.. SPDX-License-Identifier: GPL-2.0

TSX Async Abort (TAA) mitigation
================================

.. _tsx_async_abort:

Overview
--------

TSX Async Abort (TAA) is a side channel attack on internal buffers in some
Intel processors similar to Microarchitectural Data Sampling (MDS). In this
case certain loads may speculatively pass invalid data to dependent operations
when an asynchronous abort condition is pending in a Transactional
Synchronization Extensions (TSX) transaction. This includes loads with no
fault or assist condition. Such loads may speculatively expose stale data from
the same uarch data structures as in MDS, with the same scope of exposure, i.e.
same-thread and cross-thread. This issue affects all current processors that
support TSX.

Mitigation strategy
-------------------

a) TSX disable - one of the mitigations is to disable TSX. A new MSR
   IA32_TSX_CTRL will be available in future and current processors after
   a microcode update which can be used to disable TSX. In addition, it
   controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID.

b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
   vulnerability. More details on this approach can be found in
   :ref:`Documentation/admin-guide/hw-vuln/mds.rst <mds>`.

Kernel internal mitigation modes
--------------------------------

   =============  ============================================================
   off            Mitigation is disabled. Either the CPU is not affected or
                  tsx_async_abort=off is supplied on the kernel command line.

   tsx disabled   Mitigation is enabled. TSX feature is disabled by default at
                  bootup on processors that support TSX control.

   verw           Mitigation is enabled. CPU is affected and MD_CLEAR is
                  advertised in CPUID.

   ucode needed   Mitigation is enabled. CPU is affected and MD_CLEAR is not
                  advertised in CPUID. That is mainly for virtualization
                  scenarios where the host has the updated microcode but the
                  hypervisor does not expose MD_CLEAR in CPUID. It's a best
                  effort approach without guarantee.
   =============  ============================================================

If the CPU is affected and the "tsx_async_abort" kernel command line parameter is
not provided then the kernel selects an appropriate mitigation depending on the
status of RTM and MD_CLEAR CPUID bits.

The tables below indicate the impact of the tsx=on|off|auto cmdline options on
the state of TAA mitigation, VERW behavior and the TSX feature for various
combinations of MSR_IA32_ARCH_CAPABILITIES bits.

1. "tsx=off"

   =======  =======  ============  ============  ==============  ===================  ======================
   MSR_IA32_ARCH_CAPABILITIES bits               Result with cmdline tsx=off
   --------------------------------------------  ------------------------------------------------------------
   TAA_NO   MDS_NO   TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
                                   after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
   =======  =======  ============  ============  ==============  ===================  ======================
   0        0        0             HW default    Yes             Same as MDS          Same as MDS
   0        0        1             Invalid case  Invalid case    Invalid case         Invalid case
   0        1        0             HW default    No              Need ucode update    Need ucode update
   0        1        1             Disabled      Yes             TSX disabled         TSX disabled
   1        X        1             Disabled      X               None needed          None needed
   =======  =======  ============  ============  ==============  ===================  ======================

2. "tsx=on"

   =======  =======  ============  ============  ==============  ===================  ======================
   MSR_IA32_ARCH_CAPABILITIES bits               Result with cmdline tsx=on
   --------------------------------------------  ------------------------------------------------------------
   TAA_NO   MDS_NO   TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
                                   after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
   =======  =======  ============  ============  ==============  ===================  ======================
   0        0        0             HW default    Yes             Same as MDS          Same as MDS
   0        0        1             Invalid case  Invalid case    Invalid case         Invalid case
   0        1        0             HW default    No              Need ucode update    Need ucode update
   0        1        1             Enabled       Yes             None                 Same as MDS
   1        X        1             Enabled       X               None needed          None needed
   =======  =======  ============  ============  ==============  ===================  ======================

3. "tsx=auto"

   =======  =======  ============  ============  ==============  ===================  ======================
   MSR_IA32_ARCH_CAPABILITIES bits               Result with cmdline tsx=auto
   --------------------------------------------  ------------------------------------------------------------
   TAA_NO   MDS_NO   TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
                                   after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
   =======  =======  ============  ============  ==============  ===================  ======================
   0        0        0             HW default    Yes             Same as MDS          Same as MDS
   0        0        1             Invalid case  Invalid case    Invalid case         Invalid case
   0        1        0             HW default    No              Need ucode update    Need ucode update
   0        1        1             Disabled      Yes             TSX disabled         TSX disabled
   1        X        1             Enabled       X               None needed          None needed
   =======  =======  ============  ============  ==============  ===================  ======================

In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
indicates whether MSR_IA32_TSX_CTRL is supported.

There are two control bits in IA32_TSX_CTRL MSR:

   Bit 0: When set it disables the Restricted Transactional Memory (RTM)
          sub-feature of TSX (will force all transactions to abort on the
          XBEGIN instruction).

   Bit 1: When set it disables the enumeration of the RTM and HLE feature
          (i.e. it will make CPUID(EAX=7).EBX{bit4} and
          CPUID(EAX=7).EBX{bit11} read as 0).
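For illustration only, kernel-context pseudo-code using these two bits might
look like the sketch below. The constants match the msr-index.h additions
elsewhere in this commit, but the helper itself is an assumption, not the
kernel's actual implementation::

    #include <asm/msr.h>            /* rdmsrl()/wrmsrl(), kernel context assumed */
    #include <asm/msr-index.h>      /* MSR_IA32_TSX_CTRL, TSX_CTRL_* bits */

    /* Sketch: disable TSX on a CPU that advertises ARCH_CAP_TSX_CTRL_MSR. */
    static void tsx_disable_sketch(void)
    {
            u64 tsx;

            rdmsrl(MSR_IA32_TSX_CTRL, tsx);
            tsx |= TSX_CTRL_RTM_DISABLE;    /* bit 0: force RTM transactions to abort */
            tsx |= TSX_CTRL_CPUID_CLEAR;    /* bit 1: hide RTM/HLE from CPUID */
            wrmsrl(MSR_IA32_TSX_CTRL, tsx);
    }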

@@ -3268,7 +3268,6 @@ S:	Maintained
 F:	drivers/cpufreq/bmips-cpufreq.c
 
 BROADCOM BMIPS MIPS ARCHITECTURE
-M:	Kevin Cernekee <cernekee@gmail.com>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 L:	linux-mips@vger.kernel.org
@@ -3745,7 +3744,6 @@ F:	drivers/crypto/cavium/cpt/
 
 CAVIUM THUNDERX2 ARM64 SOC
 M:	Robert Richter <rrichter@cavium.com>
-M:	Jayachandran C <jnair@caviumnetworks.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm64/boot/dts/cavium/thunder2-99xx*

Makefile (5 changes)

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -917,6 +917,9 @@ ifeq ($(CONFIG_RELR),y)
 LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr
 endif
 
+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
 # insure the checker run with the right endianness
 CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)

@@ -328,6 +328,10 @@ &pwm3 {
 	pinctrl-0 = <&pinctrl_pwm3>;
 };
 
+&snvs_pwrkey {
+	status = "okay";
+};
+
 &ssi2 {
 	status = "okay";
 };

@@ -230,6 +230,8 @@ magnetometer@e {
 	accelerometer@1c {
 		compatible = "fsl,mma8451";
 		reg = <0x1c>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_mma8451_int>;
 		interrupt-parent = <&gpio6>;
 		interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
 	};
@@ -628,6 +630,12 @@ MX6QDL_PAD_SD2_DAT0__GPIO1_IO15 0x1b0b0
 		>;
 	};
 
+	pinctrl_mma8451_int: mma8451intgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0xb0b1
+		>;
+	};
+
 	pinctrl_pwm3: pwm1grp {
 		fsl,pins = <
 			MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1

@@ -183,14 +183,12 @@ &i2c2 {
 
 	ov5640: camera@3c {
 		compatible = "ovti,ov5640";
-		pinctrl-names = "default";
-		pinctrl-0 = <&ov5640_pins>;
 		reg = <0x3c>;
 		clocks = <&clk_ext_camera>;
 		clock-names = "xclk";
 		DOVDD-supply = <&v2v8>;
-		powerdown-gpios = <&stmfx_pinctrl 18 GPIO_ACTIVE_HIGH>;
-		reset-gpios = <&stmfx_pinctrl 19 GPIO_ACTIVE_LOW>;
+		powerdown-gpios = <&stmfx_pinctrl 18 (GPIO_ACTIVE_HIGH | GPIO_PUSH_PULL)>;
+		reset-gpios = <&stmfx_pinctrl 19 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
 		rotation = <180>;
 		status = "okay";
@@ -223,15 +221,8 @@ stmfx_pinctrl: stmfx-pin-controller {
 
 			joystick_pins: joystick {
 				pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
-				drive-push-pull;
 				bias-pull-down;
 			};
-
-			ov5640_pins: camera {
-				pins = "agpio2", "agpio3"; /* stmfx pins 18 & 19 */
-				drive-push-pull;
-				output-low;
-			};
 		};
 	};
 };

@@ -932,7 +932,7 @@ m_can1: can@4400e000 {
 			interrupt-names = "int0", "int1";
 			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
 			clock-names = "hclk", "cclk";
-			bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
+			bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
 			status = "disabled";
 		};
 
@@ -945,7 +945,7 @@ m_can2: can@4400f000 {
 			interrupt-names = "int0", "int1";
 			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
 			clock-names = "hclk", "cclk";
-			bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+			bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
 			status = "disabled";
 		};

@@ -192,6 +192,7 @@ &mmc1 {
 	vqmmc-supply = <&reg_dldo1>;
 	non-removable;
 	wakeup-source;
+	keep-power-in-suspend;
 	status = "okay";
 
 	brcmf: wifi@1 {

@@ -481,14 +481,18 @@ static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
 static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 {
 	u32 reg;
+	int gating_bit = cpu;
 
 	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
 	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
 		return -EINVAL;
 
+	if (is_a83t && cpu == 0)
+		gating_bit = 4;
+
 	/* gate processor power */
 	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
-	reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
+	reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
 	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
 	udelay(20);

@@ -127,7 +127,7 @@ &i2c0 {
 	status = "okay";
 
 	i2c-mux@77 {
-		compatible = "nxp,pca9847";
+		compatible = "nxp,pca9547";
 		reg = <0x77>;
 		#address-cells = <1>;
 		#size-cells = <0>;

@@ -394,7 +394,7 @@ wdog3: watchdog@302a0000 {
 			};
 
 			sdma2: dma-controller@302c0000 {
-				compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
 				reg = <0x302c0000 0x10000>;
 				interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>,
@@ -405,7 +405,7 @@ sdma2: dma-controller@302c0000 {
 			};
 
 			sdma3: dma-controller@302b0000 {
-				compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
 				reg = <0x302b0000 0x10000>;
 				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>,
@@ -737,7 +737,7 @@ usdhc3: mmc@30b60000 {
 			};
 
 			sdma1: dma-controller@30bd0000 {
-				compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
 				reg = <0x30bd0000 0x10000>;
 				interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,

@@ -288,7 +288,7 @@ wdog3: watchdog@302a0000 {
 			};
 
 			sdma3: dma-controller@302b0000 {
-				compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
 				reg = <0x302b0000 0x10000>;
 				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>,
@@ -299,7 +299,7 @@ sdma3: dma-controller@302b0000 {
 			};
 
 			sdma2: dma-controller@302c0000 {
-				compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
 				reg = <0x302c0000 0x10000>;
 				interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>,
@@ -612,7 +612,7 @@ usdhc3: mmc@30b60000 {
 			};
 
 			sdma1: dma-controller@30bd0000 {
-				compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+				compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
 				reg = <0x30bd0000 0x10000>;
 				interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,

@@ -88,7 +88,7 @@ reg_arm: regulator-arm {
 		regulator-name = "0V9_ARM";
 		regulator-min-microvolt = <900000>;
 		regulator-max-microvolt = <1000000>;
-		gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+		gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
 		states = <1000000 0x1
 			  900000 0x0>;
 		regulator-always-on;

@@ -30,13 +30,6 @@ int __arm64_get_clock_mode(struct timekeeper *tk)
 }
 #define __arch_get_clock_mode __arm64_get_clock_mode
 
-static __always_inline
-int __arm64_use_vsyscall(struct vdso_data *vdata)
-{
-	return !vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm64_use_vsyscall
-
 static __always_inline
 void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
 {

@@ -28,13 +28,6 @@ int __mips_get_clock_mode(struct timekeeper *tk)
 }
 #define __arch_get_clock_mode __mips_get_clock_mode
 
-static __always_inline
-int __mips_use_vsyscall(struct vdso_data *vdata)
-{
-	return (vdata[CS_HRES_COARSE].clock_mode != VDSO_CLOCK_NONE);
-}
-#define __arch_use_vsyscall __mips_use_vsyscall
-
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>

@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
 	  Say Y here to enable replicating the kernel text across multiple
 	  nodes in a NUMA cluster. This trades memory for speed.
 
-config REPLICATE_EXHANDLERS
-	bool "Exception handler replication support"
-	depends on SGI_IP27
-	help
-	  Say Y here to enable replicating the kernel exception handlers
-	  across multiple nodes in a NUMA cluster. This trades memory for
-	  speed.

@@ -69,23 +69,14 @@ static void per_hub_init(cnodeid_t cnode)
 
 	hub_rtc_init(cnode);
 
-#ifdef CONFIG_REPLICATE_EXHANDLERS
-	/*
-	 * If this is not a headless node initialization,
-	 * copy over the caliased exception handlers.
-	 */
-	if (get_compact_nodeid() == cnode) {
-		extern char except_vec2_generic, except_vec3_generic;
-		extern void build_tlb_refill_handler(void);
-
-		memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
-		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
-		build_tlb_refill_handler();
-		memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
-		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+	if (nasid) {
+		/* copy exception handlers from first node to current node */
+		memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+		       (void *)CKSEG0, 0x200);
 		__flush_cache_all();
+		/* switch to node local exception handlers */
+		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
 	}
-#endif
 }
 
 void per_cpu_init(void)

@@ -332,11 +332,7 @@ static void __init mlreset(void)
 		 * thinks it is a node 0 address.
 		 */
 		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
-#ifdef CONFIG_REPLICATE_EXHANDLERS
-		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
-#else
 		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
-#endif
 
 #ifdef LATER
 	/*

@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
 #
-CFLAGS_REMOVE_vdso-note.o = -pg
 CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
 
 $(obj)/%.so: OBJCOPYFLAGS := -S
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
 	$(call if_changed,objcopy)
 
-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
 
 #This makes sure the $(obj) subdirectory exists even though vdso32/

@@ -1940,6 +1940,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
 
 	  If unsure, say y.
 
+choice
+	prompt "TSX enable mode"
+	depends on CPU_SUP_INTEL
+	default X86_INTEL_TSX_MODE_OFF
+	help
+	  Intel's TSX (Transactional Synchronization Extensions) feature
+	  allows optimizing locking protocols through lock elision, which
+	  can lead to a noticeable performance boost.
+
+	  On the other hand it has been shown that TSX can be exploited
+	  to form side channel attacks (e.g. TAA) and chances are there
+	  will be more of those attacks discovered in the future.
+
+	  Therefore TSX is not enabled by default (aka tsx=off). An admin
+	  might override this decision with the tsx=on command line parameter.
+	  Even with TSX enabled, the kernel will attempt to enable the best
+	  possible TAA mitigation setting depending on the microcode available
+	  for the particular machine.
+
+	  This option allows setting the default tsx mode between tsx=on, =off
+	  and =auto. See Documentation/admin-guide/kernel-parameters.txt for more
+	  details.
+
+	  Say off if not sure, auto if TSX is in use but it should be used on safe
+	  platforms or on if TSX is in use and the security aspect of tsx is not
+	  relevant.
+
+config X86_INTEL_TSX_MODE_OFF
+	bool "off"
+	help
+	  TSX is disabled if possible - equals to tsx=off command line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+	bool "on"
+	help
+	  TSX is always enabled on TSX capable HW - equals the tsx=on command
+	  line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+	bool "auto"
+	help
+	  TSX is enabled on TSX capable HW that is believed to be safe against
+	  side channel attacks - equals the tsx=auto command line parameter.
+endchoice
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI

@@ -399,5 +399,7 @@
 #define X86_BUG_MDS			X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY		X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
 #define X86_BUG_SWAPGS			X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA			X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
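As a hedged illustration of how such bug flags are typically consumed in
arch/x86 code (this snippet is not part of the patch), mitigation logic
usually keys off boot_cpu_has_bug()::

    /* Sketch only: the usual pattern for consuming X86_BUG_* flags. */
    #include <asm/cpufeature.h>

    static bool taa_mitigation_wanted(void)
    {
            if (!boot_cpu_has_bug(X86_BUG_TAA))
                    return false;   /* CPU not affected, nothing to do */
            if (!boot_cpu_has(X86_FEATURE_RTM))
                    return false;   /* TSX already unavailable or disabled */
            return true;
    }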

@@ -312,9 +312,12 @@ struct kvm_rmap_head {
 struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
+	struct list_head lpage_disallowed_link;
+
 	bool unsync;
 	u8 mmu_valid_gen;
 	bool mmio_cached;
+	bool lpage_disallowed; /* Can't be replaced by an equiv large page */
 
 	/*
 	 * The following two entries are used to key the shadow page in the
@@ -859,6 +862,7 @@ struct kvm_arch {
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
+	struct list_head lpage_disallowed_mmu_pages;
 	struct kvm_page_track_notifier_node mmu_sp_tracker;
 	struct kvm_page_track_notifier_head track_notifier_head;
 
@@ -933,6 +937,7 @@ struct kvm_arch {
 	bool exception_payload_enabled;
 
 	struct kvm_pmu_event_filter *pmu_event_filter;
+	struct task_struct *nx_lpage_recovery_thread;
 };
 
 struct kvm_vm_stat {
@@ -946,6 +951,7 @@ struct kvm_vm_stat {
 	ulong mmu_unsync;
 	ulong remote_tlb_flush;
 	ulong lpages;
+	ulong nx_lpage_splits;
 	ulong max_mmu_page_hash_collisions;
 };
|
|
|
@@ -93,6 +93,18 @@
					 * Microarchitectural Data
					 * Sampling (MDS) vulnerabilities.
					 */
+#define ARCH_CAP_PSCHANGE_MC_NO	BIT(6)	/*
+					 * The processor is not susceptible to a
+					 * machine check error due to modifying the
+					 * code page size along with either the
+					 * physical address or cache type
+					 * without TLB invalidation.
+					 */
+#define ARCH_CAP_TSX_CTRL_MSR	BIT(7)	/* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO		BIT(8)	/*
+					 * Not susceptible to
+					 * TSX Async Abort (TAA) vulnerabilities.
+					 */

 #define MSR_IA32_FLUSH_CMD	0x0000010b
 #define L1D_FLUSH		BIT(0)	/*

@@ -103,6 +115,10 @@
 #define MSR_IA32_BBL_CR_CTL	0x00000119
 #define MSR_IA32_BBL_CR_CTL3	0x0000011e

+#define MSR_IA32_TSX_CTRL	0x00000122
+#define TSX_CTRL_RTM_DISABLE	BIT(0)	/* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR	BIT(1)	/* Disable TSX enumeration */
+
 #define MSR_IA32_SYSENTER_CS	0x00000174
 #define MSR_IA32_SYSENTER_ESP	0x00000175
 #define MSR_IA32_SYSENTER_EIP	0x00000176
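For illustration, the new enumeration bits can be read back from userspace through the msr driver. A hedged sketch, not part of this change; it assumes the msr module is loaded, /dev/cpu/0/msr is readable (root), and that the CPU supports IA32_ARCH_CAPABILITIES (0x10a):

```c
/*
 * Illustration only: decode PSCHANGE_MC_NO (bit 6), TSX_CTRL (bit 7) and
 * TAA_NO (bit 8) of IA32_ARCH_CAPABILITIES via /dev/cpu/0/msr.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t cap = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), 0x10a) != sizeof(cap)) {
		perror("IA32_ARCH_CAPABILITIES");
		return 1;
	}
	close(fd);

	printf("PSCHANGE_MC_NO=%u TSX_CTRL=%u TAA_NO=%u\n",
	       (unsigned)(cap >> 6) & 1,
	       (unsigned)(cap >> 7) & 1,
	       (unsigned)(cap >> 8) & 1);
	return 0;
}
```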
@@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 #include <asm/segment.h>

 /**
- * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the

@@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void)
 }

 /**
- * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */

@@ -988,4 +988,11 @@ enum mds_mitigations {
 	MDS_MITIGATION_VMWERV,
 };

+enum taa_mitigations {
+	TAA_MITIGATION_OFF,
+	TAA_MITIGATION_UCODE_NEEDED,
+	TAA_MITIGATION_VERW,
+	TAA_MITIGATION_TSX_DISABLED,
+};
+
 #endif /* _ASM_X86_PROCESSOR_H */
@@ -1586,9 +1586,6 @@ static void setup_local_APIC(void)
 {
 	int cpu = smp_processor_id();
 	unsigned int value;
-#ifdef CONFIG_X86_32
-	int logical_apicid, ldr_apicid;
-#endif

 	if (disable_apic) {
 		disable_ioapic_support();

@@ -1626,16 +1623,21 @@ static void setup_local_APIC(void)
 	apic->init_apic_ldr();

 #ifdef CONFIG_X86_32
-	/*
-	 * APIC LDR is initialized.  If logical_apicid mapping was
-	 * initialized during get_smp_config(), make sure it matches the
-	 * actual value.
-	 */
-	logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
-	ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-	WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid);
-	/* always use the value from LDR */
-	early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+	if (apic->dest_logical) {
+		int logical_apicid, ldr_apicid;
+
+		/*
+		 * APIC LDR is initialized.  If logical_apicid mapping was
+		 * initialized during get_smp_config(), make sure it matches
+		 * the actual value.
+		 */
+		logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+		ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+		if (logical_apicid != BAD_APICID)
+			WARN_ON(logical_apicid != ldr_apicid);
+		/* Always use the value from LDR. */
+		early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+	}
 #endif

 	/*
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS)		+= proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES)	+= capflags.o powerflags.o

 ifdef CONFIG_CPU_SUP_INTEL
-obj-y			+= intel.o intel_pconfig.o
+obj-y			+= intel.o intel_pconfig.o tsx.o
 obj-$(CONFIG_PM)	+= intel_epb.o
 endif
 obj-$(CONFIG_CPU_SUP_AMD)	+= amd.o
@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
+static void __init taa_select_mitigation(void);

 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;

@@ -105,6 +106,7 @@ void __init check_bugs(void)
 	ssb_select_mitigation();
 	l1tf_select_mitigation();
 	mds_select_mitigation();
+	taa_select_mitigation();

 	arch_smt_update();

@@ -268,6 +270,100 @@ static int __init mds_cmdline(char *str)
 }
 early_param("mds", mds_cmdline);

+#undef pr_fmt
+#define pr_fmt(fmt)	"TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+	[TAA_MITIGATION_OFF]		= "Vulnerable",
+	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
+	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+		return;
+	}
+
+	/* TSX previously disabled by tsx=off */
+	if (!boot_cpu_has(X86_FEATURE_RTM)) {
+		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+		goto out;
+	}
+
+	if (cpu_mitigations_off()) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+		return;
+	}
+
+	/* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+	if (taa_mitigation == TAA_MITIGATION_OFF)
+		goto out;
+
+	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+		taa_mitigation = TAA_MITIGATION_VERW;
+	else
+		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+	/*
+	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+	 * A microcode update fixes this behavior to clear CPU buffers. It also
+	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+	 * ARCH_CAP_TSX_CTRL_MSR bit.
+	 *
+	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+	 * update is required.
+	 */
+	ia32_cap = x86_read_arch_cap_msr();
+	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+	/*
+	 * TSX is enabled, select alternate mitigation for TAA which is
+	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
+	 *
+	 * For guests that can't determine whether the correct microcode is
+	 * present on host, enable the mitigation for UCODE_NEEDED as well.
+	 */
+	static_branch_enable(&mds_user_clear);
+
+	if (taa_nosmt || cpu_mitigations_auto_nosmt())
+		cpu_smt_disable(false);
+
+out:
+	pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_TAA))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off")) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+	} else if (!strcmp(str, "full")) {
+		taa_mitigation = TAA_MITIGATION_VERW;
+	} else if (!strcmp(str, "full,nosmt")) {
+		taa_mitigation = TAA_MITIGATION_VERW;
+		taa_nosmt = true;
+	}
+
+	return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V1 : " fmt

@@ -786,13 +882,10 @@ static void update_mds_branch_idle(void)
 }

 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"

 void cpu_bugs_smt_update(void)
 {
-	/* Enhanced IBRS implies STIBP. No update required. */
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
-		return;
-
 	mutex_lock(&spec_ctrl_mutex);

 	switch (spectre_v2_user) {

@@ -819,6 +912,17 @@ void cpu_bugs_smt_update(void)
 		break;
 	}

+	switch (taa_mitigation) {
+	case TAA_MITIGATION_VERW:
+	case TAA_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(TAA_MSG_SMT);
+		break;
+	case TAA_MITIGATION_TSX_DISABLED:
+	case TAA_MITIGATION_OFF:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }

@@ -1149,6 +1253,9 @@ void x86_spec_ctrl_setup_ap(void)
 		x86_amd_ssb_disable();
 }

+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"L1TF: " fmt

@@ -1304,11 +1411,24 @@ static ssize_t l1tf_show_state(char *buf)
 		       l1tf_vmx_states[l1tf_vmx_mitigation],
 		       sched_smt_active() ? "vulnerable" : "disabled");
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+	if (itlb_multihit_kvm_mitigation)
+		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+	else
+		return sprintf(buf, "KVM: Vulnerable\n");
+}
 #else
 static ssize_t l1tf_show_state(char *buf)
 {
 	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+	return sprintf(buf, "Processor vulnerable\n");
+}
 #endif

 static ssize_t mds_show_state(char *buf)

@@ -1328,6 +1448,21 @@ static ssize_t mds_show_state(char *buf)
 		       sched_smt_active() ? "vulnerable" : "disabled");
 }

+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+	    (taa_mitigation == TAA_MITIGATION_OFF))
+		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		return sprintf(buf, "%s; SMT Host state unknown\n",
+			       taa_strings[taa_mitigation]);
+	}
+
+	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+		       sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)

@@ -1398,6 +1533,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_MDS:
 		return mds_show_state(buf);

+	case X86_BUG_TAA:
+		return tsx_async_abort_show_state(buf);
+
+	case X86_BUG_ITLB_MULTIHIT:
+		return itlb_multihit_show_state(buf);
+
 	default:
 		break;
 	}

@@ -1434,4 +1575,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
 }
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
 #endif
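The show functions added above feed the tsx_async_abort and itlb_multihit entries under /sys/devices/system/cpu/vulnerabilities/. A small userspace sketch (illustration only, not part of this change) that prints both:

```c
/* Illustration only: dump the two vulnerability entries added here. */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], line[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/vulnerabilities/%s", name);
	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%-16s %s", name, line);
	if (f)
		fclose(f);
}

int main(void)
{
	show("tsx_async_abort");
	show("itlb_multihit");
	return 0;
}
```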
@@ -1016,13 +1016,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }

 #define NO_SPECULATION		BIT(0)
 #define NO_MELTDOWN		BIT(1)
 #define NO_SSB			BIT(2)
 #define NO_L1TF		BIT(3)
 #define NO_MDS			BIT(4)
 #define MSBDS_ONLY		BIT(5)
 #define NO_SWAPGS		BIT(6)
+#define NO_ITLB_MULTIHIT	BIT(7)

 #define VULNWL(_vendor, _family, _model, _whitelist)	\
 	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }

@@ -1043,27 +1044,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

 	/* Intel Family 6 */
-	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
-	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),

-	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

-	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

 	/*
 	 * Technically, swapgs isn't serializing on AMD (despite it previously

@@ -1073,15 +1074,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	 * good enough for our purposes.
 	 */

+	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),
+
 	/* AMD Family 0xf - 0x12 */
-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
 	{}
 };

@@ -1092,19 +1095,30 @@ static bool __init cpu_matches(unsigned long which)
 	return m && !!(m->driver_data & which);
 }

-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
 {
 	u64 ia32_cap = 0;

+	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = x86_read_arch_cap_msr();
+
+	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+	if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
 	if (cpu_matches(NO_SPECULATION))
 		return;

 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

-	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
 	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

@@ -1121,6 +1135,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (!cpu_matches(NO_SWAPGS))
 		setup_force_cpu_bug(X86_BUG_SWAPGS);

+	/*
+	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
+	 *	- TSX is supported or
+	 *	- TSX_CTRL is present
+	 *
+	 * TSX_CTRL check is needed for cases when TSX could be disabled before
+	 * the kernel boot e.g. kexec.
+	 * TSX_CTRL check alone is not sufficient for cases when the microcode
+	 * update is not present or when running as a guest that doesn't get
+	 * TSX_CTRL.
+	 */
+	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	    (cpu_has(c, X86_FEATURE_RTM) ||
+	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+		setup_force_cpu_bug(X86_BUG_TAA);
+
 	if (cpu_matches(NO_MELTDOWN))
 		return;

@@ -1554,6 +1583,8 @@ void __init identify_boot_cpu(void)
 #endif
 	cpu_detect_tlb(&boot_cpu_data);
 	setup_cr_pinning();
+
+	tsx_init();
 }

 void identify_secondary_cpu(struct cpuinfo_x86 *c)
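The TAA detection above boils down to a single predicate over the ARCH_CAPABILITIES bits and the RTM feature flag. The helper below is a hypothetical restatement for illustration only; it is not part of this change and its names are made up:

```c
/* Illustration only: mirrors the X86_BUG_TAA condition used above. */
#include <stdbool.h>
#include <stdint.h>

#define CAP_TSX_CTRL	(1ull << 7)
#define CAP_TAA_NO	(1ull << 8)

static bool cpu_is_taa_affected(uint64_t ia32_cap, bool has_rtm)
{
	if (ia32_cap & CAP_TAA_NO)
		return false;

	/*
	 * TSX_CTRL also counts, so a CPU whose RTM bit was already hidden
	 * (e.g. kexec after tsx=off) is still reported as affected.
	 */
	return has_rtm || (ia32_cap & CAP_TSX_CTRL);
}
```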
@@ -44,6 +44,22 @@ struct _tlb_table {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
			    *const __x86_cpu_dev_end[];

+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+	TSX_CTRL_ENABLE,
+	TSX_CTRL_DISABLE,
+	TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);

@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);

 extern void x86_spec_ctrl_setup_ap(void);

+extern u64 x86_read_arch_cap_msr(void);
+
 #endif /* ARCH_X86_CPU_H */
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
 		detect_tme(c);

 	init_intel_misc_features(c);
+
+	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+		tsx_enable();
+	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+		tsx_disable();
 }

 #ifdef CONFIG_X86_32
@@ -522,6 +522,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 	int ret = 0;

 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (!rdtgrp) {
+		ret = -ENOENT;
+		goto out;
+	}

 	md.priv = of->kn->priv;
 	resid = md.u.rid;

@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	}

 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
-	rdt_last_cmd_clear();
 	if (!rdtgrp) {
 		ret = -ENOENT;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto unlock;
 	}

@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	int ret;

 	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
-	rdt_last_cmd_clear();
 	if (!prdtgrp) {
 		ret = -ENODEV;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto out_unlock;
 	}
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ *	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+	u64 tsx;
+
+	rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+	/* Force all transactions to immediately abort */
+	tsx |= TSX_CTRL_RTM_DISABLE;
+
+	/*
+	 * Ensure TSX support is not enumerated in CPUID.
+	 * This is visible to userspace and will ensure they
+	 * do not waste resources trying TSX transactions that
+	 * will always abort.
+	 */
+	tsx |= TSX_CTRL_CPUID_CLEAR;
+
+	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+	u64 tsx;
+
+	rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+	/* Enable the RTM feature in the cpu */
+	tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+	/*
+	 * Ensure TSX support is enumerated in CPUID.
+	 * This is visible to userspace and will ensure they
+	 * can enumerate and use the TSX feature.
+	 */
+	tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+	u64 ia32_cap = x86_read_arch_cap_msr();
+
+	/*
+	 * TSX is controlled via MSR_IA32_TSX_CTRL.  However, support for this
+	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+	 *
+	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+	 * tsx= cmdline requests will do nothing on CPUs without
+	 * MSR_IA32_TSX_CTRL support.
+	 */
+	return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_TAA))
+		return TSX_CTRL_DISABLE;
+
+	return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+	char arg[5] = {};
+	int ret;
+
+	if (!tsx_ctrl_is_supported())
+		return;
+
+	ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+	if (ret >= 0) {
+		if (!strcmp(arg, "on")) {
+			tsx_ctrl_state = TSX_CTRL_ENABLE;
+		} else if (!strcmp(arg, "off")) {
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+		} else if (!strcmp(arg, "auto")) {
+			tsx_ctrl_state = x86_get_tsx_auto_mode();
+		} else {
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+			pr_err("tsx: invalid option, defaulting to off\n");
+		}
+	} else {
+		/* tsx= not provided */
+		if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+			tsx_ctrl_state = x86_get_tsx_auto_mode();
+		else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+		else
+			tsx_ctrl_state = TSX_CTRL_ENABLE;
+	}
+
+	if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+		tsx_disable();
+
+		/*
+		 * tsx_disable() will change the state of the
+		 * RTM CPUID bit.  Clear it here since it is now
+		 * expected to be not set.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RTM);
+	} else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+		/*
+		 * HW defaults TSX to be enabled at bootup.
+		 * We may still need the TSX enable support
+		 * during init for special cases like
+		 * kexec after TSX is disabled.
+		 */
+		tsx_enable();
+
+		/*
+		 * tsx_enable() will change the state of the
+		 * RTM CPUID bit.  Force it here since it is now
+		 * expected to be set.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_RTM);
+	}
+}
@@ -94,6 +94,13 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

 	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+	/*
+	 * Handle the case where stack trace is collected _before_
+	 * cea_exception_stacks had been initialized.
+	 */
+	if (!begin)
+		return false;
+
 	end = begin + sizeof(struct cea_exception_stacks);
 	/* Bail if @stack is outside the exception stack area. */
 	if (stk < begin || stk >= end)
@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
 	 */
 	{ PCI_VENDOR_ID_INTEL, 0x0f00,
		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+	{ PCI_VENDOR_ID_INTEL, 0x3ec4,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{ PCI_VENDOR_ID_BROADCOM, 0x4331,
		PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
 	{}
@@ -1505,6 +1505,9 @@ void __init tsc_init(void)
 		return;
 	}

+	if (tsc_clocksource_reliable || no_tsc_watchdog)
+		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+
 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
 	detect_art();
 }
@ -37,6 +37,7 @@
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <linux/hash.h>
|
#include <linux/hash.h>
|
||||||
#include <linux/kern_levels.h>
|
#include <linux/kern_levels.h>
|
||||||
|
#include <linux/kthread.h>
|
||||||
|
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
#include <asm/pat.h>
|
#include <asm/pat.h>
|
||||||
|
@ -47,6 +48,35 @@
|
||||||
#include <asm/kvm_page_track.h>
|
#include <asm/kvm_page_track.h>
|
||||||
#include "trace.h"
|
#include "trace.h"
|
||||||
|
|
||||||
|
extern bool itlb_multihit_kvm_mitigation;
|
||||||
|
|
||||||
|
static int __read_mostly nx_huge_pages = -1;
|
||||||
|
#ifdef CONFIG_PREEMPT_RT
|
||||||
|
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
|
||||||
|
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
|
||||||
|
#else
|
||||||
|
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
|
||||||
|
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
|
||||||
|
|
||||||
|
static struct kernel_param_ops nx_huge_pages_ops = {
|
||||||
|
.set = set_nx_huge_pages,
|
||||||
|
.get = param_get_bool,
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
|
||||||
|
.set = set_nx_huge_pages_recovery_ratio,
|
||||||
|
.get = param_get_uint,
|
||||||
|
};
|
||||||
|
|
||||||
|
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
|
||||||
|
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
|
||||||
|
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
|
||||||
|
&nx_huge_pages_recovery_ratio, 0644);
|
||||||
|
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* When setting this variable to true it enables Two-Dimensional-Paging
|
* When setting this variable to true it enables Two-Dimensional-Paging
|
||||||
* where the hardware walks 2 page tables:
|
* where the hardware walks 2 page tables:
|
||||||
|
@ -352,6 +382,11 @@ static inline bool spte_ad_need_write_protect(u64 spte)
|
||||||
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
|
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool is_nx_huge_page_enabled(void)
|
||||||
|
{
|
||||||
|
return READ_ONCE(nx_huge_pages);
|
||||||
|
}
|
||||||
|
|
||||||
static inline u64 spte_shadow_accessed_mask(u64 spte)
|
static inline u64 spte_shadow_accessed_mask(u64 spte)
|
||||||
{
|
{
|
||||||
MMU_WARN_ON(is_mmio_spte(spte));
|
MMU_WARN_ON(is_mmio_spte(spte));
|
||||||
|
@ -1190,6 +1225,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
kvm_mmu_gfn_disallow_lpage(slot, gfn);
|
kvm_mmu_gfn_disallow_lpage(slot, gfn);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
|
{
|
||||||
|
if (sp->lpage_disallowed)
|
||||||
|
return;
|
||||||
|
|
||||||
|
++kvm->stat.nx_lpage_splits;
|
||||||
|
list_add_tail(&sp->lpage_disallowed_link,
|
||||||
|
&kvm->arch.lpage_disallowed_mmu_pages);
|
||||||
|
sp->lpage_disallowed = true;
|
||||||
|
}
|
||||||
|
|
||||||
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
{
|
{
|
||||||
struct kvm_memslots *slots;
|
struct kvm_memslots *slots;
|
||||||
|
@ -1207,6 +1253,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
|
{
|
||||||
|
--kvm->stat.nx_lpage_splits;
|
||||||
|
sp->lpage_disallowed = false;
|
||||||
|
list_del(&sp->lpage_disallowed_link);
|
||||||
|
}
|
||||||
|
|
||||||
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
|
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
|
||||||
struct kvm_memory_slot *slot)
|
struct kvm_memory_slot *slot)
|
||||||
{
|
{
|
||||||
|
@ -2792,6 +2845,9 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
|
||||||
kvm_reload_remote_mmus(kvm);
|
kvm_reload_remote_mmus(kvm);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (sp->lpage_disallowed)
|
||||||
|
unaccount_huge_nx_page(kvm, sp);
|
||||||
|
|
||||||
sp->role.invalid = 1;
|
sp->role.invalid = 1;
|
||||||
return list_unstable;
|
return list_unstable;
|
||||||
}
|
}
|
||||||
|
@ -3013,6 +3069,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
||||||
if (!speculative)
|
if (!speculative)
|
||||||
spte |= spte_shadow_accessed_mask(spte);
|
spte |= spte_shadow_accessed_mask(spte);
|
||||||
|
|
||||||
|
if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
|
||||||
|
is_nx_huge_page_enabled()) {
|
||||||
|
pte_access &= ~ACC_EXEC_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
if (pte_access & ACC_EXEC_MASK)
|
if (pte_access & ACC_EXEC_MASK)
|
||||||
spte |= shadow_x_mask;
|
spte |= shadow_x_mask;
|
||||||
else
|
else
|
||||||
|
@ -3233,9 +3294,32 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
|
||||||
__direct_pte_prefetch(vcpu, sp, sptep);
|
__direct_pte_prefetch(vcpu, sp, sptep);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
|
||||||
|
gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
|
||||||
|
{
|
||||||
|
int level = *levelp;
|
||||||
|
u64 spte = *it.sptep;
|
||||||
|
|
||||||
|
if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
|
||||||
|
is_nx_huge_page_enabled() &&
|
||||||
|
is_shadow_present_pte(spte) &&
|
||||||
|
!is_large_pte(spte)) {
|
||||||
|
/*
|
||||||
|
* A small SPTE exists for this pfn, but FNAME(fetch)
|
||||||
|
* and __direct_map would like to create a large PTE
|
||||||
|
* instead: just force them to go down another level,
|
||||||
|
* patching back for them into pfn the next 9 bits of
|
||||||
|
* the address.
|
||||||
|
*/
|
||||||
|
u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
|
||||||
|
*pfnp |= gfn & page_mask;
|
||||||
|
(*levelp)--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
|
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
|
||||||
int map_writable, int level, kvm_pfn_t pfn,
|
int map_writable, int level, kvm_pfn_t pfn,
|
||||||
bool prefault)
|
bool prefault, bool lpage_disallowed)
|
||||||
{
|
{
|
||||||
struct kvm_shadow_walk_iterator it;
|
struct kvm_shadow_walk_iterator it;
|
||||||
struct kvm_mmu_page *sp;
|
struct kvm_mmu_page *sp;
|
||||||
|
@ -3248,6 +3332,12 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
|
||||||
|
|
||||||
trace_kvm_mmu_spte_requested(gpa, level, pfn);
|
trace_kvm_mmu_spte_requested(gpa, level, pfn);
|
||||||
for_each_shadow_entry(vcpu, gpa, it) {
|
for_each_shadow_entry(vcpu, gpa, it) {
|
||||||
|
/*
|
||||||
|
* We cannot overwrite existing page tables with an NX
|
||||||
|
* large page, as the leaf could be executable.
|
||||||
|
*/
|
||||||
|
disallowed_hugepage_adjust(it, gfn, &pfn, &level);
|
||||||
|
|
||||||
base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
|
base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
|
||||||
if (it.level == level)
|
if (it.level == level)
|
||||||
break;
|
break;
|
||||||
|
@ -3258,6 +3348,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
|
||||||
it.level - 1, true, ACC_ALL);
|
it.level - 1, true, ACC_ALL);
|
||||||
|
|
||||||
link_shadow_page(vcpu, it.sptep, sp);
|
link_shadow_page(vcpu, it.sptep, sp);
|
||||||
|
if (lpage_disallowed)
|
||||||
|
account_huge_nx_page(vcpu->kvm, sp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3306,7 +3398,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
|
||||||
* here.
|
* here.
|
||||||
*/
|
*/
|
||||||
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
|
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
|
||||||
level == PT_PAGE_TABLE_LEVEL &&
|
!kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
|
||||||
PageTransCompoundMap(pfn_to_page(pfn)) &&
|
PageTransCompoundMap(pfn_to_page(pfn)) &&
|
||||||
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
|
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
|
||||||
unsigned long mask;
|
unsigned long mask;
|
||||||
|
@ -3550,11 +3642,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
int level;
|
int level;
|
||||||
bool force_pt_level = false;
|
bool force_pt_level;
|
||||||
kvm_pfn_t pfn;
|
kvm_pfn_t pfn;
|
||||||
unsigned long mmu_seq;
|
unsigned long mmu_seq;
|
||||||
bool map_writable, write = error_code & PFERR_WRITE_MASK;
|
bool map_writable, write = error_code & PFERR_WRITE_MASK;
|
||||||
|
bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
|
||||||
|
is_nx_huge_page_enabled();
|
||||||
|
|
||||||
|
force_pt_level = lpage_disallowed;
|
||||||
level = mapping_level(vcpu, gfn, &force_pt_level);
|
level = mapping_level(vcpu, gfn, &force_pt_level);
|
||||||
if (likely(!force_pt_level)) {
|
if (likely(!force_pt_level)) {
|
||||||
/*
|
/*
|
||||||
|
@ -3588,7 +3683,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
if (likely(!force_pt_level))
|
if (likely(!force_pt_level))
|
||||||
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
|
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
|
||||||
r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
|
r = __direct_map(vcpu, v, write, map_writable, level, pfn,
|
||||||
|
prefault, false);
|
||||||
out_unlock:
|
out_unlock:
|
||||||
spin_unlock(&vcpu->kvm->mmu_lock);
|
spin_unlock(&vcpu->kvm->mmu_lock);
|
||||||
kvm_release_pfn_clean(pfn);
|
kvm_release_pfn_clean(pfn);
|
||||||
|
@ -4174,6 +4270,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
|
||||||
unsigned long mmu_seq;
|
unsigned long mmu_seq;
|
||||||
int write = error_code & PFERR_WRITE_MASK;
|
int write = error_code & PFERR_WRITE_MASK;
|
||||||
bool map_writable;
|
bool map_writable;
|
||||||
|
bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
|
||||||
|
is_nx_huge_page_enabled();
|
||||||
|
|
||||||
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
|
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
|
||||||
|
|
||||||
|
@ -4184,8 +4282,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
|
force_pt_level =
|
||||||
PT_DIRECTORY_LEVEL);
|
lpage_disallowed ||
|
||||||
|
!check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
|
||||||
level = mapping_level(vcpu, gfn, &force_pt_level);
|
level = mapping_level(vcpu, gfn, &force_pt_level);
|
||||||
if (likely(!force_pt_level)) {
|
if (likely(!force_pt_level)) {
|
||||||
if (level > PT_DIRECTORY_LEVEL &&
|
if (level > PT_DIRECTORY_LEVEL &&
|
||||||
|
@ -4214,7 +4313,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
if (likely(!force_pt_level))
|
if (likely(!force_pt_level))
|
||||||
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
|
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
|
||||||
r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
|
r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
|
||||||
|
prefault, lpage_disallowed);
|
||||||
out_unlock:
|
out_unlock:
|
||||||
spin_unlock(&vcpu->kvm->mmu_lock);
|
spin_unlock(&vcpu->kvm->mmu_lock);
|
||||||
kvm_release_pfn_clean(pfn);
|
kvm_release_pfn_clean(pfn);
|
||||||
|
@ -5914,9 +6014,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
|
||||||
* the guest, and the guest page table is using 4K page size
|
* the guest, and the guest page table is using 4K page size
|
||||||
* mapping if the indirect sp has level = 1.
|
* mapping if the indirect sp has level = 1.
|
||||||
*/
|
*/
|
||||||
if (sp->role.direct &&
|
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
|
||||||
!kvm_is_reserved_pfn(pfn) &&
|
!kvm_is_zone_device_pfn(pfn) &&
|
||||||
PageTransCompoundMap(pfn_to_page(pfn))) {
|
PageTransCompoundMap(pfn_to_page(pfn))) {
|
||||||
pte_list_remove(rmap_head, sptep);
|
pte_list_remove(rmap_head, sptep);
|
||||||
|
|
||||||
if (kvm_available_flush_tlb_with_range())
|
if (kvm_available_flush_tlb_with_range())
|
||||||
|
@ -6155,10 +6255,59 @@ static void kvm_set_mmio_spte_mask(void)
|
||||||
kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
|
kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool get_nx_auto_mode(void)
|
||||||
|
{
|
||||||
|
/* Return true when CPU has the bug, and mitigations are ON */
|
||||||
|
return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __set_nx_huge_pages(bool val)
|
||||||
|
{
|
||||||
|
nx_huge_pages = itlb_multihit_kvm_mitigation = val;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
|
||||||
|
{
|
||||||
|
bool old_val = nx_huge_pages;
|
||||||
|
bool new_val;
|
||||||
|
|
||||||
|
/* In "auto" mode deploy workaround only if CPU has the bug. */
|
||||||
|
if (sysfs_streq(val, "off"))
|
||||||
|
new_val = 0;
|
||||||
|
else if (sysfs_streq(val, "force"))
|
||||||
|
new_val = 1;
|
||||||
|
else if (sysfs_streq(val, "auto"))
|
||||||
|
new_val = get_nx_auto_mode();
|
||||||
|
else if (strtobool(val, &new_val) < 0)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
__set_nx_huge_pages(new_val);
|
||||||
|
|
||||||
|
if (new_val != old_val) {
|
||||||
|
struct kvm *kvm;
|
||||||
|
|
||||||
|
mutex_lock(&kvm_lock);
|
||||||
|
|
||||||
|
list_for_each_entry(kvm, &vm_list, vm_list) {
|
||||||
|
mutex_lock(&kvm->slots_lock);
|
||||||
|
kvm_mmu_zap_all_fast(kvm);
|
||||||
|
mutex_unlock(&kvm->slots_lock);
|
||||||
|
|
||||||
|
wake_up_process(kvm->arch.nx_lpage_recovery_thread);
|
||||||
|
}
|
||||||
|
mutex_unlock(&kvm_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
int kvm_mmu_module_init(void)
|
int kvm_mmu_module_init(void)
|
||||||
{
|
{
|
||||||
int ret = -ENOMEM;
|
int ret = -ENOMEM;
|
||||||
|
|
||||||
|
if (nx_huge_pages == -1)
|
||||||
|
__set_nx_huge_pages(get_nx_auto_mode());
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* MMU roles use union aliasing which is, generally speaking, an
|
* MMU roles use union aliasing which is, generally speaking, an
|
||||||
* undefined behavior. However, we supposedly know how compilers behave
|
* undefined behavior. However, we supposedly know how compilers behave
|
||||||
|
@ -6238,3 +6387,116 @@ void kvm_mmu_module_exit(void)
|
||||||
unregister_shrinker(&mmu_shrinker);
|
unregister_shrinker(&mmu_shrinker);
|
||||||
mmu_audit_disable();
|
mmu_audit_disable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
|
||||||
|
{
|
||||||
|
unsigned int old_val;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
old_val = nx_huge_pages_recovery_ratio;
|
||||||
|
err = param_set_uint(val, kp);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
if (READ_ONCE(nx_huge_pages) &&
|
||||||
|
!old_val && nx_huge_pages_recovery_ratio) {
|
||||||
|
struct kvm *kvm;
|
||||||
|
|
||||||
|
mutex_lock(&kvm_lock);
|
||||||
|
|
||||||
|
list_for_each_entry(kvm, &vm_list, vm_list)
|
||||||
|
wake_up_process(kvm->arch.nx_lpage_recovery_thread);
|
||||||
|
|
||||||
|
mutex_unlock(&kvm_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvm_recover_nx_lpages(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
int rcu_idx;
|
||||||
|
struct kvm_mmu_page *sp;
|
||||||
|
unsigned int ratio;
|
||||||
|
LIST_HEAD(invalid_list);
|
||||||
|
ulong to_zap;
|
||||||
|
|
||||||
|
rcu_idx = srcu_read_lock(&kvm->srcu);
|
||||||
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
|
||||||
|
ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
|
||||||
|
to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
|
||||||
|
while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
|
||||||
|
/*
|
||||||
|
* We use a separate list instead of just using active_mmu_pages
|
||||||
|
* because the number of lpage_disallowed pages is expected to
|
||||||
|
* be relatively small compared to the total.
|
||||||
|
*/
|
||||||
|
sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
|
||||||
|
struct kvm_mmu_page,
|
||||||
|
lpage_disallowed_link);
|
||||||
|
WARN_ON_ONCE(!sp->lpage_disallowed);
|
||||||
|
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
|
||||||
|
WARN_ON_ONCE(sp->lpage_disallowed);
|
||||||
|
|
||||||
|
if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
|
||||||
|
kvm_mmu_commit_zap_page(kvm, &invalid_list);
|
||||||
|
if (to_zap)
|
||||||
|
cond_resched_lock(&kvm->mmu_lock);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_unlock(&kvm->mmu_lock);
|
||||||
|
srcu_read_unlock(&kvm->srcu, rcu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
static long get_nx_lpage_recovery_timeout(u64 start_time)
|
||||||
|
{
|
||||||
|
return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
|
||||||
|
? start_time + 60 * HZ - get_jiffies_64()
|
||||||
|
: MAX_SCHEDULE_TIMEOUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
|
||||||
|
{
|
||||||
|
u64 start_time;
|
||||||
|
long remaining_time;
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
start_time = get_jiffies_64();
|
||||||
|
remaining_time = get_nx_lpage_recovery_timeout(start_time);
|
||||||
|
|
||||||
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
while (!kthread_should_stop() && remaining_time > 0) {
|
||||||
|
schedule_timeout(remaining_time);
|
||||||
|
remaining_time = get_nx_lpage_recovery_timeout(start_time);
|
||||||
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
}
|
||||||
|
|
||||||
|
set_current_state(TASK_RUNNING);
|
||||||
|
|
||||||
|
if (kthread_should_stop())
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
kvm_recover_nx_lpages(kvm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int kvm_mmu_post_init_vm(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
|
||||||
|
"kvm-nx-lpage-recovery",
|
||||||
|
&kvm->arch.nx_lpage_recovery_thread);
|
||||||
|
if (!err)
|
||||||
|
kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
if (kvm->arch.nx_lpage_recovery_thread)
|
||||||
|
kthread_stop(kvm->arch.nx_lpage_recovery_thread);
|
||||||
|
}
|
||||||
|
|
|
@ -210,4 +210,8 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
|
||||||
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
|
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
|
||||||
struct kvm_memory_slot *slot, u64 gfn);
|
struct kvm_memory_slot *slot, u64 gfn);
|
||||||
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
|
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
|
int kvm_mmu_post_init_vm(struct kvm *kvm);
|
||||||
|
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -614,13 +614,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int write_fault, int hlevel,
-			 kvm_pfn_t pfn, bool map_writable, bool prefault)
+			 kvm_pfn_t pfn, bool map_writable, bool prefault,
+			 bool lpage_disallowed)
 {
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
 	int top_level, ret;
-	gfn_t base_gfn;
+	gfn_t gfn, base_gfn;

 	direct_access = gw->pte_access;

@@ -665,13 +666,25 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		link_shadow_page(vcpu, it.sptep, sp);
 	}

-	base_gfn = gw->gfn;
+	/*
+	 * FNAME(page_fault) might have clobbered the bottom bits of
+	 * gw->gfn, restore them from the virtual address.
+	 */
+	gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
+	base_gfn = gfn;

 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

 	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
 		clear_sp_write_flooding_count(it.sptep);
-		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+
+		/*
+		 * We cannot overwrite existing page tables with an NX
+		 * large page, as the leaf could be executable.
+		 */
+		disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
+
+		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == hlevel)
 			break;

@@ -683,6 +696,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
 					      it.level - 1, true, direct_access);
 			link_shadow_page(vcpu, it.sptep, sp);
+			if (lpage_disallowed)
+				account_huge_nx_page(vcpu->kvm, sp);
 		}
 	}

@@ -759,9 +774,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int r;
 	kvm_pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
-	bool force_pt_level = false;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
+	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+				is_nx_huge_page_enabled();
+	bool force_pt_level = lpage_disallowed;

 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

@@ -851,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
-			 level, pfn, map_writable, prefault);
+			 level, pfn, map_writable, prefault, lpage_disallowed);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

 out_unlock:
@@ -1268,6 +1268,18 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
 		return;

+	/*
+	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+	 * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
+	 * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
+	 * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up
+	 * correctly.
+	 */
+	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
+		pi_clear_sn(pi_desc);
+		goto after_clear_sn;
+	}
+
 	/* The full case. */
 	do {
 		old.control = new.control = pi_desc->control;

@@ -1283,6 +1295,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 	} while (cmpxchg64(&pi_desc->control, old.control,
 			   new.control) != old.control);

+after_clear_sn:
+
 	/*
 	 * Clear SN before reading the bitmap.  The VT-d firmware
 	 * writes the bitmap and reads SN atomically (5.2.3 in the

@@ -1291,7 +1305,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 	 */
 	smp_mb__after_atomic();

-	if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+	if (!pi_is_pir_empty(pi_desc))
 		pi_set_on(pi_desc);
 }

@@ -6137,7 +6151,7 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	if (pi_test_on(&vmx->pi_desc)) {
 		pi_clear_on(&vmx->pi_desc);
 		/*
-		 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
 		 * But on x86 this is just a compiler barrier anyway.
 		 */
 		smp_mb__after_atomic();

@@ -6167,7 +6181,10 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)

 static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
-	return pi_test_on(vcpu_to_pi_desc(vcpu));
+	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+	return pi_test_on(pi_desc) ||
+		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
 }

 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -355,6 +355,11 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
 	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
 }

+static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+{
+	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+}
+
 static inline void pi_set_sn(struct pi_desc *pi_desc)
 {
 	set_bit(POSTED_INTR_SN,

@@ -373,6 +378,12 @@ static inline void pi_clear_on(struct pi_desc *pi_desc)
 		(unsigned long *)&pi_desc->control);
 }

+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+	clear_bit(POSTED_INTR_SN,
+		  (unsigned long *)&pi_desc->control);
+}
+
 static inline int pi_test_on(struct pi_desc *pi_desc)
 {
 	return test_bit(POSTED_INTR_ON,
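For reference, pi_is_pir_empty() above is simply bitmap_empty() over the 256 posted-interrupt request bits. A standalone userspace sketch of the same check (the PIR size and word layout here are illustrative, not the hardware descriptor layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIR_VECTORS 256
#define PIR_WORDS   (PIR_VECTORS / 64)

/* Return true when no vector bit is set in the PIR bitmap. */
static bool pir_empty(const uint64_t pir[PIR_WORDS])
{
    for (int i = 0; i < PIR_WORDS; i++)
        if (pir[i])
            return false;
    return true;
}

int main(void)
{
    uint64_t pir[PIR_WORDS] = { 0 };

    printf("empty: %d\n", pir_empty(pir));   /* prints 1 */
    pir[3] |= 1ull << 10;                    /* post vector 202 */
    printf("empty: %d\n", pir_empty(pir));   /* prints 0 */
    return 0;
}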
@@ -213,6 +213,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ "largepages", VM_STAT(lpages, .mode = 0444) },
+	{ "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) },
 	{ "max_mmu_page_hash_collisions",
 		VM_STAT(max_mmu_page_hash_collisions) },
 	{ NULL }

@@ -1132,13 +1133,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
  *
- * This list is modified at module load time to reflect the
+ * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
+ * extract the supported MSRs from the related const lists.
+ * msrs_to_save is selected from the msrs_to_save_all to reflect the
  * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
  * may depend on host virtualization features rather than host cpu features.
  */

-static u32 msrs_to_save[] = {
+static const u32 msrs_to_save_all[] = {
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64

@@ -1179,9 +1182,10 @@ static u32 msrs_to_save[] = {
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
 };

+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
 static unsigned num_msrs_to_save;

-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs_all[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,

@@ -1220,7 +1224,7 @@ static u32 emulated_msrs[] = {
 	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
 	 * We always support the "true" VMX control MSRs, even if the host
 	 * processor does not, so I am putting these registers here rather
-	 * than in msrs_to_save.
+	 * than in msrs_to_save_all.
 	 */
 	MSR_IA32_VMX_BASIC,
 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,

@@ -1239,13 +1243,14 @@ static u32 emulated_msrs[] = {
 	MSR_KVM_POLL_CONTROL,
 };

+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
 static unsigned num_emulated_msrs;

 /*
  * List of msr numbers which are used to expose MSR-based features that
  * can be used by a hypervisor to validate requested CPU features.
  */
-static u32 msr_based_features[] = {
+static const u32 msr_based_features_all[] = {
 	MSR_IA32_VMX_BASIC,
 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
 	MSR_IA32_VMX_PINBASED_CTLS,

@@ -1270,6 +1275,7 @@ static u32 msr_based_features[] = {
 	MSR_IA32_ARCH_CAPABILITIES,
 };

+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
 static unsigned int num_msr_based_features;

 static u64 kvm_get_arch_capabilities(void)

@@ -1279,6 +1285,14 @@ static u64 kvm_get_arch_capabilities(void)
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);

+	/*
+	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+	 * the nested hypervisor runs with NX huge pages.  If it is not,
+	 * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other
+	 * L1 guests, so it need not worry about its own (L2) guests.
+	 */
+	data |= ARCH_CAP_PSCHANGE_MC_NO;
+
 	/*
 	 * If we're doing cache flushes (either "always" or "cond")
 	 * we will do one whenever the guest does a vmlaunch/vmresume.

@@ -1298,6 +1312,25 @@ static u64 kvm_get_arch_capabilities(void)
 	if (!boot_cpu_has_bug(X86_BUG_MDS))
 		data |= ARCH_CAP_MDS_NO;

+	/*
+	 * On TAA affected systems, export MDS_NO=0 when:
+	 *	- TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
+	 *	- Updated microcode is present. This is detected by
+	 *	  the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
+	 *	  that VERW clears CPU buffers.
+	 *
+	 * When MDS_NO=0 is exported, guests deploy clear CPU buffer
+	 * mitigation and don't complain:
+	 *
+	 *	"Vulnerable: Clear CPU buffers attempted, no microcode"
+	 *
+	 * If TSX is disabled on the system, guests are also mitigated against
+	 * TAA and clear CPU buffer mitigation is not required for guests.
+	 */
+	if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
+	    (data & ARCH_CAP_TSX_CTRL_MSR))
+		data &= ~ARCH_CAP_MDS_NO;
+
 	return data;
 }

@@ -5090,22 +5123,26 @@ static void kvm_init_msr_list(void)
 {
 	struct x86_pmu_capability x86_pmu;
 	u32 dummy[2];
-	unsigned i, j;
+	unsigned i;

 	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
-			 "Please update the fixed PMCs in msrs_to_save[]");
+			 "Please update the fixed PMCs in msrs_to_saved_all[]");

 	perf_get_x86_pmu_capability(&x86_pmu);

-	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
-		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+	num_msrs_to_save = 0;
+	num_emulated_msrs = 0;
+	num_msr_based_features = 0;
+
+	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
+		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
 			continue;

 		/*
 		 * Even MSRs that are valid in the host may not be exposed
 		 * to the guests in some cases.
 		 */
-		switch (msrs_to_save[i]) {
+		switch (msrs_to_save_all[i]) {
 		case MSR_IA32_BNDCFGS:
 			if (!kvm_mpx_supported())
 				continue;

@@ -5133,17 +5170,17 @@ static void kvm_init_msr_list(void)
 			break;
 		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
 			if (!kvm_x86_ops->pt_supported() ||
-				msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+				msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
 				intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
 				continue;
 			break;
 		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
-			if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
 				continue;
 			break;
 		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
-			if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
 				continue;
 		}

@@ -5151,34 +5188,25 @@ static void kvm_init_msr_list(void)
 			break;
 		}

-		if (j < i)
-			msrs_to_save[j] = msrs_to_save[i];
-		j++;
+		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
 	}
-	num_msrs_to_save = j;

-	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
 			continue;

-		if (j < i)
-			emulated_msrs[j] = emulated_msrs[i];
-		j++;
+		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
 	}
-	num_emulated_msrs = j;

-	for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+	for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
 		struct kvm_msr_entry msr;

-		msr.index = msr_based_features[i];
+		msr.index = msr_based_features_all[i];
 		if (kvm_get_msr_feature(&msr))
 			continue;

-		if (j < i)
-			msr_based_features[j] = msr_based_features[i];
-		j++;
+		msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
 	}
-	num_msr_based_features = j;
 }

 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,

@@ -9428,6 +9456,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);

@@ -9456,6 +9485,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	return kvm_x86_ops->vm_init(kvm);
 }

+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+	return kvm_mmu_post_init_vm(kvm);
+}
+
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
 	vcpu_load(vcpu);

@@ -9557,6 +9591,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 }
 EXPORT_SYMBOL_GPL(x86_set_memory_region);

+void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+	kvm_mmu_pre_destroy_vm(kvm);
+}
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	if (current->mm == kvm->mm) {
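The msrs_to_save_all/emulated_msrs_all rework above follows a common pattern: keep the full candidate table const, and copy only the entries the host actually supports into a runtime array with its own counter, so the probe can safely be re-run. A minimal userspace sketch of that pattern (the MSR numbers and the is_supported() predicate are stand-ins for the real rdmsr_safe()/has_emulated_msr() probes):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t msrs_all[] = { 0x174, 0x175, 0x176, 0xc0000081 };

static uint32_t msrs[sizeof(msrs_all) / sizeof(msrs_all[0])];
static unsigned int num_msrs;

/* Stand-in for the host capability probe (e.g. rdmsr_safe()). */
static int is_supported(uint32_t msr)
{
    return msr != 0x176;        /* pretend one MSR is unavailable */
}

static void init_msr_list(void)
{
    num_msrs = 0;               /* safe to re-run, like kvm_init_msr_list() */
    for (size_t i = 0; i < sizeof(msrs_all) / sizeof(msrs_all[0]); i++) {
        if (!is_supported(msrs_all[i]))
            continue;
        msrs[num_msrs++] = msrs_all[i];
    }
}

int main(void)
{
    init_msr_list();
    printf("%u of %zu MSRs kept\n", num_msrs,
           sizeof(msrs_all) / sizeof(msrs_all[0]));
    return 0;
}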
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 	}
 }

+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+	/*
+	 * To prevent bfqq's service guarantees from being violated,
+	 * bfqq may be left busy, i.e., queued for service, even if
+	 * empty (see comments in __bfq_bfqq_expire() for
+	 * details). But, if no process will send requests to bfqq any
+	 * longer, then there is no point in keeping bfqq queued for
+	 * service. In addition, keeping bfqq queued for service, but
+	 * with no process ref any longer, may have caused bfqq to be
+	 * freed when dequeued from service. But this is assumed to
+	 * never happen.
+	 */
+	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+	    bfqq != bfqd->in_service_queue)
+		bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+	bfq_put_queue(bfqq);
+}
+
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)

@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	 */
 	new_bfqq->pid = -1;
 	bfqq->bic = NULL;
-	/* release process reference to bfqq */
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqd, bfqq);
 }

 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,

@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)

 	bfq_put_cooperator(bfqq);

-	bfq_put_queue(bfqq); /* release process reference */
+	bfq_release_process_ref(bfqd, bfqq);
 }

 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)

@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)

 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		/* release process reference on this queue */
-		bfq_put_queue(bfqq);
+		bfq_release_process_ref(bfqd, bfqq);
 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
 		bic_set_bfqq(bic, bfqq, false);
 	}

@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)

 	bfq_put_cooperator(bfqq);

-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 	return NULL;
 }
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;

-	if (bio->bi_vcnt > 0) {
+	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

 		if (page_is_mergeable(bv, page, len, off, same_page)) {
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	atomic64_set(&iocg->active_period, cur_period);

 	/* already activated or breaking leaf-only constraint? */
-	for (i = iocg->level; i > 0; i--)
-		if (!list_empty(&iocg->active_list))
+	if (!list_empty(&iocg->active_list))
+		goto succeed_unlock;
+	for (i = iocg->level - 1; i > 0; i--)
+		if (!list_empty(&iocg->ancestors[i]->active_list))
 			goto fail_unlock;

 	if (iocg->child_active_sum)
 		goto fail_unlock;

@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 		ioc_start_period(ioc, now);
 	}

+succeed_unlock:
 	spin_unlock_irq(&ioc->lock);
 	return true;
@@ -554,12 +554,27 @@ ssize_t __weak cpu_show_mds(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }

+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);

 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,

@@ -568,6 +583,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_spec_store_bypass.attr,
 	&dev_attr_l1tf.attr,
 	&dev_attr_mds.attr,
+	&dev_attr_tsx_async_abort.attr,
+	&dev_attr_itlb_multihit.attr,
 	NULL
 };
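With the two attributes registered above, userspace can read the new entries the same way it reads the existing vulnerability files. A small sketch (assuming the usual /sys/devices/system/cpu/vulnerabilities/ location and that the files exist on the running kernel):

#include <stdio.h>

static void show(const char *name)
{
    char path[256], buf[256];
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/devices/system/cpu/vulnerabilities/%s", name);
    f = fopen(path, "r");
    if (!f) {
        printf("%s: <not present>\n", name);
        return;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("%s: %s", name, buf);
    fclose(f);
}

int main(void)
{
    show("tsx_async_abort");
    show("itlb_multihit");
    return 0;
}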
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
 	}
 	return ret;
 }
+
+struct for_each_memory_block_cb_data {
+	walk_memory_blocks_func_t func;
+	void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	struct for_each_memory_block_cb_data *cb_data = data;
+
+	return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+	struct for_each_memory_block_cb_data cb_data = {
+		.func = func,
+		.arg = arg,
+	};
+
+	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+				for_each_memory_block_cb);
+}
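A caller of the new helper only has to supply a walk_memory_blocks_func_t callback. A hypothetical in-kernel sketch that counts the present memory blocks (the counting callback is invented for illustration, not part of the patch):

#include <linux/memory.h>

static int count_block(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;		/* a non-zero return would abort the walk */
}

static unsigned int count_memory_blocks(void)
{
	unsigned int count = 0;

	for_each_memory_block(&count, count_block);
	return count;
}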
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
 	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
 	struct ceph_osd_data *osd_data;
 	u64 objno;
-	u8 state, new_state, current_state;
+	u8 state, new_state, uninitialized_var(current_state);
 	bool has_current_state;
 	void *p;
|
@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
|
||||||
|
|
||||||
cancel_work_sync(&card->event_work);
|
cancel_work_sync(&card->event_work);
|
||||||
|
|
||||||
|
destroy_workqueue(card->event_wq);
|
||||||
rsxx_destroy_dev(card);
|
rsxx_destroy_dev(card);
|
||||||
rsxx_dma_destroy(card);
|
rsxx_dma_destroy(card);
|
||||||
|
destroy_workqueue(card->creg_ctrl.creg_wq);
|
||||||
|
|
||||||
spin_lock_irqsave(&card->irq_lock, flags);
|
spin_lock_irqsave(&card->irq_lock, flags);
|
||||||
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
|
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
|
||||||
|
|
|
@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/hw_random.h>
 #include <linux/kernel.h>

@@ -422,9 +421,7 @@ static int hwrng_fillfn(void *unused)
 {
 	long rc;

-	set_freezable();
-
-	while (!kthread_freezable_should_stop(NULL)) {
+	while (!kthread_should_stop()) {
 		struct hwrng *rng;

 		rng = get_current_rng();
@@ -327,7 +327,6 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
-#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>

@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	 * We'll be woken up again once below random_write_wakeup_thresh,
 	 * or when the calling thread is about to terminate.
 	 */
-	wait_event_freezable(random_write_wait,
-			     kthread_should_stop() ||
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
 			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
 	mix_pool_bytes(poolp, buffer, count);
 	credit_entropy_bits(poolp, entropy);
@@ -328,12 +328,13 @@ static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
 	return 0;
 }

+static const unsigned int sh_mtu2_channel_offsets[] = {
+	0x300, 0x380, 0x000,
+};
+
 static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
 				 struct sh_mtu2_device *mtu)
 {
-	static const unsigned int channel_offsets[] = {
-		0x300, 0x380, 0x000,
-	};
 	char name[6];
 	int irq;
 	int ret;

@@ -356,7 +357,7 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
 		return ret;
 	}

-	ch->base = mtu->mapbase + channel_offsets[index];
+	ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
 	ch->index = index;

 	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));

@@ -408,7 +409,12 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 	}

 	/* Allocate and setup the channels. */
-	mtu->num_channels = 3;
+	ret = platform_irq_count(pdev);
+	if (ret < 0)
+		goto err_unmap;
+
+	mtu->num_channels = min_t(unsigned int, ret,
+				  ARRAY_SIZE(sh_mtu2_channel_offsets));

 	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
 				GFP_KERNEL);
@@ -268,15 +268,12 @@ static int __init mtk_syst_init(struct device_node *node)

 	ret = timer_of_init(node, &to);
 	if (ret)
-		goto err;
+		return ret;

 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
 					TIMER_SYNC_TICKS, 0xffffffff);

 	return 0;
-err:
-	timer_of_cleanup(&to);
-	return ret;
 }

 static int __init mtk_gpt_init(struct device_node *node)

@@ -293,7 +290,7 @@ static int __init mtk_gpt_init(struct device_node *node)

 	ret = timer_of_init(node, &to);
 	if (ret)
-		goto err;
+		return ret;

 	/* Configure clock source */
 	mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);

@@ -311,9 +308,6 @@ static int __init mtk_gpt_init(struct device_node *node)
 	mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);

 	return 0;
-err:
-	timer_of_cleanup(&to);
-	return ret;
 }
 TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
 TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 			     struct amdgpu_firmware_info *ucode)
 {
 	struct amdgpu_device *adev = psp->adev;
-	const struct sdma_firmware_header_v1_0 *sdma_hdr =
-		(const struct sdma_firmware_header_v1_0 *)
-		adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
-	const struct gfx_firmware_header_v1_0 *ce_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-	const struct gfx_firmware_header_v1_0 *pfp_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-	const struct gfx_firmware_header_v1_0 *me_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-	const struct gfx_firmware_header_v1_0 *mec_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-	const struct rlc_firmware_header_v2_0 *rlc_hdr =
-		(const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-	const struct smc_firmware_header_v1_0 *smc_hdr =
-		(const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	struct common_firmware_header *hdr;

 	switch (ucode->ucode_id) {
 	case AMDGPU_UCODE_ID_SDMA0:

@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 	case AMDGPU_UCODE_ID_SDMA5:
 	case AMDGPU_UCODE_ID_SDMA6:
 	case AMDGPU_UCODE_ID_SDMA7:
-		amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+		hdr = (struct common_firmware_header *)
+			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+		amdgpu_ucode_print_sdma_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_CE:
-		amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_PFP:
-		amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_ME:
-		amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_MEC1:
-		amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_RLC_G:
-		amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+		amdgpu_ucode_print_rlc_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_SMC:
-		amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+		hdr = (struct common_firmware_header *)adev->pm.fw->data;
+		amdgpu_ucode_print_smc_hdr(hdr);
 		break;
 	default:
 		break;
@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)

 	power_domains->initializing = true;

+	/* Must happen before power domain init on VLV/CHV */
+	intel_update_rawclk(i915);
+
 	if (INTEL_GEN(i915) >= 11) {
 		icl_display_core_init(i915, resume);
 	} else if (IS_CANNONLAKE(i915)) {
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);

+	kfree(ctx->jump_whitelist);
+
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);

@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

+	ctx->jump_whitelist = NULL;
+	ctx->jump_whitelist_cmds = 0;
+
 	return ctx;

 err_free:

@@ -192,6 +192,13 @@ struct i915_gem_context {
 	 * per vm, which may be one per context or shared with the global GTT)
 	 */
 	struct radix_tree_root handles_vma;
+
+	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
+	 *  Guarded by struct_mutex
+	 */
+	unsigned long *jump_whitelist;
+	/** jump_whitelist_cmds: No of cmd slots available */
+	u32 jump_whitelist_cmds;
 };

 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
@@ -296,7 +296,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)

 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+	return intel_engine_requires_cmd_parser(eb->engine) ||
+		(intel_engine_using_cmd_parser(eb->engine) &&
+		 eb->args->batch_len);
 }

 static int eb_create(struct i915_execbuffer *eb)

@@ -1955,40 +1957,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	return 0;
 }

-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = eb->i915;
+	struct i915_vma * const vma = *eb->vma;
+	struct i915_address_space *vm;
+	u64 flags;
+
+	/*
+	 * PPGTT backed shadow buffers must be mapped RO, to prevent
+	 * post-scan tampering
+	 */
+	if (CMDPARSER_USES_GGTT(dev_priv)) {
+		flags = PIN_GLOBAL;
+		vm = &dev_priv->ggtt.vm;
+	} else if (vma->vm->has_read_only) {
+		flags = PIN_USER;
+		vm = vma->vm;
+		i915_gem_object_set_readonly(obj);
+	} else {
+		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
+	u64 batch_start;
+	u64 shadow_batch_start;
 	int err;

 	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
 	if (IS_ERR(pool))
 		return ERR_CAST(pool);

-	err = intel_engine_cmd_parser(eb->engine,
+	vma = shadow_batch_pin(eb, pool->obj);
+	if (IS_ERR(vma))
+		goto err;
+
+	batch_start = gen8_canonical_addr(eb->batch->node.start) +
+		      eb->batch_start_offset;
+
+	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+	err = intel_engine_cmd_parser(eb->gem_context,
+				      eb->engine,
 				      eb->batch->obj,
-				      pool->obj,
+				      batch_start,
 				      eb->batch_start_offset,
 				      eb->batch_len,
-				      is_master);
+				      pool->obj,
+				      shadow_batch_start);
+
 	if (err) {
-		if (err == -EACCES) /* unhandled chained batch */
+		i915_vma_unpin(vma);
+
+		/*
+		 * Unsafe GGTT-backed buffers can still be submitted safely
+		 * as non-secure.
+		 * For PPGTT backing however, we have no choice but to forcibly
+		 * reject unsafe buffers
+		 */
+		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+			/* Execute original buffer non-secure */
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
 		goto err;
 	}

-	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		goto err;
-
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;

+	eb->batch_start_offset = 0;
+	eb->batch = vma;
+
+	if (CMDPARSER_USES_GGTT(eb->i915))
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+
+	/* eb->batch_len unchanged */
+
 	vma->private = pool;
 	return vma;

@@ -2421,6 +2477,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_i915_gem_exec_object2 *exec,
 		       struct drm_syncobj **fences)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;

@@ -2432,7 +2489,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

-	eb.i915 = to_i915(dev);
+	eb.i915 = i915;
 	eb.file = file;
 	eb.args = args;
 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))

@@ -2452,8 +2509,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,

 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
+		if (INTEL_GEN(i915) >= 11)
+			return -ENODEV;
+
+		/* Return -EPERM to trigger fallback code on old binaries. */
+		if (!HAS_SECURE_BATCHES(i915))
+			return -EPERM;
+
 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
 			return -EPERM;

 		eb.batch_flags |= I915_DISPATCH_SECURE;
 	}

@@ -2530,34 +2594,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}

+	if (eb.batch_len == 0)
+		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
 	if (eb_use_cmdparser(&eb)) {
 		struct i915_vma *vma;

-		vma = eb_parse(&eb, drm_is_current_master(file));
+		vma = eb_parse(&eb);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_vma;
 		}
-
-		if (vma) {
-			/*
-			 * Batch parsed and accepted:
-			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-			 * bit from MI_BATCH_BUFFER_START commands issued in
-			 * the dispatch_execbuffer implementations. We
-			 * specifically don't want that set on batches the
-			 * command parser has accepted.
-			 */
-			eb.batch_flags |= I915_DISPATCH_SECURE;
-			eb.batch_start_offset = 0;
-			eb.batch = vma;
-		}
 	}

-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
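As a side note, the batch_start/shadow_batch_start values above are put into canonical form with gen8_canonical_addr(), i.e. the 48-bit GPU address is sign-extended into 64 bits. A minimal userspace sketch of that sign extension (assuming bit 47 is the high address bit, which is how I read the surrounding code; not the driver's own implementation):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 48-bit GPU address into its canonical 64-bit form. */
static uint64_t canonical_addr(uint64_t address)
{
    return (uint64_t)((int64_t)(address << 16) >> 16);
}

int main(void)
{
    /* An address with bit 47 set gets its upper 16 bits filled with 1s. */
    printf("%#llx\n",
           (unsigned long long)canonical_addr(0x0000800000000000ull));
    /* prints 0xffff800000000000 */
    return 0;
}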
@@ -475,12 +475,13 @@ struct intel_engine_cs {

 	struct intel_engine_hangcheck hangcheck;

-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
 #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
 #define I915_ENGINE_IS_VIRTUAL       BIT(5)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
 	unsigned int flags;

 	/*

@@ -541,9 +542,15 @@ struct intel_engine_cs {
 };

 static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
 {
-	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
 }

 static inline bool
@@ -38,6 +38,9 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!gt->awake);

+	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
 	intel_enable_gt_powersave(i915);

 	i915_update_gfx_val(i915);

@@ -67,6 +70,11 @@ static int __gt_park(struct intel_wakeref *wf)
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_idle(i915);

+	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+		i915_rc6_ctx_wa_check(i915);
+		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	}
+
 	/* Everything switched off, flush any residual interrupt just in case */
 	intel_synchronize_irq(i915);
@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 	MOCS_ENTRY(15, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
 		   L3_3_WB), \
-	/* Bypass LLC - Uncached (EHL+) */ \
-	MOCS_ENTRY(16, \
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-		   L3_1_UC), \
-	/* Bypass LLC - L3 (Read-Only) (EHL+) */ \
-	MOCS_ENTRY(17, \
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-		   L3_3_WB), \
 	/* Self-Snoop - L3 + LLC */ \
 	MOCS_ENTRY(18, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \

@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
 		   L3_1_UC),
 	/* HW Special Case (Displayable) */
 	MOCS_ENTRY(61,
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+		   LE_1_UC | LE_TC_1_LLC,
 		   L3_3_WB),
 };
@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		goto out_free_gem;
 	}

-	i915_gem_object_put(obj);
-
 	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
 	if (ret < 0) {
 		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);

@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		   file_count(dmabuf->file),
 		   kref_read(&obj->base.refcount));

+	i915_gem_object_put(obj);
+
 	return dmabuf_fd;

 out_free_dmabuf:
@@ -53,13 +53,11 @@
  * granting userspace undue privileges. There are three categories of privilege.
  *
  * First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands
  *
  * Second, commands which access registers. To support correct/enhanced
  * userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access
  *
  * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
  * The parser always rejects such commands.
@@ -84,9 +82,9 @@
  * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
  */
 
 /*
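The three privilege categories in the comment above correspond to the descriptor flags used later in this file (reject, register whitelist, operand bitmask checks). As a minimal sketch only — with simplified flag and type names of its own, not the driver's exact control flow, which is spread over find_cmd(), check_cmd() and the register/bitmask helpers — the per-command decision has roughly this shape:

/* Simplified, illustrative classification of one command descriptor. */
enum verdict { ALLOW, REJECT, CHECK_REGISTERS, CHECK_OPERANDS };

struct cmd_desc {
	unsigned int flags;
#define DESC_SKIP     (1u << 0)  /* explicitly trusted, no further checks     */
#define DESC_REJECT   (1u << 1)  /* category 1: never allowed from userspace  */
#define DESC_REGISTER (1u << 2)  /* category 2: offsets go to the whitelist   */
#define DESC_BITMASK  (1u << 3)  /* category 3: operand bits are validated    */
};

static enum verdict classify(const struct cmd_desc *desc)
{
	if (desc->flags & DESC_SKIP)
		return ALLOW;
	if (desc->flags & DESC_REJECT)
		return REJECT;
	if (desc->flags & DESC_REGISTER)
		return CHECK_REGISTERS;
	if (desc->flags & DESC_BITMASK)
		return CHECK_OPERANDS;
	return ALLOW;
}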
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
 	 * CMD_DESC_REJECT: The command is never allowed
 	 * CMD_DESC_REGISTER: The command should be checked against the
 	 *                    register whitelist for the appropriate ring
-	 * CMD_DESC_MASTER: The command is allowed if the submitting process
-	 *                  is the DRM master
 	 */
 	u32 flags;
 #define CMD_DESC_FIXED    (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
 #define CMD_DESC_REJECT   (1<<2)
 #define CMD_DESC_REGISTER (1<<3)
 #define CMD_DESC_BITMASK  (1<<4)
-#define CMD_DESC_MASTER   (1<<5)
 
 	/*
 	 * The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
 #define CMD(op, opm, f, lm, fl, ...)				\
 	{							\
 		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
-		.cmd = { (op), ~0u << (opm) },			\
+		.cmd = { (op & ~0u << (opm)), ~0u << (opm) },	\
 		.length = { (lm) },				\
 		__VA_ARGS__					\
 	}
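The only functional change in the CMD() macro above is that the stored opcode is now masked with the same mask kept alongside it, so a descriptor still matches when the opcode constant happens to carry bits inside the length/operand field. A small standalone illustration of that match test (the opcode value and field width below are made up for the example, not taken from the hardware tables):

#include <stdint.h>
#include <stdio.h>

/* A command matches a descriptor when (header & mask) == value. */
struct cmd_match { uint32_t value, mask; };

static int matches(struct cmd_match m, uint32_t header)
{
	return (header & m.mask) == m.value;
}

int main(void)
{
	uint32_t opm = 23, op = 0x18800001;      /* hypothetical opcode constant with a low bit set */
	struct cmd_match before = { op,                ~0u << opm };  /* pre-fix  */
	struct cmd_match after  = { op & (~0u << opm), ~0u << opm };  /* post-fix */
	uint32_t header = 0x18800040;            /* same opcode, different operand bits */

	printf("before: %d  after: %d\n",
	       matches(before, header), matches(after, header));
	return 0;
}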
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
 #define R CMD_DESC_REJECT
 #define W CMD_DESC_REGISTER
 #define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
 
 /*            Command                          Mask   Fixed Len   Action
 	      ---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
 	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
 	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
-	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
+	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
 	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
 	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
 	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
|
||||||
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
|
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor render_cmds[] = {
|
static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
|
||||||
CMD( MI_FLUSH, SMI, F, 1, S ),
|
CMD( MI_FLUSH, SMI, F, 1, S ),
|
||||||
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
||||||
CMD( MI_PREDICATE, SMI, F, 1, S ),
|
CMD( MI_PREDICATE, SMI, F, 1, S ),
|
||||||
|
@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
|
||||||
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
|
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
|
||||||
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
||||||
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
|
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
|
||||||
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
|
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
|
||||||
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
|
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
|
||||||
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
|
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
|
||||||
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
|
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
|
||||||
|
@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
|
||||||
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
|
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor video_cmds[] = {
|
static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
|
||||||
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
||||||
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
||||||
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
|
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
|
||||||
|
@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
|
||||||
CMD( MFX_WAIT, SMFX, F, 1, S ),
|
CMD( MFX_WAIT, SMFX, F, 1, S ),
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
|
static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
|
||||||
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
|
||||||
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
CMD( MI_SET_APPID, SMI, F, 1, S ),
|
||||||
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
|
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
|
||||||
|
@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
|
||||||
}}, ),
|
}}, ),
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
|
static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
|
||||||
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
|
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
|
||||||
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
|
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
|
||||||
.bits = {{
|
.bits = {{
|
||||||
|
@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
|
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
|
||||||
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
|
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
|
||||||
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
|
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For Gen9 we can still rely on the h/w to enforce cmd security, and only
|
||||||
|
* need to re-enforce the register access checks. We therefore only need to
|
||||||
|
* teach the cmdparser how to find the end of each command, and identify
|
||||||
|
* register accesses. The table doesn't need to reject any commands, and so
|
||||||
|
* the only commands listed here are:
|
||||||
|
* 1) Those that touch registers
|
||||||
|
* 2) Those that do not have the default 8-bit length
|
||||||
|
*
|
||||||
|
* Note that the default MI length mask chosen for this table is 0xFF, not
|
||||||
|
* the 0x3F used on older devices. This is because the vast majority of MI
|
||||||
|
* cmds on Gen9 use a standard 8-bit Length field.
|
||||||
|
* All the Gen9 blitter instructions are standard 0xFF length mask, and
|
||||||
|
* none allow access to non-general registers, so in fact no BLT cmds are
|
||||||
|
* included in the table at all.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
|
||||||
|
CMD( MI_NOOP, SMI, F, 1, S ),
|
||||||
|
CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
|
||||||
|
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
|
||||||
|
CMD( MI_FLUSH, SMI, F, 1, S ),
|
||||||
|
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
|
||||||
|
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
|
||||||
|
CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
|
||||||
|
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
|
||||||
|
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
|
||||||
|
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
|
||||||
|
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
|
||||||
|
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
|
||||||
|
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
|
||||||
|
CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
|
||||||
|
CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
|
||||||
|
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
|
||||||
|
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
|
||||||
|
CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
|
||||||
|
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
|
||||||
|
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
|
||||||
|
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We allow BB_START but apply further checks. We just sanitize the
|
||||||
|
* basic fields here.
|
||||||
|
*/
|
||||||
|
#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
|
||||||
|
#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
|
||||||
|
CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
|
||||||
|
.bits = {{
|
||||||
|
.offset = 0,
|
||||||
|
.mask = MI_BB_START_OPERAND_MASK,
|
||||||
|
.expected = MI_BB_START_OPERAND_EXPECT,
|
||||||
|
}}, ),
|
||||||
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_descriptor noop_desc =
|
static const struct drm_i915_cmd_descriptor noop_desc =
|
||||||
CMD(MI_NOOP, SMI, F, 1, S);
|
CMD(MI_NOOP, SMI, F, 1, S);
|
||||||
|
|
||||||
|
@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
|
||||||
#undef R
|
#undef R
|
||||||
#undef W
|
#undef W
|
||||||
#undef B
|
#undef B
|
||||||
#undef M
|
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
|
static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ render_cmds, ARRAY_SIZE(render_cmds) },
|
{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
|
static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ render_cmds, ARRAY_SIZE(render_cmds) },
|
{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
|
||||||
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
|
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table gen7_video_cmds[] = {
|
static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ video_cmds, ARRAY_SIZE(video_cmds) },
|
{ gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
|
static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
|
{ gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
|
static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
|
{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
|
static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
|
||||||
{ common_cmds, ARRAY_SIZE(common_cmds) },
|
{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
|
||||||
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
|
{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
|
||||||
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
|
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
|
||||||
|
{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Register whitelists, sorted by increasing register offset.
|
* Register whitelists, sorted by increasing register offset.
|
||||||
*/
|
*/
|
||||||
|
@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
|
||||||
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
|
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
|
static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
|
||||||
REG32(FORCEWAKE_MT),
|
REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
|
||||||
REG32(DERRMR),
|
REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
|
||||||
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
|
REG32(BCS_SWCTRL),
|
||||||
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
|
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
|
||||||
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
|
REG64_IDX(BCS_GPR, 0),
|
||||||
};
|
REG64_IDX(BCS_GPR, 1),
|
||||||
|
REG64_IDX(BCS_GPR, 2),
|
||||||
static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
|
REG64_IDX(BCS_GPR, 3),
|
||||||
REG32(FORCEWAKE_MT),
|
REG64_IDX(BCS_GPR, 4),
|
||||||
REG32(DERRMR),
|
REG64_IDX(BCS_GPR, 5),
|
||||||
|
REG64_IDX(BCS_GPR, 6),
|
||||||
|
REG64_IDX(BCS_GPR, 7),
|
||||||
|
REG64_IDX(BCS_GPR, 8),
|
||||||
|
REG64_IDX(BCS_GPR, 9),
|
||||||
|
REG64_IDX(BCS_GPR, 10),
|
||||||
|
REG64_IDX(BCS_GPR, 11),
|
||||||
|
REG64_IDX(BCS_GPR, 12),
|
||||||
|
REG64_IDX(BCS_GPR, 13),
|
||||||
|
REG64_IDX(BCS_GPR, 14),
|
||||||
|
REG64_IDX(BCS_GPR, 15),
|
||||||
};
|
};
|
||||||
|
|
||||||
#undef REG64
|
#undef REG64
|
||||||
|
@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
|
||||||
struct drm_i915_reg_table {
|
struct drm_i915_reg_table {
|
||||||
const struct drm_i915_reg_descriptor *regs;
|
const struct drm_i915_reg_descriptor *regs;
|
||||||
int num_regs;
|
int num_regs;
|
||||||
bool master;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
|
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
|
||||||
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
|
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
|
||||||
{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
|
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
|
||||||
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
|
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
|
||||||
{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
|
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
|
||||||
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
|
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
|
||||||
{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
|
{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
|
||||||
{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
|
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
|
||||||
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
|
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
|
||||||
{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
|
};
|
||||||
|
|
||||||
|
static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
|
||||||
|
{ gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
|
||||||
};
|
};
|
||||||
|
|
||||||
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
|
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
|
||||||
|
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+		return 0xFF;
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
 static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
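For a descriptor without CMD_DESC_FIXED, the parser derives a command's length in dwords from the header bits selected by the mask above, plus a bias (LENGTH_BIAS, defined as 2 further down in this file). A hedged standalone sketch of that calculation — just the arithmetic, not the driver's surrounding lookup logic:

#include <assert.h>
#include <stdint.h>

#define LENGTH_BIAS 2  /* matches the define used later in i915_cmd_parser.c */

/* Decode a variable-length command's dword count from its header, given the
 * per-engine length mask (e.g. 0xFF on the Gen9 blitter, 0x3F on older parts). */
static uint32_t cmd_length(uint32_t header, uint32_t length_mask)
{
	return (header & length_mask) + LENGTH_BIAS;
}

int main(void)
{
	/* Example header whose low byte encodes "3", i.e. 5 dwords total. */
	assert(cmd_length(0x13000003, 0xFF) == 5);
	return 0;
}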
@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
|
||||||
int cmd_table_count;
|
int cmd_table_count;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!IS_GEN(engine->i915, 7))
|
if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
|
||||||
|
engine->class == COPY_ENGINE_CLASS))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
switch (engine->class) {
|
switch (engine->class) {
|
||||||
case RENDER_CLASS:
|
case RENDER_CLASS:
|
||||||
if (IS_HASWELL(engine->i915)) {
|
if (IS_HASWELL(engine->i915)) {
|
||||||
cmd_tables = hsw_render_ring_cmds;
|
cmd_tables = hsw_render_ring_cmd_table;
|
||||||
cmd_table_count =
|
cmd_table_count =
|
||||||
ARRAY_SIZE(hsw_render_ring_cmds);
|
ARRAY_SIZE(hsw_render_ring_cmd_table);
|
||||||
} else {
|
} else {
|
||||||
cmd_tables = gen7_render_cmds;
|
cmd_tables = gen7_render_cmd_table;
|
||||||
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
|
cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_HASWELL(engine->i915)) {
|
if (IS_HASWELL(engine->i915)) {
|
||||||
|
@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
|
||||||
engine->reg_tables = ivb_render_reg_tables;
|
engine->reg_tables = ivb_render_reg_tables;
|
||||||
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
|
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
|
||||||
}
|
}
|
||||||
|
|
||||||
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
|
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
|
||||||
break;
|
break;
|
||||||
case VIDEO_DECODE_CLASS:
|
case VIDEO_DECODE_CLASS:
|
||||||
cmd_tables = gen7_video_cmds;
|
cmd_tables = gen7_video_cmd_table;
|
||||||
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
|
cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
|
||||||
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
|
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
|
||||||
break;
|
break;
|
||||||
case COPY_ENGINE_CLASS:
|
case COPY_ENGINE_CLASS:
|
||||||
if (IS_HASWELL(engine->i915)) {
|
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
|
||||||
cmd_tables = hsw_blt_ring_cmds;
|
if (IS_GEN(engine->i915, 9)) {
|
||||||
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
|
cmd_tables = gen9_blt_cmd_table;
|
||||||
|
cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
|
||||||
|
engine->get_cmd_length_mask =
|
||||||
|
gen9_blt_get_cmd_length_mask;
|
||||||
|
|
||||||
|
/* BCS Engine unsafe without parser */
|
||||||
|
engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
|
||||||
|
} else if (IS_HASWELL(engine->i915)) {
|
||||||
|
cmd_tables = hsw_blt_ring_cmd_table;
|
||||||
|
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
|
||||||
} else {
|
} else {
|
||||||
cmd_tables = gen7_blt_cmds;
|
cmd_tables = gen7_blt_cmd_table;
|
||||||
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
|
cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_HASWELL(engine->i915)) {
|
if (IS_GEN(engine->i915, 9)) {
|
||||||
|
engine->reg_tables = gen9_blt_reg_tables;
|
||||||
|
engine->reg_table_count =
|
||||||
|
ARRAY_SIZE(gen9_blt_reg_tables);
|
||||||
|
} else if (IS_HASWELL(engine->i915)) {
|
||||||
engine->reg_tables = hsw_blt_reg_tables;
|
engine->reg_tables = hsw_blt_reg_tables;
|
||||||
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
|
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
|
||||||
} else {
|
} else {
|
||||||
engine->reg_tables = ivb_blt_reg_tables;
|
engine->reg_tables = ivb_blt_reg_tables;
|
||||||
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
|
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
|
||||||
}
|
}
|
||||||
|
|
||||||
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
|
|
||||||
break;
|
break;
|
||||||
case VIDEO_ENHANCEMENT_CLASS:
|
case VIDEO_ENHANCEMENT_CLASS:
|
||||||
cmd_tables = hsw_vebox_cmds;
|
cmd_tables = hsw_vebox_cmd_table;
|
||||||
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
|
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
|
||||||
/* VECS can use the same length_mask function as VCS */
|
/* VECS can use the same length_mask function as VCS */
|
||||||
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
|
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
|
||||||
break;
|
break;
|
||||||
|
@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
|
engine->flags |= I915_ENGINE_USING_CMD_PARSER;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
|
||||||
*/
|
*/
|
||||||
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
|
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
if (!intel_engine_needs_cmd_parser(engine))
|
if (!intel_engine_using_cmd_parser(engine))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
fini_hash_table(engine);
|
fini_hash_table(engine);
|
||||||
|
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
 {
 	const struct drm_i915_reg_table *table = engine->reg_tables;
+	const struct drm_i915_reg_descriptor *reg = NULL;
 	int count = engine->reg_table_count;
 
-	for (; count > 0; ++table, --count) {
-		if (!table->master || is_master) {
-			const struct drm_i915_reg_descriptor *reg;
+	for (; !reg && (count > 0); ++table, --count)
+		reg = __find_reg(table->regs, table->num_regs, addr);
 
-			reg = __find_reg(table->regs, table->num_regs, addr);
-			if (reg != NULL)
-				return reg;
-		}
-	}
-
-	return NULL;
+	return reg;
 }
 
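After this change a register-access check no longer depends on who submitted the batch: the parser masks the register offset out of the command and looks it up in every table attached to the engine, rejecting the command if nothing matches (the later check_cmd() hunk shows the caller side). A minimal sketch of that lookup over several tables, with illustrative types and a linear scan in place of the driver's sorted lookup:

#include <stddef.h>
#include <stdint.h>

struct reg_desc  { uint32_t addr; };
struct reg_table { const struct reg_desc *regs; int num_regs; };

/* Scan each whitelist table in turn; any hit allows the access. */
static const struct reg_desc *
lookup_reg(const struct reg_table *tables, int count, uint32_t addr)
{
	for (; count > 0; tables++, count--) {
		for (int i = 0; i < tables->num_regs; i++)
			if (tables->regs[i].addr == addr)
				return &tables->regs[i];
	}
	return NULL;  /* not whitelisted -> caller rejects the command */
}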
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
|
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
|
||||||
|
@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
|
||||||
|
|
||||||
static bool check_cmd(const struct intel_engine_cs *engine,
|
static bool check_cmd(const struct intel_engine_cs *engine,
|
||||||
const struct drm_i915_cmd_descriptor *desc,
|
const struct drm_i915_cmd_descriptor *desc,
|
||||||
const u32 *cmd, u32 length,
|
const u32 *cmd, u32 length)
|
||||||
const bool is_master)
|
|
||||||
{
|
{
|
||||||
if (desc->flags & CMD_DESC_SKIP)
|
if (desc->flags & CMD_DESC_SKIP)
|
||||||
return true;
|
return true;
|
||||||
|
@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
|
|
||||||
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
|
|
||||||
*cmd);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (desc->flags & CMD_DESC_REGISTER) {
|
if (desc->flags & CMD_DESC_REGISTER) {
|
||||||
/*
|
/*
|
||||||
* Get the distance between individual register offset
|
* Get the distance between individual register offset
|
||||||
|
@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||||||
offset += step) {
|
offset += step) {
|
||||||
const u32 reg_addr = cmd[offset] & desc->reg.mask;
|
const u32 reg_addr = cmd[offset] & desc->reg.mask;
|
||||||
const struct drm_i915_reg_descriptor *reg =
|
const struct drm_i915_reg_descriptor *reg =
|
||||||
find_reg(engine, is_master, reg_addr);
|
find_reg(engine, reg_addr);
|
||||||
|
|
||||||
if (!reg) {
|
if (!reg) {
|
||||||
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
|
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
|
||||||
|
@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int check_bbstart(const struct i915_gem_context *ctx,
|
||||||
|
u32 *cmd, u32 offset, u32 length,
|
||||||
|
u32 batch_len,
|
||||||
|
u64 batch_start,
|
||||||
|
u64 shadow_batch_start)
|
||||||
|
{
|
||||||
|
u64 jump_offset, jump_target;
|
||||||
|
u32 target_cmd_offset, target_cmd_index;
|
||||||
|
|
||||||
|
/* For igt compatibility on older platforms */
|
||||||
|
if (CMDPARSER_USES_GGTT(ctx->i915)) {
|
||||||
|
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
|
||||||
|
return -EACCES;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (length != 3) {
|
||||||
|
DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
|
||||||
|
length);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
jump_target = *(u64*)(cmd+1);
|
||||||
|
jump_offset = jump_target - batch_start;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Any underflow of jump_target is guaranteed to be outside the range
|
||||||
|
* of a u32, so >= test catches both too large and too small
|
||||||
|
*/
|
||||||
|
if (jump_offset >= batch_len) {
|
||||||
|
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
|
||||||
|
jump_target);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This cannot overflow a u32 because we already checked jump_offset
|
||||||
|
* is within the BB, and the batch_len is a u32
|
||||||
|
*/
|
||||||
|
target_cmd_offset = lower_32_bits(jump_offset);
|
||||||
|
target_cmd_index = target_cmd_offset / sizeof(u32);
|
||||||
|
|
||||||
|
*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
|
||||||
|
|
||||||
|
if (target_cmd_index == offset)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (ctx->jump_whitelist_cmds <= target_cmd_index) {
|
||||||
|
DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
|
||||||
|
return -EINVAL;
|
||||||
|
} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
|
||||||
|
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
|
||||||
|
jump_target);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
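The bounds check in check_bbstart() above leans on unsigned wrap-around: if the jump target lies below batch_start, the u64 subtraction underflows to a value far outside any u32 batch length, so the single `jump_offset >= batch_len` comparison rejects both too-small and too-large targets. A tiny standalone illustration of that property (the addresses are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t batch_start = 0x100000, batch_len = 0x1000;

	/* Target below the batch: subtraction wraps to near 2^64, still >= len. */
	uint64_t below = 0xff000;
	assert(below - batch_start >= batch_len);

	/* Target past the end: the offset is simply too large. */
	uint64_t past = batch_start + batch_len;
	assert(past - batch_start >= batch_len);

	/* Target inside the batch passes the check. */
	uint64_t inside = batch_start + 0x40;
	assert(inside - batch_start < batch_len);
	return 0;
}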
|
||||||
|
|
||||||
|
static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
|
||||||
|
{
|
||||||
|
const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
|
||||||
|
const u32 exact_size = BITS_TO_LONGS(batch_cmds);
|
||||||
|
u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
|
||||||
|
unsigned long *next_whitelist;
|
||||||
|
|
||||||
|
if (CMDPARSER_USES_GGTT(ctx->i915))
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (batch_cmds <= ctx->jump_whitelist_cmds) {
|
||||||
|
bitmap_zero(ctx->jump_whitelist, batch_cmds);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
again:
|
||||||
|
next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
|
||||||
|
if (next_whitelist) {
|
||||||
|
kfree(ctx->jump_whitelist);
|
||||||
|
ctx->jump_whitelist = next_whitelist;
|
||||||
|
ctx->jump_whitelist_cmds =
|
||||||
|
next_size * BITS_PER_BYTE * sizeof(long);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (next_size > exact_size) {
|
||||||
|
next_size = exact_size;
|
||||||
|
goto again;
|
||||||
|
}
|
||||||
|
|
||||||
|
DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
|
||||||
|
bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
#define LENGTH_BIAS 2
|
#define LENGTH_BIAS 2
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
|
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
|
||||||
|
* @ctx: the context in which the batch is to execute
|
||||||
* @engine: the engine on which the batch is to execute
|
* @engine: the engine on which the batch is to execute
|
||||||
* @batch_obj: the batch buffer in question
|
* @batch_obj: the batch buffer in question
|
||||||
* @shadow_batch_obj: copy of the batch buffer in question
|
* @batch_start: Canonical base address of batch
|
||||||
* @batch_start_offset: byte offset in the batch at which execution starts
|
* @batch_start_offset: byte offset in the batch at which execution starts
|
||||||
* @batch_len: length of the commands in batch_obj
|
* @batch_len: length of the commands in batch_obj
|
||||||
* @is_master: is the submitting process the drm master?
|
* @shadow_batch_obj: copy of the batch buffer in question
|
||||||
|
* @shadow_batch_start: Canonical base address of shadow_batch_obj
|
||||||
*
|
*
|
||||||
* Parses the specified batch buffer looking for privilege violations as
|
* Parses the specified batch buffer looking for privilege violations as
|
||||||
* described in the overview.
|
* described in the overview.
|
||||||
|
@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||||||
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
|
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
|
||||||
* if the batch appears legal but should use hardware parsing
|
* if the batch appears legal but should use hardware parsing
|
||||||
*/
|
*/
|
||||||
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
|
||||||
|
int intel_engine_cmd_parser(struct i915_gem_context *ctx,
|
||||||
|
struct intel_engine_cs *engine,
|
||||||
struct drm_i915_gem_object *batch_obj,
|
struct drm_i915_gem_object *batch_obj,
|
||||||
struct drm_i915_gem_object *shadow_batch_obj,
|
u64 batch_start,
|
||||||
u32 batch_start_offset,
|
u32 batch_start_offset,
|
||||||
u32 batch_len,
|
u32 batch_len,
|
||||||
bool is_master)
|
struct drm_i915_gem_object *shadow_batch_obj,
|
||||||
|
u64 shadow_batch_start)
|
||||||
{
|
{
|
||||||
u32 *cmd, *batch_end;
|
u32 *cmd, *batch_end, offset = 0;
|
||||||
struct drm_i915_cmd_descriptor default_desc = noop_desc;
|
struct drm_i915_cmd_descriptor default_desc = noop_desc;
|
||||||
const struct drm_i915_cmd_descriptor *desc = &default_desc;
|
const struct drm_i915_cmd_descriptor *desc = &default_desc;
|
||||||
bool needs_clflush_after = false;
|
bool needs_clflush_after = false;
|
||||||
|
@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
||||||
return PTR_ERR(cmd);
|
return PTR_ERR(cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
init_whitelist(ctx, batch_len);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We use the batch length as size because the shadow object is as
|
* We use the batch length as size because the shadow object is as
|
||||||
* large or larger and copy_batch() will write MI_NOPs to the extra
|
* large or larger and copy_batch() will write MI_NOPs to the extra
|
||||||
|
@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
||||||
do {
|
do {
|
||||||
u32 length;
|
u32 length;
|
||||||
|
|
||||||
if (*cmd == MI_BATCH_BUFFER_END) {
|
if (*cmd == MI_BATCH_BUFFER_END)
|
||||||
if (needs_clflush_after) {
|
|
||||||
void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
|
|
||||||
drm_clflush_virt_range(ptr,
|
|
||||||
(void *)(cmd + 1) - ptr);
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
|
|
||||||
desc = find_cmd(engine, *cmd, desc, &default_desc);
|
desc = find_cmd(engine, *cmd, desc, &default_desc);
|
||||||
if (!desc) {
|
if (!desc) {
|
||||||
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
|
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
|
||||||
*cmd);
|
*cmd);
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
break;
|
goto err;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If the batch buffer contains a chained batch, return an
|
|
||||||
* error that tells the caller to abort and dispatch the
|
|
||||||
* workload as a non-secure batch.
|
|
||||||
*/
|
|
||||||
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
|
|
||||||
ret = -EACCES;
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (desc->flags & CMD_DESC_FIXED)
|
if (desc->flags & CMD_DESC_FIXED)
|
||||||
|
@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
||||||
length,
|
length,
|
||||||
batch_end - cmd);
|
batch_end - cmd);
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!check_cmd(engine, desc, cmd, length)) {
|
||||||
|
ret = -EACCES;
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
|
||||||
|
ret = check_bbstart(ctx, cmd, offset, length,
|
||||||
|
batch_len, batch_start,
|
||||||
|
shadow_batch_start);
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
goto err;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!check_cmd(engine, desc, cmd, length, is_master)) {
|
if (ctx->jump_whitelist_cmds > offset)
|
||||||
ret = -EACCES;
|
set_bit(offset, ctx->jump_whitelist);
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd += length;
|
cmd += length;
|
||||||
|
offset += length;
|
||||||
if (cmd >= batch_end) {
|
if (cmd >= batch_end) {
|
||||||
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
|
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
break;
|
goto err;
|
||||||
}
|
}
|
||||||
} while (1);
|
} while (1);
|
||||||
|
|
||||||
|
if (needs_clflush_after) {
|
||||||
|
void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
|
||||||
|
|
||||||
|
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
err:
|
||||||
i915_gem_object_unpin_map(shadow_batch_obj);
|
i915_gem_object_unpin_map(shadow_batch_obj);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
|
||||||
|
|
||||||
/* If the command parser is not enabled, report 0 - unsupported */
|
/* If the command parser is not enabled, report 0 - unsupported */
|
||||||
for_each_uabi_engine(engine, dev_priv) {
|
for_each_uabi_engine(engine, dev_priv) {
|
||||||
if (intel_engine_needs_cmd_parser(engine)) {
|
if (intel_engine_using_cmd_parser(engine)) {
|
||||||
active = true;
|
active = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
|
||||||
* the parser enabled.
|
* the parser enabled.
|
||||||
* 9. Don't whitelist or handle oacontrol specially, as ownership
|
* 9. Don't whitelist or handle oacontrol specially, as ownership
|
||||||
* for oacontrol state is moving to i915-perf.
|
* for oacontrol state is moving to i915-perf.
|
||||||
|
* 10. Support for Gen9 BCS Parsing
|
||||||
*/
|
*/
|
||||||
return 9;
|
return 10;
|
||||||
}
|
}
|
||||||
|
|
|
@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
|
||||||
if (ret)
|
if (ret)
|
||||||
goto cleanup_vga_client;
|
goto cleanup_vga_client;
|
||||||
|
|
||||||
/* must happen before intel_power_domains_init_hw() on VLV/CHV */
|
|
||||||
intel_update_rawclk(dev_priv);
|
|
||||||
|
|
||||||
intel_power_domains_init_hw(dev_priv, false);
|
intel_power_domains_init_hw(dev_priv, false);
|
||||||
|
|
||||||
intel_csr_ucode_init(dev_priv);
|
intel_csr_ucode_init(dev_priv);
|
||||||
|
@ -1850,6 +1847,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
|
||||||
|
|
||||||
i915_gem_suspend_late(dev_priv);
|
i915_gem_suspend_late(dev_priv);
|
||||||
|
|
||||||
|
i915_rc6_ctx_wa_suspend(dev_priv);
|
||||||
|
|
||||||
intel_uncore_suspend(&dev_priv->uncore);
|
intel_uncore_suspend(&dev_priv->uncore);
|
||||||
|
|
||||||
intel_power_domains_suspend(dev_priv,
|
intel_power_domains_suspend(dev_priv,
|
||||||
|
@ -2053,6 +2052,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
||||||
|
|
||||||
intel_power_domains_resume(dev_priv);
|
intel_power_domains_resume(dev_priv);
|
||||||
|
|
||||||
|
i915_rc6_ctx_wa_resume(dev_priv);
|
||||||
|
|
||||||
intel_gt_sanitize(&dev_priv->gt, true);
|
intel_gt_sanitize(&dev_priv->gt, true);
|
||||||
|
|
||||||
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
||||||
|
|
|
@ -593,6 +593,8 @@ struct intel_rps {
|
||||||
|
|
||||||
struct intel_rc6 {
|
struct intel_rc6 {
|
||||||
bool enabled;
|
bool enabled;
|
||||||
|
bool ctx_corrupted;
|
||||||
|
intel_wakeref_t ctx_corrupted_wakeref;
|
||||||
u64 prev_hw_residency[4];
|
u64 prev_hw_residency[4];
|
||||||
u64 cur_residency[4];
|
u64 cur_residency[4];
|
||||||
};
|
};
|
||||||
|
@ -2075,9 +2077,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
||||||
#define VEBOX_MASK(dev_priv) \
|
#define VEBOX_MASK(dev_priv) \
|
||||||
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
|
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
|
||||||
|
* All later gens can run the final buffer from the ppgtt
|
||||||
|
*/
|
||||||
|
#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
|
||||||
|
|
||||||
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
|
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
|
||||||
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
|
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
|
||||||
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
|
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
|
||||||
|
#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
|
||||||
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
|
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
|
||||||
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
|
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
|
||||||
|
|
||||||
|
@ -2110,10 +2119,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
||||||
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
|
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
|
||||||
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
|
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
|
||||||
|
|
||||||
|
#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
|
||||||
|
(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
|
||||||
|
|
||||||
/* WaRsDisableCoarsePowerGating:skl,cnl */
|
/* WaRsDisableCoarsePowerGating:skl,cnl */
|
||||||
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
|
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
|
||||||
(IS_CANNONLAKE(dev_priv) || \
|
(IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
|
||||||
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
|
|
||||||
|
|
||||||
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
|
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
|
||||||
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
|
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
|
||||||
|
@ -2284,6 +2295,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
|
||||||
unsigned long flags);
|
unsigned long flags);
|
||||||
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
|
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
|
||||||
|
|
||||||
|
struct i915_vma * __must_check
|
||||||
|
i915_gem_object_pin(struct drm_i915_gem_object *obj,
|
||||||
|
struct i915_address_space *vm,
|
||||||
|
const struct i915_ggtt_view *view,
|
||||||
|
u64 size,
|
||||||
|
u64 alignment,
|
||||||
|
u64 flags);
|
||||||
|
|
||||||
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
|
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
|
||||||
|
|
||||||
static inline int __must_check
|
static inline int __must_check
|
||||||
|
@ -2393,12 +2412,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
|
||||||
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
|
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
|
||||||
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
|
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
|
||||||
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
|
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
|
||||||
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
int intel_engine_cmd_parser(struct i915_gem_context *cxt,
|
||||||
|
struct intel_engine_cs *engine,
|
||||||
struct drm_i915_gem_object *batch_obj,
|
struct drm_i915_gem_object *batch_obj,
|
||||||
struct drm_i915_gem_object *shadow_batch_obj,
|
u64 user_batch_start,
|
||||||
u32 batch_start_offset,
|
u32 batch_start_offset,
|
||||||
u32 batch_len,
|
u32 batch_len,
|
||||||
bool is_master);
|
struct drm_i915_gem_object *shadow_batch_obj,
|
||||||
|
u64 shadow_batch_start);
|
||||||
|
|
||||||
/* intel_device_info.c */
|
/* intel_device_info.c */
|
||||||
static inline struct intel_device_info *
|
static inline struct intel_device_info *
|
||||||
|
|
|
@ -964,6 +964,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||||
struct i915_address_space *vm = &dev_priv->ggtt.vm;
|
struct i915_address_space *vm = &dev_priv->ggtt.vm;
|
||||||
|
|
||||||
|
return i915_gem_object_pin(obj, vm, view, size, alignment,
|
||||||
|
flags | PIN_GLOBAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct i915_vma *
|
||||||
|
i915_gem_object_pin(struct drm_i915_gem_object *obj,
|
||||||
|
struct i915_address_space *vm,
|
||||||
|
const struct i915_ggtt_view *view,
|
||||||
|
u64 size,
|
||||||
|
u64 alignment,
|
||||||
|
u64 flags)
|
||||||
|
{
|
||||||
|
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||||
struct i915_vma *vma;
|
struct i915_vma *vma;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -1038,7 +1052,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
|
ret = i915_vma_pin(vma, size, alignment, flags);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
|
||||||
|
|
|
@@ -62,7 +62,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
-		value = capable(CAP_SYS_ADMIN);
+		value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
 		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version(i915);
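With this change, I915_PARAM_HAS_SECURE_BATCHES reports true only on hardware that still honours secure batches (per the HAS_SECURE_BATCHES() definition added in i915_drv.h) and only to CAP_SYS_ADMIN callers. Userspace can probe it through the usual getparam ioctl; a hedged example, with error handling trimmed and the render-node path an assumption for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);  /* assumed device node */
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SECURE_BATCHES,
		.value = &value,
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("secure batches supported: %d\n", value);
	return 0;
}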
|
@ -471,6 +471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||||
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
|
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
|
||||||
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
|
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
|
||||||
|
|
||||||
|
#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
|
||||||
|
|
||||||
#define GAC_ECO_BITS _MMIO(0x14090)
|
#define GAC_ECO_BITS _MMIO(0x14090)
|
||||||
#define ECOBITS_SNB_BIT (1 << 13)
|
#define ECOBITS_SNB_BIT (1 << 13)
|
||||||
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
|
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
|
||||||
|
@ -555,6 +557,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||||
*/
|
*/
|
||||||
#define BCS_SWCTRL _MMIO(0x22200)
|
#define BCS_SWCTRL _MMIO(0x22200)
|
||||||
|
|
||||||
|
/* There are 16 GPR registers */
|
||||||
|
#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
|
||||||
|
#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
|
||||||
|
|
||||||
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
|
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
|
||||||
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
|
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
|
||||||
#define HS_INVOCATION_COUNT _MMIO(0x2300)
|
#define HS_INVOCATION_COUNT _MMIO(0x2300)
|
||||||
|
@ -7211,6 +7217,10 @@ enum {
|
||||||
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
|
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
|
||||||
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
|
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
|
||||||
|
|
||||||
|
/* Display Internal Timeout Register */
|
||||||
|
#define RM_TIMEOUT _MMIO(0x42060)
|
||||||
|
#define MMIO_TIMEOUT_US(us) ((us) << 0)
|
||||||
|
|
||||||
/* interrupts */
|
/* interrupts */
|
||||||
#define DE_MASTER_IRQ_CONTROL (1 << 31)
|
#define DE_MASTER_IRQ_CONTROL (1 << 31)
|
||||||
#define DE_SPRITEB_FLIP_DONE (1 << 29)
|
#define DE_SPRITEB_FLIP_DONE (1 << 29)
|
||||||
|
|
|
@ -126,6 +126,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||||
*/
|
*/
|
||||||
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
|
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
|
||||||
PWM1_GATING_DIS | PWM2_GATING_DIS);
|
PWM1_GATING_DIS | PWM2_GATING_DIS);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Lower the display internal timeout.
|
||||||
|
* This is needed to avoid any hard hangs when DSI port PLL
|
||||||
|
* is off and a MMIO access is attempted by any privilege
|
||||||
|
* application, using batch buffers or any other means.
|
||||||
|
*/
|
||||||
|
I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
|
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||||
|
@ -8544,6 +8552,100 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
|
||||||
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
|
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
|
||||||
|
{
|
||||||
|
return !I915_READ(GEN8_RC6_CTX_INFO);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (i915_rc6_ctx_corrupted(i915)) {
|
||||||
|
DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted = true;
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted_wakeref =
|
||||||
|
intel_runtime_pm_get(&i915->runtime_pm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
if (i915->gt_pm.rc6.ctx_corrupted) {
|
||||||
|
intel_runtime_pm_put(&i915->runtime_pm,
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted_wakeref);
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
|
||||||
|
* @i915: i915 device
|
||||||
|
*
|
||||||
|
* Perform any steps needed to clean up the RC6 CTX WA before system suspend.
|
||||||
|
*/
|
||||||
|
void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
if (i915->gt_pm.rc6.ctx_corrupted)
|
||||||
|
intel_runtime_pm_put(&i915->runtime_pm,
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted_wakeref);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
|
||||||
|
* @i915: i915 device
|
||||||
|
*
|
||||||
|
* Perform any steps needed to re-init the RC6 CTX WA after system resume.
|
||||||
|
*/
|
||||||
|
void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
if (!i915->gt_pm.rc6.ctx_corrupted)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (i915_rc6_ctx_corrupted(i915)) {
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted_wakeref =
|
||||||
|
intel_runtime_pm_get(&i915->runtime_pm);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void intel_disable_rc6(struct drm_i915_private *dev_priv);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
|
||||||
|
* @i915: i915 device
|
||||||
|
*
|
||||||
|
* Check if an RC6 CTX corruption has happened since the last check and if so
|
||||||
|
* disable RC6 and runtime power management.
|
||||||
|
*
|
||||||
|
* Return false if no context corruption has happened since the last call of
|
||||||
|
* this function, true otherwise.
|
||||||
|
*/
|
||||||
|
bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (i915->gt_pm.rc6.ctx_corrupted)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (!i915_rc6_ctx_corrupted(i915))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
|
||||||
|
|
||||||
|
intel_disable_rc6(i915);
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted = true;
|
||||||
|
i915->gt_pm.rc6.ctx_corrupted_wakeref =
|
||||||
|
intel_runtime_pm_get_noresume(&i915->runtime_pm);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
|
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
struct intel_rps *rps = &dev_priv->gt_pm.rps;
|
struct intel_rps *rps = &dev_priv->gt_pm.rps;
|
||||||
|
@ -8557,6 +8659,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
|
||||||
pm_runtime_get(&dev_priv->drm.pdev->dev);
|
pm_runtime_get(&dev_priv->drm.pdev->dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
i915_rc6_ctx_wa_init(dev_priv);
|
||||||
|
|
||||||
/* Initialize RPS limits (for userspace) */
|
/* Initialize RPS limits (for userspace) */
|
||||||
if (IS_CHERRYVIEW(dev_priv))
|
if (IS_CHERRYVIEW(dev_priv))
|
||||||
cherryview_init_gt_powersave(dev_priv);
|
cherryview_init_gt_powersave(dev_priv);
|
||||||
|
@ -8595,6 +8699,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
|
||||||
if (IS_VALLEYVIEW(dev_priv))
|
if (IS_VALLEYVIEW(dev_priv))
|
||||||
valleyview_cleanup_gt_powersave(dev_priv);
|
valleyview_cleanup_gt_powersave(dev_priv);
|
||||||
|
|
||||||
|
i915_rc6_ctx_wa_cleanup(dev_priv);
|
||||||
|
|
||||||
if (!HAS_RC6(dev_priv))
|
if (!HAS_RC6(dev_priv))
|
||||||
pm_runtime_put(&dev_priv->drm.pdev->dev);
|
pm_runtime_put(&dev_priv->drm.pdev->dev);
|
||||||
}
|
}
|
||||||
|
@ -8623,7 +8729,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
|
||||||
i915->gt_pm.llc_pstate.enabled = false;
|
i915->gt_pm.llc_pstate.enabled = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_disable_rc6(struct drm_i915_private *dev_priv)
|
static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
|
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
|
||||||
|
|
||||||
|
@ -8642,6 +8748,15 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
|
||||||
dev_priv->gt_pm.rc6.enabled = false;
|
dev_priv->gt_pm.rc6.enabled = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void intel_disable_rc6(struct drm_i915_private *dev_priv)
|
||||||
|
{
|
||||||
|
struct intel_rps *rps = &dev_priv->gt_pm.rps;
|
||||||
|
|
||||||
|
mutex_lock(&rps->lock);
|
||||||
|
__intel_disable_rc6(dev_priv);
|
||||||
|
mutex_unlock(&rps->lock);
|
||||||
|
}
|
||||||
|
|
||||||
static void intel_disable_rps(struct drm_i915_private *dev_priv)
|
static void intel_disable_rps(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
|
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
|
||||||
|
@ -8667,7 +8782,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
mutex_lock(&dev_priv->gt_pm.rps.lock);
|
mutex_lock(&dev_priv->gt_pm.rps.lock);
|
||||||
|
|
||||||
intel_disable_rc6(dev_priv);
|
__intel_disable_rc6(dev_priv);
|
||||||
intel_disable_rps(dev_priv);
|
intel_disable_rps(dev_priv);
|
||||||
if (HAS_LLC(dev_priv))
|
if (HAS_LLC(dev_priv))
|
||||||
intel_disable_llc_pstate(dev_priv);
|
intel_disable_llc_pstate(dev_priv);
|
||||||
|
@ -8694,6 +8809,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
|
||||||
if (dev_priv->gt_pm.rc6.enabled)
|
if (dev_priv->gt_pm.rc6.enabled)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
if (dev_priv->gt_pm.rc6.ctx_corrupted)
|
||||||
|
return;
|
||||||
|
|
||||||
if (IS_CHERRYVIEW(dev_priv))
|
if (IS_CHERRYVIEW(dev_priv))
|
||||||
cherryview_enable_rc6(dev_priv);
|
cherryview_enable_rc6(dev_priv);
|
||||||
else if (IS_VALLEYVIEW(dev_priv))
|
else if (IS_VALLEYVIEW(dev_priv))
|
||||||
|
|
|
@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
|
||||||
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
|
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
|
||||||
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
|
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
|
||||||
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
|
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
|
||||||
|
bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
|
||||||
|
void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
|
||||||
|
void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
|
||||||
void gen6_rps_busy(struct drm_i915_private *dev_priv);
|
void gen6_rps_busy(struct drm_i915_private *dev_priv);
|
||||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||||
void gen6_rps_boost(struct i915_request *rq);
|
void gen6_rps_boost(struct i915_request *rq);
|
||||||
|
|
|
@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 
 	WARN_ON(!tcon->quirks->has_channel_0);
 
-	tcon->dclk_min_div = 6;
+	tcon->dclk_min_div = 1;
 	tcon->dclk_max_div = 127;
 	sun4i_tcon0_mode_set_common(tcon, mode);
 
|
@@ -626,6 +626,9 @@ static void intel_th_gth_switch(struct intel_th_device *thdev,
 	if (!count)
 		dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
 
+	/* De-assert the trigger */
+	iowrite32(0, gth->base + REG_CTS_CTL);
+
 	intel_th_gth_stop(gth, output, false);
 	intel_th_gth_start(gth, output);
 }
@@ -164,7 +164,7 @@ struct msc {
 };
 
 static LIST_HEAD(msu_buffer_list);
-static struct mutex msu_buffer_mutex;
+static DEFINE_MUTEX(msu_buffer_mutex);
 
 /**
  * struct msu_buffer_entry - internal MSU buffer bookkeeping
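The fix above matters because a `struct mutex` declared at file scope is not usable until it has been initialised; DEFINE_MUTEX() does that statically, the alternative being an explicit mutex_init() call before first use. A short fragment sketching the two equivalent options (names are illustrative):

#include <linux/init.h>
#include <linux/mutex.h>

/* Option 1: statically initialised, ready at first use (what the patch does). */
static DEFINE_MUTEX(example_lock);

/* Option 2: zero-initialised storage plus an explicit init before first lock. */
static struct mutex other_lock;

static int __init example_init(void)
{
	mutex_init(&other_lock);
	return 0;
}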
@@ -327,7 +327,7 @@ static size_t msc_win_total_sz(struct msc_window *win)
 		struct msc_block_desc *bdesc = sg_virt(sg);
 
 		if (msc_block_wrapped(bdesc))
-			return win->nr_blocks << PAGE_SHIFT;
+			return (size_t)win->nr_blocks << PAGE_SHIFT;
 
 		size += msc_total_sz(bdesc);
 		if (msc_block_last_written(bdesc))
@@ -1848,9 +1848,14 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
 	len = cp - buf;
 
 	mode = kstrndup(buf, len, GFP_KERNEL);
+	if (!mode)
+		return -ENOMEM;
+
 	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
-	if (i >= 0)
+	if (i >= 0) {
+		kfree(mode);
 		goto found;
+	}
 
 	/* Buffer sinks only work with a usable IRQ */
 	if (!msc->do_irq) {
@@ -199,6 +199,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Comet Lake PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{
 		/* Ice Lake NNPI */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),

@@ -209,6 +214,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Jasper Lake PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
 
@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
 	cookie = dmaengine_submit(desc);
 	ret = dma_submit_error(cookie);
 	if (ret) {
-		dmaengine_terminate_all(adc->dma_chan);
+		dmaengine_terminate_sync(adc->dma_chan);
 		return ret;
 	}
 

@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
 	stm32_adc_conv_irq_disable(adc);
 
 	if (adc->dma_chan)
-		dmaengine_terminate_all(adc->dma_chan);
+		dmaengine_terminate_sync(adc->dma_chan);
 
 	if (stm32_adc_set_trig(indio_dev, NULL))
 		dev_err(&indio_dev->dev, "Can't clear trigger\n");
@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
 	struct adis16480 *st = iio_priv(indio_dev);
 	unsigned int t, reg;
 
+	if (val < 0 || val2 < 0)
+		return -EINVAL;
+
 	t = val * 1000 + val2 / 1000;
-	if (t <= 0)
+	if (t == 0)
 		return -EINVAL;
 
 	/*
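A brief worked check of why the explicit sign test above matters (a sketch based only on this hunk, where `t` is declared `unsigned int`): with val = -1 and val2 = 500000,

    t = (-1) * 1000 + 500000 / 1000 = -500  ≡  4294966796 (mod 2^32)

so the old `t <= 0` test, which for an unsigned value is equivalent to `t == 0`, would not reject the negative input; checking `val`/`val2` for negative values up front closes that hole.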
@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
 		.name = "MPU6050",
 		.reg = &reg_set_6050,
 		.config = &chip_config_6050,
+		.fifo_size = 1024,
 	},
 	{
 		.whoami = INV_MPU6500_WHOAMI_VALUE,
 		.name = "MPU6500",
 		.reg = &reg_set_6500,
 		.config = &chip_config_6050,
+		.fifo_size = 512,
 	},
 	{
 		.whoami = INV_MPU6515_WHOAMI_VALUE,
 		.name = "MPU6515",
 		.reg = &reg_set_6500,
 		.config = &chip_config_6050,
+		.fifo_size = 512,
 	},
 	{
 		.whoami = INV_MPU6000_WHOAMI_VALUE,
 		.name = "MPU6000",
 		.reg = &reg_set_6050,
 		.config = &chip_config_6050,
+		.fifo_size = 1024,
 	},
 	{
 		.whoami = INV_MPU9150_WHOAMI_VALUE,
 		.name = "MPU9150",
 		.reg = &reg_set_6050,
 		.config = &chip_config_6050,
+		.fifo_size = 1024,
 	},
 	{
 		.whoami = INV_MPU9250_WHOAMI_VALUE,
 		.name = "MPU9250",
 		.reg = &reg_set_6500,
 		.config = &chip_config_6050,
+		.fifo_size = 512,
 	},
 	{
 		.whoami = INV_MPU9255_WHOAMI_VALUE,
 		.name = "MPU9255",
 		.reg = &reg_set_6500,
 		.config = &chip_config_6050,
+		.fifo_size = 512,
 	},
 	{
 		.whoami = INV_ICM20608_WHOAMI_VALUE,
 		.name = "ICM20608",
 		.reg = &reg_set_6500,
 		.config = &chip_config_6050,
+		.fifo_size = 512,
 	},
 	{
 		.whoami = INV_ICM20602_WHOAMI_VALUE,
 		.name = "ICM20602",
 		.reg = &reg_set_icm20602,
 		.config = &chip_config_6050,
+		.fifo_size = 1008,
 	},
 };
 
@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
  * @name:      name of the chip.
  * @reg:   register map of the chip.
  * @config:    configuration of the chip.
+ * @fifo_size:	size of the FIFO in bytes.
  */
 struct inv_mpu6050_hw {
 	u8 whoami;
 	u8 *name;
 	const struct inv_mpu6050_reg_map *reg;
 	const struct inv_mpu6050_chip_config *config;
+	size_t fifo_size;
 };
 
 /*
@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 			"failed to ack interrupt\n");
 		goto flush_fifo;
 	}
-	/* handle fifo overflow by reseting fifo */
-	if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
-		goto flush_fifo;
 	if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
 		dev_warn(regmap_get_device(st->map),
 			"spurious interrupt with status 0x%x\n", int_status);

@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 	if (result)
 		goto end_session;
 	fifo_count = get_unaligned_be16(&data[0]);
+
+	/*
+	 * Handle fifo overflow by resetting fifo.
+	 * Reset if there is only 3 data set free remaining to mitigate
+	 * possible delay between reading fifo count and fifo data.
+	 */
+	nb = 3 * bytes_per_datum;
+	if (fifo_count >= st->hw->fifo_size - nb) {
+		dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
+		goto flush_fifo;
+	}
+
 	/* compute and process all complete datum */
 	nb = fifo_count / bytes_per_datum;
 	inv_mpu6050_update_period(st, pf->timestamp, nb);
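For scale, a worked example of the new early-reset threshold (a sketch; the 12-byte sample set is an illustrative assumption, not taken from the diff — bytes_per_datum depends on which channels are enabled):

    nb = 3 * bytes_per_datum = 3 * 12 = 36 bytes
    reset when fifo_count >= fifo_size - nb:
        1024 - 36 = 988  (MPU6050-class parts)
         512 - 36 = 476  (MPU6500-class parts)
        1008 - 36 = 972  (ICM20602)

so the FIFO is flushed while roughly three sample sets of headroom remain, covering the window between reading the count register and reading the data.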
@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
 	udelay(data->cfg->trigger_pulse_us);
 	gpiod_set_value(data->gpiod_trig, 0);
 
-	/* it cannot take more than 20 ms */
+	/* it should not take more than 20 ms until echo is rising */
 	ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
 	if (ret < 0) {
 		mutex_unlock(&data->lock);

@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
 		return -ETIMEDOUT;
 	}
 
-	ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
+	/* it cannot take more than 50 ms until echo is falling */
+	ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
 	if (ret < 0) {
 		mutex_unlock(&data->lock);
 		return ret;

@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
 
 	dt_ns = ktime_to_ns(ktime_dt);
 	/*
-	 * measuring more than 3 meters is beyond the capabilities of
-	 * the sensor
+	 * measuring more than 6,45 meters is beyond the capabilities of
+	 * the supported sensors
 	 * ==> filter out invalid results for not measuring echos of
 	 * another us sensor
 	 *
 	 * formula:
-	 *         distance       3 m
-	 * time = ---------- = --------- = 9404389 ns
-	 *          speed      319 m/s
+	 *         distance     6,45 * 2 m
+	 * time = ---------- = ------------ = 40438871 ns
+	 *          speed        319 m/s
 	 *
 	 * using a minimum speed at -20 °C of 319 m/s
 	 */
-	if (dt_ns > 9404389)
+	if (dt_ns > 40438871)
 		return -EIO;
 
 	time_ns = dt_ns;

@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
 	 * with Temp in °C
 	 * and speed in m/s
 	 *
-	 * use 343 m/s as ultrasonic speed at 20 °C here in absence of the
+	 * use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the
 	 * temperature
 	 *
 	 * therefore:
-	 *             time     343
-	 * distance = ------ * -----
-	 *             10^6       2
+	 *             time     343,5     time * 106
+	 * distance = ------ * ------- = ------------
+	 *             10^6        2        617176
 	 * with time in ns
 	 * and distance in mm (one way)
 	 *
-	 * because we limit to 3 meters the multiplication with 343 just
+	 * because we limit to 6,45 meters the multiplication with 106 just
 	 * fits into 32 bit
 	 */
-	distance_mm = time_ns * 343 / 2000000;
+	distance_mm = time_ns * 106 / 617176;
 
 	return distance_mm;
 }
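A worked check of the new constants in these hunks, derived only from the values shown above:

    t_max = 2 * 6.45 m / (319 m/s) ≈ 0.040438871 s = 40438871 ns
    distance_mm = time_ns * 343.5 / (2 * 10^6) ≈ time_ns * 106 / 617176
    worst case: 40438871 * 106 = 4286520326 < 2^32 = 4294967296

so the widened 6.45 m round-trip limit still lets the 32-bit multiplication "just fit", as the updated comment claims.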
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
 		goto bail_dev;
 	}
 
-	hfi1_compute_tid_rdma_flow_wt();
 	/*
 	 * These must be called before the driver is registered with
 	 * the PCI subsystem.
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
 	/*
 	 * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
 	 */
-	if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+	if (parent &&
+	    (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+	     dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
 		dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
 		dd->link_gen3_capable = 0;
 	}
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		if (qp->s_flags & RVT_S_WAIT_RNR)
 			goto bail_stop;
 		rdi = ib_to_rvt(qp->ibqp.device);
-		if (qp->s_rnr_retry == 0 &&
-		    !((rdi->post_parms[wqe->wr.opcode].flags &
-		       RVT_OPERATION_IGN_RNR_CNT) &&
-		      qp->s_rnr_retry_cnt == 0)) {
-			status = IB_WC_RNR_RETRY_EXC_ERR;
-			goto class_b;
+		if (!(rdi->post_parms[wqe->wr.opcode].flags &
+		      RVT_OPERATION_IGN_RNR_CNT)) {
+			if (qp->s_rnr_retry == 0) {
+				status = IB_WC_RNR_RETRY_EXC_ERR;
+				goto class_b;
+			}
+			if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+				qp->s_rnr_retry--;
 		}
-		if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
-			qp->s_rnr_retry--;
 
 		/*
 		 * The last valid PSN is the previous PSN. For TID RDMA WRITE
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
  * C - Capcode
  */
 
-static u32 tid_rdma_flow_wt;
-
 static void tid_rdma_trigger_resume(struct work_struct *work);
 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,

@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
 				   struct tid_rdma_flow *flow,
 				   bool fecn);
 
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+		priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+	struct hfi1_qp_priv *priv = qp->priv;
+
+	priv->s_flags |= RVT_S_ACK_PENDING;
+	hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+	validate_r_tid_ack(qp->priv);
+	tid_rdma_schedule_ack(qp);
+}
+
 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
 {
 	return
@@ -3005,10 +3023,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			qpriv->s_nak_state = IB_NAK_PSN_ERROR;
 			/* We are NAK'ing the next expected PSN */
 			qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
-			qpriv->s_flags |= RVT_S_ACK_PENDING;
-			if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
-				qpriv->r_tid_ack = qpriv->r_tid_tail;
-			hfi1_schedule_tid_send(qp);
+			tid_rdma_trigger_ack(qp);
 		}
 		goto unlock;
 	}
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
 }
 
-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
 {
 	/*
 	 * Heuristic for computing the RNR timeout when waiting on the flow
 	 * queue. Rather than a computationaly expensive exact estimate of when
 	 * a flow will be available, we assume that if a QP is at position N in
 	 * the flow queue it has to wait approximately (N + 1) * (number of
-	 * segments between two sync points), assuming PMTU of 4K. The rationale
-	 * for this is that flows are released and recycled at each sync point.
+	 * segments between two sync points). The rationale for this is that
+	 * flows are released and recycled at each sync point.
 	 */
-	tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
-			   TID_RDMA_MAX_SEGMENT_SIZE;
+	return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
 }
 
 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
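A worked reading of the new per-QP heuristic (a sketch; it assumes TID_RDMA_SEGMENT_SHIFT is 18 and enum_to_mtu(OPA_MTU_4096) is 4096 bytes, consistent with the header change later in this diff):

    old: tid_rdma_flow_wt = MAX_TID_FLOW_PSN * 4096 / 2^18 = MAX_TID_FLOW_PSN / 64
    new: hfi1_compute_tid_rdma_flow_wt(qp) = (MAX_TID_FLOW_PSN * qp->pmtu) >> 18

The two agree when the QP's PMTU is 4 KiB; the new form scales the RNR wait down for smaller PMTUs instead of hard-coding the 4K assumption.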
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
 			ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
 			if (ret) {
-				to_seg = tid_rdma_flow_wt *
+				to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
 					 position_in_queue(qpriv,
 							   &rcd->flow_queue);
 				break;

@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 			/*
 			 * If overtaking req->acked_tail, send an RNR NAK. Because the
 			 * QP is not queued in this case, and the issue can only be
-			 * caused due a delay in scheduling the second leg which we
+			 * caused by a delay in scheduling the second leg which we
 			 * cannot estimate, we use a rather arbitrary RNR timeout of
 			 * (MAX_FLOWS / 2) segments
 			 */

@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 					MAX_FLOWS)) {
 				ret = -EAGAIN;
 				to_seg = MAX_FLOWS >> 1;
-				qpriv->s_flags |= RVT_S_ACK_PENDING;
-				hfi1_schedule_tid_send(qp);
+				tid_rdma_trigger_ack(qp);
 				break;
 			}
 
@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
 					  req);
 	trace_hfi1_tid_write_rsp_rcv_data(qp);
-	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-		priv->r_tid_ack = priv->r_tid_tail;
+	validate_r_tid_ack(priv);
 
 	if (opcode == TID_OP(WRITE_DATA_LAST)) {
 		release_rdma_sge_mr(e);

@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	}
 
 done:
-	priv->s_flags |= RVT_S_ACK_PENDING;
-	hfi1_schedule_tid_send(qp);
+	tid_rdma_schedule_ack(qp);
 exit:
 	priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
 	if (fecn)

@@ -4388,10 +4399,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	if (!priv->s_nak_state) {
 		priv->s_nak_state = IB_NAK_PSN_ERROR;
 		priv->s_nak_psn = flow->flow_state.r_next_psn;
-		priv->s_flags |= RVT_S_ACK_PENDING;
-		if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-			priv->r_tid_ack = priv->r_tid_tail;
-		hfi1_schedule_tid_send(qp);
+		tid_rdma_trigger_ack(qp);
 	}
 	goto done;
 }

@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
 	qpriv->resync = true;
 	/* RESYNC request always gets a TID RDMA ACK. */
 	qpriv->s_nak_state = 0;
-	qpriv->s_flags |= RVT_S_ACK_PENDING;
-	hfi1_schedule_tid_send(qp);
+	tid_rdma_trigger_ack(qp);
 bail:
 	if (fecn)
 		qp->s_flags |= RVT_S_ECN;
@@ -17,6 +17,7 @@
 #define TID_RDMA_MIN_SEGMENT_SIZE       BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_SEGMENT_SIZE       BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_PAGES              (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT          18
 
 /*
  * Bit definitions for priv->s_flags.

@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 				  struct ib_other_headers *ohdr,
 				  u32 *bth1, u32 *bth2, u32 *len);
 
-void hfi1_compute_tid_rdma_flow_wt(void);
-
 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
 
 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
@@ -59,7 +59,7 @@ enum {
 
 #define HNS_ROCE_HEM_CHUNK_LEN	\
 	 ((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	 \
-	 (sizeof(struct scatterlist)))
+	 (sizeof(struct scatterlist) + sizeof(void *)))
 
 #define check_whether_bt_num_3(type, hop_num) \
 	(type < HEM_TYPE_MTT && hop_num == 2)
@@ -376,7 +376,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
 	srq->max_gs = srq_init_attr->attr.max_sge;
 
-	srq_desc_size = max(16, 16 * srq->max_gs);
+	srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
 
 	srq->wqe_shift = ilog2(srq_desc_size);
 
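A worked example of why the round-up matters (a sketch using only the expressions in this hunk): ilog2() truncates, so a non-power-of-two descriptor size used to shrink the WQE stride. With max_gs = 3:

    srq_desc_size = max(16, 16 * 3) = 48
    old: wqe_shift = ilog2(48) = 5  ->  32-byte stride, smaller than the 48 bytes needed
    new: srq_desc_size = roundup_pow_of_two(48) = 64  ->  wqe_shift = 6, a 64-byte stride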
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
 {
 	struct ml_device *ml = ff->private;
 
+	/*
+	 * Even though we stop all playing effects when tearing down
+	 * an input device (via input_device_flush() that calls into
+	 * input_ff_flush() that stops and erases all effects), we
+	 * do not actually stop the timer, and therefore we should
+	 * do it here.
+	 */
+	del_timer_sync(&ml->timer);
+
 	kfree(ml->private);
 }
 
@@ -177,6 +177,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0096", /* X280 */
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN009b", /* T580 */
+	"LEN0402", /* X1 Extreme 2nd Generation */
 	"LEN200f", /* T450s */
 	"LEN2054", /* E480 */
 	"LEN2055", /* E580 */
@@ -510,7 +510,6 @@ struct f11_data {
 	struct rmi_2d_sensor_platform_data sensor_pdata;
 	unsigned long *abs_mask;
 	unsigned long *rel_mask;
-	unsigned long *result_bits;
 };
 
 enum f11_finger_state {

@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 	/*
 	** init instance data, fill in values and create any sysfs files
 	*/
-	f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+	f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
 			GFP_KERNEL);
 	if (!f11)
 		return -ENOMEM;

@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 			+ sizeof(struct f11_data));
 	f11->rel_mask = (unsigned long *)((char *)f11
 			+ sizeof(struct f11_data) + mask_size);
-	f11->result_bits = (unsigned long *)((char *)f11
-			+ sizeof(struct f11_data) + mask_size * 2);
 
 	set_bit(fn->irq_pos, f11->abs_mask);
 	set_bit(fn->irq_pos + 1, f11->rel_mask);

@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 			valid_bytes = f11->sensor.attn_size;
 		memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
 			valid_bytes);
-		drvdata->attn_data.data += f11->sensor.attn_size;
-		drvdata->attn_data.size -= f11->sensor.attn_size;
+		drvdata->attn_data.data += valid_bytes;
+		drvdata->attn_data.size -= valid_bytes;
 	} else {
 		error = rmi_read_block(rmi_dev,
 				data_base_addr, f11->sensor.data_pkt,
@@ -55,6 +55,9 @@ struct f12_data {
 
 	const struct rmi_register_desc_item *data15;
 	u16 data15_offset;
+
+	unsigned long *abs_mask;
+	unsigned long *rel_mask;
 };
 
 static int rmi_f12_read_sensor_tuning(struct f12_data *f12)

@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 			valid_bytes = sensor->attn_size;
 		memcpy(sensor->data_pkt, drvdata->attn_data.data,
 			valid_bytes);
-		drvdata->attn_data.data += sensor->attn_size;
-		drvdata->attn_data.size -= sensor->attn_size;
+		drvdata->attn_data.data += valid_bytes;
+		drvdata->attn_data.size -= valid_bytes;
 	} else {
 		retval = rmi_read_block(rmi_dev, f12->data_addr,
 					sensor->data_pkt, sensor->pkt_size);

@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
 static int rmi_f12_config(struct rmi_function *fn)
 {
 	struct rmi_driver *drv = fn->rmi_dev->driver;
+	struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+	struct rmi_2d_sensor *sensor;
 	int ret;
 
-	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+	sensor = &f12->sensor;
+
+	if (!sensor->report_abs)
+		drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+	else
+		drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+	drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
 
 	ret = rmi_f12_write_control_regs(fn);
 	if (ret)

@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
 	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	u16 data_offset = 0;
+	int mask_size;
 
 	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
 
+	mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
 	ret = rmi_read(fn->rmi_dev, query_addr, &buf);
 	if (ret < 0) {
 		dev_err(&fn->dev, "Failed to read general info register: %d\n",

@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
 		return -ENODEV;
 	}
 
-	f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+	f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+			GFP_KERNEL);
 	if (!f12)
 		return -ENOMEM;
 
+	f12->abs_mask = (unsigned long *)((char *)f12
+			+ sizeof(struct f12_data));
+	f12->rel_mask = (unsigned long *)((char *)f12
+			+ sizeof(struct f12_data) + mask_size);
+
+	set_bit(fn->irq_pos, f12->abs_mask);
+	set_bit(fn->irq_pos + 1, f12->rel_mask);
+
 	f12->has_dribble = !!(buf & BIT(3));
 
 	if (fn->dev.of_node) {
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
 static const struct vb2_queue rmi_f54_queue = {
 	.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 	.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
-	.buf_struct_size = sizeof(struct vb2_buffer),
+	.buf_struct_size = sizeof(struct vb2_v4l2_buffer),
 	.ops = &rmi_f54_queue_ops,
 	.mem_ops = &vb2_vmalloc_memops,
 	.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,

@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
 {
 	struct rmi_driver *drv = fn->rmi_dev->driver;
 
-	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+	drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
 
 	return 0;
 }

@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
 
 	video_unregister_device(&f54->vdev);
 	v4l2_device_unregister(&f54->v4l2);
+	destroy_workqueue(f54->workqueue);
 }
 
 struct rmi_function_handler rmi_f54_handler = {
Some files were not shown because too many files have changed in this diff.