Linux 5.11-rc7

Merge v5.11-rc7 into android-mainline

Linux 5.11-rc7

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5df70d40d93f98af1756f1dafceb23a59ba95015
This commit is contained in: 19594b801f

.mailmap (7 changes)
@@ -179,6 +179,8 @@ Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
 Kees Cook <keescook@chromium.org> <keescook@google.com>
 Kees Cook <keescook@chromium.org> <kees@outflux.net>
 Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -199,6 +201,8 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Mark Brown <broonie@sirena.org.uk>
@@ -244,6 +248,7 @@ Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
+Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
@@ -334,6 +339,8 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>

Documentation/userspace-api/media/Makefile:

@@ -75,7 +75,7 @@ quiet_cmd_sphinx = SPHINX  $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
       cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
 	PYTHONDONTWRITEBYTECODE=1 \
 	BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
-	$(PYTHON) $(srctree)/scripts/jobserver-exec \
+	$(PYTHON3) $(srctree)/scripts/jobserver-exec \
 	$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
 	$(SPHINXBUILD) \
 	-b $2 \

Documentation/admin-guide/syscall-user-dispatch.rst:

@@ -70,8 +70,8 @@ trampoline code on the vDSO, that trampoline is never intercepted.
 [selector] is a pointer to a char-sized region in the process memory
 region, that provides a quick way to enable disable syscall redirection
 thread-wide, without the need to invoke the kernel directly.  selector
-can be set to PR_SYS_DISPATCH_ON or PR_SYS_DISPATCH_OFF.  Any other
-value should terminate the program with a SIGSYS.
+can be set to SYSCALL_DISPATCH_FILTER_ALLOW or SYSCALL_DISPATCH_FILTER_BLOCK.
+Any other value should terminate the program with a SIGSYS.
 
 Security Notes
 --------------

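The selector is flipped entirely in user space; for context, a minimal sketch of driving this interface (assuming the PR_SET_SYSCALL_USER_DISPATCH and SYSCALL_DISPATCH_FILTER_* constants from the 5.11 uapi headers; the empty always-allowed range and the flow around it are illustrative, not the documented example)::

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	/* Flipped by the task itself with a plain store -- no syscall needed. */
	static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

	int main(void)
	{
		/*
		 * Redirect syscalls to SIGSYS whenever the selector says
		 * BLOCK; offset/len = 0,0 means no always-allowed region.
		 */
		if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
			  0, 0, &selector)) {
			perror("prctl");
			return 1;
		}

		selector = SYSCALL_DISPATCH_FILTER_BLOCK;
		/* Any syscall issued here would now raise SIGSYS ... */
		selector = SYSCALL_DISPATCH_FILTER_ALLOW;

		printf("syscalls allowed again\n");
		return 0;
	}
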
Documentation/devicetree/bindings/input/adc-keys.txt:

@@ -5,7 +5,8 @@ Required properties:
  - compatible: "adc-keys"
  - io-channels: Phandle to an ADC channel
  - io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+ - keyup-threshold-microvolt: Voltage above or equal to which all the keys are
+   considered up.
 
 Optional properties:
 	- poll-interval: Poll interval time in milliseconds
@@ -17,7 +18,12 @@ Each button (key) is represented as a sub-node of "adc-keys":
 Required subnode-properties:
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.
-	- press-threshold-microvolt: Voltage ADC input when this key is pressed.
+	- press-threshold-microvolt: voltage above or equal to which this key is
+	  considered pressed.
+
+No two values of press-threshold-microvolt may be the same.
+All values of press-threshold-microvolt must be less than
+keyup-threshold-microvolt.
 
 Example:
 
@@ -47,3 +53,15 @@ Example:
 		press-threshold-microvolt = <500000>;
 	};
 };
+
++--------------------------------+------------------------+
+| 2.000.000 <= value             | no key pressed         |
++--------------------------------+------------------------+
+| 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed   |
++--------------------------------+------------------------+
+| 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
++--------------------------------+------------------------+
+| 500.000 <= value < 1.000.000   | KEY_ENTER pressed      |
++--------------------------------+------------------------+
+| value < 500.000                | no key pressed         |
++--------------------------------+------------------------+

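The table above pins down the decoding rule the reworked wording describes: a reading selects the highest press threshold it meets, and anything at or above keyup-threshold-microvolt (or below every press threshold) counts as no key. A small C sketch of that lookup, with the thresholds from the example hard-coded purely for illustration::

	#include <stdio.h>

	struct adc_key { int threshold_uv; const char *name; };

	/* Sorted by descending press-threshold-microvolt. */
	static const struct adc_key keys[] = {
		{ 1500000, "KEY_VOLUMEUP" },
		{ 1000000, "KEY_VOLUMEDOWN" },
		{  500000, "KEY_ENTER" },
	};

	static const int keyup_threshold_uv = 2000000;

	static const char *decode(int value_uv)
	{
		unsigned int i;

		if (value_uv >= keyup_threshold_uv)
			return "no key pressed";
		for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
			if (value_uv >= keys[i].threshold_uv)
				return keys[i].name;
		return "no key pressed";
	}

	int main(void)
	{
		printf("%s\n", decode(1200000)); /* KEY_VOLUMEDOWN */
		return 0;
	}
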
Documentation/devicetree/bindings/input/touchscreen/goodix.yaml:

@@ -26,6 +26,7 @@ properties:
       - goodix,gt927
       - goodix,gt9271
       - goodix,gt928
+      - goodix,gt9286
       - goodix,gt967
 
   reg:

Documentation/kbuild/gcc-plugins.rst:

@@ -11,16 +11,13 @@ compiler [1]_. They are useful for runtime instrumentation and static analysis.
 We can analyse, change and add further code during compilation via
 callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.
 
-The GCC plugin infrastructure of the kernel supports all gcc versions from
-4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
-separate directory.
-Plugin source files have to be compilable by both a C and a C++ compiler as well
-because gcc versions 4.5 and 4.6 are compiled by a C compiler,
-gcc-4.7 can be compiled by a C or a C++ compiler,
-and versions 4.8+ can only be compiled by a C++ compiler.
+The GCC plugin infrastructure of the kernel supports building out-of-tree
+modules, cross-compilation and building in a separate directory.
+Plugin source files have to be compilable by a C++ compiler.
 
-Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
-powerpc architectures.
+Currently the GCC plugin infrastructure supports only some architectures.
+Grep "select HAVE_GCC_PLUGINS" to find out which architectures support
+GCC plugins.
 
 This infrastructure was ported from grsecurity [6]_ and PaX [7]_.
 
@@ -47,20 +44,13 @@ Files
 	This is a compatibility header for GCC plugins.
 	It should be always included instead of individual gcc headers.
 
-**$(src)/scripts/gcc-plugin.sh**
-
-	This script checks the availability of the included headers in
-	gcc-common.h and chooses the proper host compiler to build the plugins
-	(gcc-4.7 can be built by either gcc or g++).
-
 **$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
 $(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
 $(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
 $(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
 
 	These headers automatically generate the registration structures for
-	GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
-	from 4.5 to 6.0.
+	GIMPLE, SIMPLE_IPA, IPA and RTL passes.
 	They should be preferred to creating the structures by hand.
 
 
@@ -68,21 +58,25 @@ Usage
 =====
 
 You must install the gcc plugin headers for your gcc version,
-e.g., on Ubuntu for gcc-4.9::
+e.g., on Ubuntu for gcc-10::
 
-	apt-get install gcc-4.9-plugin-dev
+	apt-get install gcc-10-plugin-dev
 
 Or on Fedora::
 
	dnf install gcc-plugin-devel
 
-Enable a GCC plugin based feature in the kernel config::
+Enable the GCC plugin infrastructure and some plugin(s) you want to use
+in the kernel config::
 
-	CONFIG_GCC_PLUGIN_CYC_COMPLEXITY = y
+	CONFIG_GCC_PLUGINS=y
+	CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
+	CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
+	...
 
-To compile only the plugin(s)::
+To compile the minimum tool set including the plugin(s)::
 
-	make gcc-plugins
+	make scripts
 
 or just run the kernel make and compile the whole kernel with
 the cyclomatic complexity GCC plugin.
@@ -91,7 +85,8 @@ the cyclomatic complexity GCC plugin.
 4. How to add a new GCC plugin
 ==============================
 
-The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
-here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
-$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
+The GCC plugins are in scripts/gcc-plugins/. You need to put plugin source files
+right under scripts/gcc-plugins/. Creating subdirectories is not supported.
+It must be added to scripts/gcc-plugins/Makefile, scripts/Makefile.gcc-plugins
+and a relevant Kconfig file.
 See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.

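For orientation, the skeleton every such plugin source file starts from looks roughly like this (a sketch of the boilerplate only, not a buildable pass; the plugin name and info strings are made up, and a real plugin would additionally register a pass via one of the gcc-generate-*-pass.h headers)::

	/*
	 * Minimal GCC plugin skeleton. In-tree plugins live right under
	 * scripts/gcc-plugins/ and include gcc-common.h instead of
	 * individual gcc headers.
	 */
	#include "gcc-common.h"

	__visible int plugin_is_GPL_compatible;

	static struct plugin_info demo_plugin_info = {
		.version	= "1",
		.help		= "demo plugin, does nothing\n",
	};

	__visible int plugin_init(struct plugin_name_args *plugin_info,
				  struct plugin_gcc_version *version)
	{
		/* Refuse to load into a gcc we were not built against. */
		if (!plugin_default_version_check(version, &gcc_version))
			return 1;

		register_callback(plugin_info->base_name, PLUGIN_INFO, NULL,
				  &demo_plugin_info);
		/* A real plugin would also register a GIMPLE/IPA/RTL pass here. */
		return 0;
	}
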
Documentation/kbuild/llvm.rst:

@@ -63,6 +63,50 @@ They can be enabled individually. The full list of the parameters: ::
 Currently, the integrated assembler is disabled by default. You can pass
 ``LLVM_IAS=1`` to enable it.
 
+Supported Architectures
+-----------------------
+
+LLVM does not target all of the architectures that Linux supports and
+just because a target is supported in LLVM does not mean that the kernel
+will build or work without any issues. Below is a general summary of
+architectures that currently work with ``CC=clang`` or ``LLVM=1``. Level
+of support corresponds to "S" values in the MAINTAINERS files. If an
+architecture is not present, it either means that LLVM does not target
+it or there are known issues. Using the latest stable version of LLVM or
+even the development tree will generally yield the best results.
+An architecture's ``defconfig`` is generally expected to work well,
+certain configurations may have problems that have not been uncovered
+yet. Bug reports are always welcome at the issue tracker below!
+
+.. list-table::
+   :widths: 10 10 10
+   :header-rows: 1
+
+   * - Architecture
+     - Level of support
+     - ``make`` command
+   * - arm
+     - Supported
+     - ``LLVM=1``
+   * - arm64
+     - Supported
+     - ``LLVM=1``
+   * - mips
+     - Maintained
+     - ``CC=clang``
+   * - powerpc
+     - Maintained
+     - ``CC=clang``
+   * - riscv
+     - Maintained
+     - ``CC=clang``
+   * - s390
+     - Maintained
+     - ``CC=clang``
+   * - x86
+     - Supported
+     - ``LLVM=1``
+
 Getting Help
 ------------
 

Documentation/kbuild/makefiles.rst:

@@ -755,7 +755,7 @@ more details, with real examples.
 	bits on the scripts nonetheless.
 
 	Kbuild provides variables $(CONFIG_SHELL), $(AWK), $(PERL),
-	$(PYTHON) and $(PYTHON3) to refer to interpreters for the respective
+	and $(PYTHON3) to refer to interpreters for the respective
 	scripts.
 
 	Example::

Documentation/virt/kvm/nested-vmx.rst:

@@ -37,8 +37,10 @@ call L2.
 Running nested VMX
 ------------------
 
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
+The nested VMX feature is enabled by default since Linux kernel v4.20. For
+older Linux kernel, it can be enabled by giving the "nested=1" option to the
+kvm-intel module.
+
 
 No modifications are required to user space (qemu). However, qemu's default
 emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be

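A quick way to confirm the effective setting on a running system is to read the module parameter back through sysfs; a small sketch (the sysfs path is the standard one for module parameters, and the value is reported as 'Y'/'N' or '1'/'0' depending on kernel version)::

	#include <stdio.h>

	int main(void)
	{
		/* kvm_amd exposes the same knob at .../kvm_amd/parameters/nested */
		FILE *f = fopen("/sys/module/kvm_intel/parameters/nested", "r");
		int c;

		if (!f) {
			perror("kvm_intel not loaded?");
			return 1;
		}
		c = fgetc(f);
		printf("nested VMX: %s\n",
		       (c == 'Y' || c == '1') ? "enabled" : "disabled");
		fclose(f);
		return 0;
	}
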
Documentation/virt/kvm/running-nested-guests.rst:

@@ -74,7 +74,7 @@ few:
 Enabling "nested" (x86)
 -----------------------
 
-From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
 by default for Intel and AMD. (Though your Linux distribution might
 override this default.)
 

MAINTAINERS:

@@ -4304,7 +4304,7 @@ S:	Maintained
 F:	.clang-format
 
 CLANG/LLVM BUILD SUPPORT
-M:	Nathan Chancellor <natechancellor@gmail.com>
+M:	Nathan Chancellor <nathan@kernel.org>
 M:	Nick Desaulniers <ndesaulniers@google.com>
 L:	clang-built-linux@googlegroups.com
 S:	Supported

Makefile (15 changes):

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -453,7 +453,6 @@ AWK		= awk
 INSTALLKERNEL  := installkernel
 DEPMOD		= depmod
 PERL		= perl
-PYTHON		= python
 PYTHON3		= python3
 CHECK		= sparse
 BASH		= bash
@@ -509,7 +508,7 @@ CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
-export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
 
@@ -813,10 +812,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
 KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
 endif
 
+DEBUG_CFLAGS	:=
+
 # Workaround for GCC versions < 5.0
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
 ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS	:= $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
+DEBUG_CFLAGS	+= $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
 endif
 
 ifdef CONFIG_DEBUG_INFO
@@ -949,12 +950,6 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=designated-init)
 # change __FILE__ to the relative path from the srctree
 KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 
-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
 # include additional Makefiles when needed
 include-y			:= scripts/Makefile.extrawarn
 include-$(CONFIG_KASAN)	+= scripts/Makefile.kasan

arch/arm/include/asm/kexec-internal.h (new file):

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+	unsigned long kexec_start_address;
+	unsigned long kexec_indirection_page;
+	unsigned long kexec_mach_type;
+	unsigned long kexec_r2;
+};
+
+#endif

arch/arm/include/debug/tegra.S:

@@ -149,7 +149,34 @@
 
 		.align
 99:		.word	.
+#if defined(ZIMAGE)
+		.word	. + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+		/* Debug UART initialization required */
+		.word	1
+		/* Debug UART physical address */
+		.word	0
+		/* Debug UART virtual address */
+		.word	0
+#else
 		.word	tegra_uart_config
+#endif
 		.ltorg
 
 		/* Load previously selected UART address */
@@ -189,30 +216,3 @@
 
 		.macro	waituarttxrdy,rd,rx
 		.endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
-	/* Debug UART initialization required */
-	.word 1
-	/* Debug UART physical address */
-	.word 0
-	/* Debug UART virtual address */
-	.word 0
-#endif

arch/arm/kernel/asm-offsets.c:

@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
@@ -170,5 +171,9 @@ int main(void)
   DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
   DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
+  DEFINE(KEXEC_START_ADDR,	offsetof(struct kexec_relocate_data, kexec_start_address));
+  DEFINE(KEXEC_INDIR_PAGE,	offsetof(struct kexec_relocate_data, kexec_indirection_page));
+  DEFINE(KEXEC_MACH_TYPE,	offsetof(struct kexec_relocate_data, kexec_mach_type));
+  DEFINE(KEXEC_R2,		offsetof(struct kexec_relocate_data, kexec_r2));
   return 0;
 }

arch/arm/kernel/machine_kexec.c:

@@ -13,6 +13,7 @@
 #include <linux/of_fdt.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
@@ -22,11 +23,6 @@
 extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
 static atomic_t waiting_for_crash_ipi;
 
 /*
@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
 void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list, reboot_entry_phys;
+	struct kexec_relocate_data *data;
 	void (*reboot_entry)(void);
 	void *reboot_code_buffer;
 
@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
 
 	reboot_code_buffer = page_address(image->control_code_page);
 
-	/* Prepare parameters for reboot_code_buffer*/
-	set_kernel_text_rw();
-	kexec_start_address = image->start;
-	kexec_indirection_page = page_list;
-	kexec_mach_type = machine_arch_type;
-	kexec_boot_atags = image->arch.kernel_r2;
-
 	/* copy our kernel relocation code to the control code page */
 	reboot_entry = fncpy(reboot_code_buffer,
 			     &relocate_new_kernel,
 			     relocate_new_kernel_size);
 
+	data = reboot_code_buffer + relocate_new_kernel_size;
+	data->kexec_start_address = image->start;
+	data->kexec_indirection_page = page_list;
+	data->kexec_mach_type = machine_arch_type;
+	data->kexec_r2 = image->arch.kernel_r2;
+
 	/* get the identity mapping physical address for the reboot code */
 	reboot_entry_phys = virt_to_idmap(reboot_entry);
 

arch/arm/kernel/relocate_kernel.S:

@@ -5,14 +5,16 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 #include <asm/kexec.h>
 
 	.align	3	/* not needed for this code, but keeps fncpy() happy */
 
 ENTRY(relocate_new_kernel)
 
-	ldr	r0,kexec_indirection_page
-	ldr	r1,kexec_start_address
+	adr	r7, relocate_new_kernel_end
+	ldr	r0, [r7, #KEXEC_INDIR_PAGE]
+	ldr	r1, [r7, #KEXEC_START_ADDR]
 
 	/*
 	 * If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
 
 2:
 	/* Jump to relocated kernel */
-	mov lr,r1
-	mov r0,#0
-	ldr r1,kexec_mach_type
-	ldr r2,kexec_boot_atags
- ARM(	ret lr	)
- THUMB(	bx lr	)
-
-	.align
-
-	.globl kexec_start_address
-kexec_start_address:
-	.long	0x0
-
-	.globl kexec_indirection_page
-kexec_indirection_page:
-	.long	0x0
-
-	.globl kexec_mach_type
-kexec_mach_type:
-	.long	0x0
-
-	/* phy addr of the atags for the new kernel */
-	.globl kexec_boot_atags
-kexec_boot_atags:
-	.long	0x0
+	mov	lr, r1
+	mov	r0, #0
+	ldr	r1, [r7, #KEXEC_MACH_TYPE]
+	ldr	r2, [r7, #KEXEC_R2]
+ ARM(	ret	lr	)
+ THUMB(	bx	lr	)
 
 ENDPROC(relocate_new_kernel)
 
+	.align	3
+relocate_new_kernel_end:
+
 .globl relocate_new_kernel_size

arch/arm/kernel/signal.c:

@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
 
 	addr = page_address(page);
 
+	/* Poison the entire page */
+	memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+		 PAGE_SIZE / sizeof(u32));
+
 	/* Give the signal return code some randomness */
 	offset = 0x200 + (get_random_int() & 0x7fc);
 	signal_return_offset = offset;
 
-	/*
-	 * Copy signal return handlers into the vector page, and
-	 * set sigreturn to be a pointer to these.
-	 */
+	/* Copy signal return handlers into the page */
 	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
 
-	ptr = (unsigned long)addr + offset;
-	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+	/* Flush out all instructions in this page */
+	ptr = (unsigned long)addr;
+	flush_icache_range(ptr, ptr + PAGE_SIZE);
 
 	return page;
 }

arch/arm/mach-footbridge/dc21285.c:

@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
 	if (addr)
 		switch (size) {
 		case 1:
-			asm("ldrb	%0, [%1, %2]"
+			asm volatile("ldrb	%0, [%1, %2]"
 				: "=r" (v) : "r" (addr), "r" (where) : "cc");
 			break;
 		case 2:
-			asm("ldrh	%0, [%1, %2]"
+			asm volatile("ldrh	%0, [%1, %2]"
 				: "=r" (v) : "r" (addr), "r" (where) : "cc");
 			break;
 		case 4:
-			asm("ldr	%0, [%1, %2]"
+			asm volatile("ldr	%0, [%1, %2]"
 				: "=r" (v) : "r" (addr), "r" (where) : "cc");
 			break;
 		}
@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
 	if (addr)
 		switch (size) {
 		case 1:
-			asm("strb	%0, [%1, %2]"
+			asm volatile("strb	%0, [%1, %2]"
 				: : "r" (value), "r" (addr), "r" (where)
 				: "cc");
 			break;
 		case 2:
-			asm("strh	%0, [%1, %2]"
+			asm volatile("strh	%0, [%1, %2]"
 				: : "r" (value), "r" (addr), "r" (where)
 				: "cc");
 			break;
 		case 4:
-			asm("str	%0, [%1, %2]"
+			asm volatile("str	%0, [%1, %2]"
 				: : "r" (value), "r" (addr), "r" (where)
 				: "cc");
 			break;

arch/arm64/kvm/hyp/nvhe/hyp-init.S:

@@ -47,6 +47,8 @@ __invalid:
 	b	.
 
 	/*
+	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+	 *
 	 * x0: SMCCC function ID
 	 * x1: struct kvm_nvhe_init_params PA
 	 */
@@ -70,9 +72,9 @@ __do_hyp_init:
 	eret
 
 1:	mov	x0, x1
-	mov	x4, lr
-	bl	___kvm_hyp_init
-	mov	lr, x4
+	mov	x3, lr
+	bl	___kvm_hyp_init			// Clobbers x0..x2
+	mov	lr, x3
 
 	/* Hello, World! */
 	mov	x0, #SMCCC_RET_SUCCESS
@@ -82,8 +84,8 @@ SYM_CODE_END(__kvm_hyp_init)
 /*
  * Initialize the hypervisor in EL2.
  *
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
@@ -112,9 +114,9 @@ alternative_else_nop_endif
 	/*
	 * Set the PS bits in TCR_EL2.
	 */
-	ldr	x1, [x0, #NVHE_INIT_TCR_EL2]
-	tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
-	msr	tcr_el2, x1
+	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
+	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+	msr	tcr_el2, x0
 
 	isb
 
@@ -193,7 +195,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 
 	/* Enable MMU, set vectors and stack. */
 	mov	x0, x28
-	bl	___kvm_hyp_init			// Clobbers x0..x3
+	bl	___kvm_hyp_init			// Clobbers x0..x2
 
 	/* Leave idmap. */
 	mov	x0, x29

arch/ia64/Makefile:

@@ -69,7 +69,7 @@ vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
 unwcheck: vmlinux
-	-$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $<
+	-$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $<
 
 archclean:
 

arch/ia64/scripts/unwcheck.py:

@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # SPDX-License-Identifier: GPL-2.0
 #
 # Usage: unwcheck.py FILE

arch/powerpc/kernel/Makefile:

@@ -51,7 +51,7 @@ obj-y				+= ptrace/
 obj-$(CONFIG_PPC64)		+= setup_64.o \
 				   paca.o nvram_64.o note.o syscall_64.o
 obj-$(CONFIG_COMPAT)		+= sys_ppc32.o signal_32.o
-obj-$(CONFIG_VDSO32)		+= vdso32/
+obj-$(CONFIG_VDSO32)		+= vdso32_wrapper.o
 obj-$(CONFIG_PPC_WATCHDOG)	+= watchdog.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_DAWR)		+= dawr.o
@@ -60,7 +60,7 @@ obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64)		+= vdso64/
+obj-$(CONFIG_PPC64)		+= vdso64_wrapper.o
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_BOOK3S_IDLE)	+= idle_book3s.o
 procfs-y			:= proc_powerpc.o

arch/powerpc/kernel/vdso32/Makefile:

@@ -30,7 +30,7 @@ CC32FLAGS += -m32
 KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
 endif
 
-targets := $(obj-vdso32) vdso32.so.dbg
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
 obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
 
 GCOV_PROFILE := n
@@ -46,9 +46,6 @@ obj-y += vdso32_wrapper.o
 targets += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -Upowerpc
 
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
-
 # link rule for the .so file, .lds has to be first
 $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
 	$(call if_changed,vdso32ld_and_check)

arch/powerpc/kernel/vdso64/Makefile:

@@ -17,7 +17,7 @@ endif
 
 # Build rules
 
-targets := $(obj-vdso64) vdso64.so.dbg
+targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
 obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
 
 GCOV_PROFILE := n
@@ -29,15 +29,9 @@ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
 	-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
 asflags-y := -D__VDSO64__ -s
 
-obj-y += vdso64_wrapper.o
 targets += vdso64.lds
 CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 
-$(obj)/vgettimeofday.o: %.o: %.c FORCE
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
-
 # link rule for the .so file, .lds has to be first
 $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
 	$(call if_changed,vdso64ld_and_check)

arch/powerpc/kernel/vdso64/sigtramp.S:

@@ -15,11 +15,20 @@
 
 	.text
 
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
 	.balign 8
 	.balign IFETCH_ALIGN_BYTES
-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
 .Lsigrt_start:
 	bctrl	/* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
 	addi	r1, r1, __SIGNAL_FRAMESIZE
 	li	r0,__NR_rt_sigreturn
 	sc

arch/powerpc/kernel/vdso64/vdso64.lds.S:

@@ -131,4 +131,4 @@ VERSION
 /*
  * Make the sigreturn code visible to the kernel.
  */
-VDSO_sigtramp_rt64	= __kernel_sigtramp_rt64;
+VDSO_sigtramp_rt64	= __kernel_start_sigtramp_rt64;

arch/powerpc/lib/sstep.c:

@@ -818,13 +818,15 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
 			break;
 		if (rev) {
 			/* reverse 32 bytes */
-			buf.d[0] = byterev_8(reg->d[3]);
-			buf.d[1] = byterev_8(reg->d[2]);
-			buf.d[2] = byterev_8(reg->d[1]);
-			buf.d[3] = byterev_8(reg->d[0]);
-			reg = &buf;
+			union vsx_reg buf32[2];
+			buf32[0].d[0] = byterev_8(reg[1].d[1]);
+			buf32[0].d[1] = byterev_8(reg[1].d[0]);
+			buf32[1].d[0] = byterev_8(reg[0].d[1]);
+			buf32[1].d[1] = byterev_8(reg[0].d[0]);
+			memcpy(mem, buf32, size);
+		} else {
+			memcpy(mem, reg, size);
 		}
-		memcpy(mem, reg, size);
 		break;
 	case 16:
 		/* stxv, stxvx, stxvl, stxvll */

arch/riscv/Kconfig:

@@ -252,8 +252,10 @@ choice
 	default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
 
 	config MAXPHYSMEM_1GB
+		depends on 32BIT
 		bool "1GiB"
 	config MAXPHYSMEM_2GB
+		depends on 64BIT && CMODEL_MEDLOW
 		bool "2GiB"
 	config MAXPHYSMEM_128GB
 		depends on 64BIT && CMODEL_MEDANY

arch/riscv/include/asm/page.h:

@@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 
 #endif /* __ASSEMBLY__ */
 
-#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr)	({						\
+	unsigned long _addr = (unsigned long)vaddr;				\
+	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
 
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC
 

arch/riscv/include/asm/set_memory.h:

@@ -32,14 +32,14 @@ bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_STRICT_KERNEL_RWX
 #ifdef CONFIG_64BIT
 #define SECTION_ALIGN (1 << 21)
 #else
 #define SECTION_ALIGN (1 << 22)
 #endif
-#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#else /* !CONFIG_STRICT_KERNEL_RWX */
 #define SECTION_ALIGN L1_CACHE_BYTES
-#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX */
 
 #endif /* _ASM_RISCV_SET_MEMORY_H */

arch/riscv/mm/init.c:

@@ -293,6 +293,8 @@ void free_initmem(void)
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
 
-	set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+
 	free_initmem_default(POISON_FREE_INITMEM);
 }

arch/x86/Makefile:

@@ -120,6 +120,9 @@ else
 
         KBUILD_CFLAGS += -mno-red-zone
         KBUILD_CFLAGS += -mcmodel=kernel
+
+	# Intel CET isn't enabled in the kernel
+	KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
 ifdef CONFIG_X86_X32

arch/x86/include/asm/apic.h:

@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_X2APIC
-/*
- * Make previous memory operations globally visible before
- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
- * mfence for this.
- */
-static inline void x2apic_wrmsr_fence(void)
-{
-	asm volatile("mfence" : : : "memory");
-}
-
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
 	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||

arch/x86/include/asm/barrier.h:

@@ -84,4 +84,22 @@ do {									\
 
 #include <asm-generic/barrier.h>
 
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+	asm volatile("mfence; lfence" : : : "memory");
+}
+
 #endif /* _ASM_X86_BARRIER_H */

arch/x86/include/asm/entry-common.h:

@@ -43,8 +43,6 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 }
 #define arch_check_user_regs arch_check_user_regs
 
-#define ARCH_SYSCALL_EXIT_WORK (_TIF_SINGLESTEP)
-
 static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 						  unsigned long ti_work)
 {

arch/x86/kernel/apic/apic.c:

@@ -41,6 +41,7 @@
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <linux/atomic.h>
+#include <asm/barrier.h>
 #include <asm/mpspec.h>
 #include <asm/i8259.h>
 #include <asm/proto.h>
@@ -477,6 +478,9 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
 
+	/* This MSR is special and need a special fence: */
+	weak_wrmsr_fence();
+
 	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;

arch/x86/kernel/apic/x2apic_cluster.c:

@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
 	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
 
-	x2apic_wrmsr_fence();
+	/* x2apic MSRs are special and need a special fence: */
+	weak_wrmsr_fence();
 	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
 }
 
@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 	unsigned long flags;
 	u32 dest;
 
-	x2apic_wrmsr_fence();
+	/* x2apic MSRs are special and need a special fence: */
+	weak_wrmsr_fence();
 	local_irq_save(flags);
 
 	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);

arch/x86/kernel/apic/x2apic_phys.c:

@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
 	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
 
-	x2apic_wrmsr_fence();
+	/* x2apic MSRs are special and need a special fence: */
+	weak_wrmsr_fence();
 	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
 }
 
@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 	unsigned long this_cpu;
 	unsigned long flags;
 
-	x2apic_wrmsr_fence();
+	/* x2apic MSRs are special and need a special fence: */
+	weak_wrmsr_fence();
 
 	local_irq_save(flags);
 
@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
 {
 	unsigned long cfg = __prepare_ICR(which, vector, 0);
 
-	x2apic_wrmsr_fence();
+	/* x2apic MSRs are special and need a special fence: */
+	weak_wrmsr_fence();
 	native_x2apic_icr_write(cfg, 0);
 }
 

arch/x86/kernel/cpu/intel.c:

@@ -1159,6 +1159,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		1),
 	{}
 };
 

arch/x86/kernel/hw_breakpoint.c:

@@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 			CPU_ENTRY_AREA_TOTAL_SIZE))
 		return true;
 
+	/*
+	 * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
+	 * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
+	 */
+#ifdef CONFIG_SMP
+	if (within_area(addr, end, (unsigned long)__per_cpu_offset,
+			sizeof(unsigned long) * nr_cpu_ids))
+		return true;
+#else
+	if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
+			sizeof(pcpu_unit_offsets)))
+		return true;
+#endif
+
 	for_each_possible_cpu(cpu) {
 		/* The original rw GDT is being used after load_direct_gdt() */
 		if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
@@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 				(unsigned long)&per_cpu(cpu_tlbstate, cpu),
 				sizeof(struct tlb_state)))
 			return true;
+
+		/*
+		 * When in guest (X86_FEATURE_HYPERVISOR), local_db_save()
+		 * will read per-cpu cpu_dr7 before clear dr7 register.
+		 */
+		if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
+				sizeof(cpu_dr7)))
+			return true;
 	}
 
 	return false;
@@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args)
 	struct perf_event *bp;
 	unsigned long *dr6_p;
 	unsigned long dr6;
+	bool bpx;
 
 	/* The DR6 value is pointed by args->err */
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
-	/* If it's a single step, TRAP bits are random */
-	if (dr6 & DR_STEP)
-		return NOTIFY_DONE;
-
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
@@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args)
 		if (likely(!(dr6 & (DR_TRAP0 << i))))
 			continue;
 
-		/*
-		 * The counter may be concurrently released but that can only
-		 * occur from a call_rcu() path. We can then safely fetch
-		 * the breakpoint, use its callback, touch its counter
-		 * while we are in an rcu_read_lock() path.
-		 */
-		rcu_read_lock();
-
 		bp = this_cpu_read(bp_per_reg[i]);
+		if (!bp)
+			continue;
+
+		bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
+
+		/*
+		 * TF and data breakpoints are traps and can be merged, however
+		 * instruction breakpoints are faults and will be raised
+		 * separately.
+		 *
+		 * However DR6 can indicate both TF and instruction
+		 * breakpoints. In that case take TF as that has precedence and
+		 * delay the instruction breakpoint for the next exception.
+		 */
+		if (bpx && (dr6 & DR_STEP))
+			continue;
+
 		/*
 		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
 		 * exception handling
 		 */
 		(*dr6_p) &= ~(DR_TRAP0 << i);
-		/*
-		 * bp can be NULL due to lazy debug register switching
-		 * or due to concurrent perf counter removing.
-		 */
-		if (!bp) {
-			rcu_read_unlock();
-			break;
-		}
 
 		perf_bp_event(bp, args->regs);
 
@@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args)
 		 * Set up resume flag to avoid breakpoint recursion when
 		 * returning back to origin.
 		 */
-		if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
+		if (bpx)
 			args->regs->flags |= X86_EFLAGS_RF;
-
-		rcu_read_unlock();
 	}
 
 	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has

arch/x86/kernel/step.c:

@@ -127,12 +127,17 @@ static int enable_single_step(struct task_struct *child)
 		regs->flags |= X86_EFLAGS_TF;
 
 	/*
-	 * Always set TIF_SINGLESTEP - this guarantees that
-	 * we single-step system calls etc.. This will also
+	 * Always set TIF_SINGLESTEP.  This will also
 	 * cause us to set TF when returning to user mode.
 	 */
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 
+	/*
+	 * Ensure that a trap is triggered once stepping out of a system
+	 * call prior to executing any user instruction.
+	 */
+	set_task_syscall_work(child, SYSCALL_EXIT_TRAP);
+
 	oflags = regs->flags;
 
 	/* Set TF on the kernel stack.. */
@@ -230,6 +235,7 @@ void user_disable_single_step(struct task_struct *child)
 
 	/* Always clear TIF_SINGLESTEP... */
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+	clear_task_syscall_work(child, SYSCALL_EXIT_TRAP);
 
 	/* But touch TF only if it was set by us.. */
 	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))

arch/x86/kvm/cpuid.c:

@@ -321,7 +321,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 	if (cpuid->nent < vcpu->arch.cpuid_nent)
 		goto out;
 	r = -EFAULT;
-	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
 			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
 		goto out;
 	return 0;

arch/x86/kvm/emulate.c:

@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
 							      (u32)msr_data;
+	if (efer & EFER_LMA)
+		ctxt->mode = X86EMUL_MODE_PROT64;
 
 	return X86EMUL_CONTINUE;
 }

arch/x86/kvm/mmu/tdp_mmu.c:

@@ -1049,8 +1049,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 }
 
 /*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
 */
 static void zap_collapsible_spte_range(struct kvm *kvm,
 				       struct kvm_mmu_page *root,
@@ -1062,7 +1062,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 		if (!is_shadow_present_pte(iter.old_spte) ||
-		    is_last_spte(iter.old_spte, iter.level))
+		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
 		pfn = spte_to_pfn(iter.old_spte);

arch/x86/kvm/svm/nested.c:

@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool vmcb12_lma;
 
 	if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
 	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-	if (!vmcb12_lma) {
-		if (vmcb12->save.cr4 & X86_CR4_PAE) {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
+	if (vmcb12_lma) {
 		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
 		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+		    (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
 			return false;
 	}
 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))

arch/x86/kvm/svm/sev.c:

@@ -342,6 +342,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	unsigned long first, last;
 	int ret;
 
+	lockdep_assert_held(&kvm->lock);
+
 	if (ulen == 0 || uaddr + ulen < uaddr)
 		return ERR_PTR(-EINVAL);
 
@@ -1119,12 +1121,20 @@ int svm_register_enc_region(struct kvm *kvm,
 	if (!region)
 		return -ENOMEM;
 
+	mutex_lock(&kvm->lock);
 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
 	if (IS_ERR(region->pages)) {
 		ret = PTR_ERR(region->pages);
+		mutex_unlock(&kvm->lock);
 		goto e_free;
 	}
 
+	region->uaddr = range->addr;
+	region->size = range->size;
+
+	list_add_tail(&region->list, &sev->regions_list);
+	mutex_unlock(&kvm->lock);
+
 	/*
 	 * The guest may change the memory encryption attribute from C=0 -> C=1
 	 * or vice versa for this memory range. Lets make sure caches are
@@ -1133,13 +1143,6 @@ int svm_register_enc_region(struct kvm *kvm,
 	 */
 	sev_clflush_pages(region->pages, region->npages);
 
-	region->uaddr = range->addr;
-	region->size = range->size;
-
-	mutex_lock(&kvm->lock);
-	list_add_tail(&region->list, &sev->regions_list);
-	mutex_unlock(&kvm->lock);
-
 	return ret;
 
 e_free:

arch/x86/kvm/svm/svm.c:

@@ -454,6 +454,11 @@ static int has_svm(void)
 		return 0;
 	}
 
+	if (sev_active()) {
+		pr_info("KVM is unsupported when running as an SEV guest\n");
+		return 0;
+	}
+
 	return 1;
 }
 

arch/x86/kvm/svm/svm.h:

@@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
-#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
 #define MSR_INVALID				0xffffffffU
 
 extern int sev;

arch/x86/kvm/vmx/vmx.c:

@@ -6860,11 +6860,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		switch (index) {
 		case MSR_IA32_TSX_CTRL:
 			/*
-			 * No need to pass TSX_CTRL_CPUID_CLEAR through, so
-			 * let's avoid changing CPUID bits under the host
-			 * kernel's feet.
+			 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+			 * interception.  Keep the host value unchanged to avoid
+			 * changing CPUID bits under the host kernel's feet.
+			 *
+			 * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+			 * combinations of new kernel and old userspace.  If
+			 * those guests run on a tsx=off host, do allow guests
+			 * to use TSX_CTRL, but do not change the value on the
+			 * host so that TSX remains always disabled.
 			 */
-			vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			if (boot_cpu_has(X86_FEATURE_RTM))
+				vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			else
+				vmx->guest_uret_msrs[j].mask = 0;
 			break;
 		default:
 			vmx->guest_uret_msrs[j].mask = -1ull;

arch/x86/kvm/x86.c:

@@ -1394,16 +1394,24 @@ static u64 kvm_get_arch_capabilities(void)
 	if (!boot_cpu_has_bug(X86_BUG_MDS))
 		data |= ARCH_CAP_MDS_NO;
 
-	/*
-	 * On TAA affected systems:
-	 *      - nothing to do if TSX is disabled on the host.
-	 *      - we emulate TSX_CTRL if present on the host.
-	 *	  This lets the guest use VERW to clear CPU buffers.
-	 */
-	if (!boot_cpu_has(X86_FEATURE_RTM))
-		data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
-	else if (!boot_cpu_has_bug(X86_BUG_TAA))
+	if (!boot_cpu_has(X86_FEATURE_RTM)) {
+		/*
+		 * If RTM=0 because the kernel has disabled TSX, the host might
+		 * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
+		 * and therefore knows that there cannot be TAA) but keep
+		 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+		 * and we want to allow migrating those guests to tsx=off hosts.
+		 */
+		data &= ~ARCH_CAP_TAA_NO;
+	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
 		data |= ARCH_CAP_TAA_NO;
+	} else {
+		/*
+		 * Nothing to do here; we emulate TSX_CTRL if present on the
+		 * host so the guest can choose between disabling TSX or
+		 * using VERW to clear CPU buffers.
+		 */
+	}
 
 	return data;
 }
@@ -9616,6 +9624,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		 */
 		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
 			return false;
+		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+			return false;
 	} else {
 		/*
 		 * Not in 64-bit mode: EFER.LMA is clear and the code
@@ -9993,6 +10003,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	fx_init(vcpu);
 
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
 
 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
 
@@ -10494,7 +10505,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 			return 0;
 
 		old_npages = slot->npages;
-		hva = 0;
+		hva = slot->userspace_addr;
 	}
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {

arch/x86/kvm/x86.h:

@@ -425,6 +425,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 		__reserved_bits |= X86_CR4_UMIP;        \
 	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
 		__reserved_bits |= X86_CR4_VMXE;        \
+	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
+		__reserved_bits |= X86_CR4_PCIDE;       \
 	__reserved_bits;                                \
 })
 

arch/x86/mm/mem_encrypt.c:

@@ -382,6 +382,7 @@ bool sev_active(void)
 {
 	return sev_status & MSR_AMD64_SEV_ENABLED;
 }
+EXPORT_SYMBOL_GPL(sev_active);
 
 /* Needs to be called from non-instrumentable code */
 bool noinstr sev_es_active(void)

arch/x86/platform/efi/efi_64.c:

@@ -115,31 +115,12 @@ void efi_sync_low_kernel_mappings(void)
 	pud_t *pud_k, *pud_efi;
 	pgd_t *efi_pgd = efi_mm.pgd;
 
-	/*
-	 * We can share all PGD entries apart from the one entry that
-	 * covers the EFI runtime mapping space.
-	 *
-	 * Make sure the EFI runtime region mappings are guaranteed to
-	 * only span a single PGD entry and that the entry also maps
-	 * other important kernel regions.
-	 */
-	MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
-	MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
-			(EFI_VA_END & PGDIR_MASK));
-
 	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
 	pgd_k = pgd_offset_k(PAGE_OFFSET);
 
 	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
 	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
 
-	/*
-	 * As with PGDs, we share all P4D entries apart from the one entry
-	 * that covers the EFI runtime mapping space.
-	 */
-	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
-	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
-
 	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
 	pgd_k = pgd_offset_k(EFI_VA_END);
 	p4d_efi = p4d_offset(pgd_efi, 0);

block/bfq-iosched.c:

@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * limit 'something'.
 	 */
 	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+	bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
 	/*
 	 * no more than 75% of tags for sync writes (25% extra tags
 	 * w.r.t. async I/O, to prevent async I/O from starving sync
 	 * writes)
 	 */
-	bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+	bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 
 	/*
 	 * In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * shortage.
 	 */
 	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 
 	for (i = 0; i < 2; i++)
 		for (j = 0; j < 2; j++)

@@ -2269,40 +2269,24 @@ static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
struct nfit_set_info_map {
u64 region_offset;
u32 serial_number;
u32 pad;
} mapping[0];
u64 region_offset;
u32 serial_number;
u32 pad;
};

struct nfit_set_info2 {
struct nfit_set_info_map2 {
u64 region_offset;
u32 serial_number;
u16 vendor_id;
u16 manufacturing_date;
u8 manufacturing_location;
u8 reserved[31];
} mapping[0];
u64 region_offset;
u32 serial_number;
u16 vendor_id;
u16 manufacturing_date;
u8 manufacturing_location;
u8 reserved[31];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
return sizeof(struct nfit_set_info)
+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
return sizeof(struct nfit_set_info2)
+ num_mappings * sizeof(struct nfit_set_info_map2);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
const struct nfit_set_info *map0 = m0;
const struct nfit_set_info *map1 = m1;

return memcmp(&map0->region_offset, &map1->region_offset,
sizeof(u64));
@@ -2310,8 +2294,8 @@ static int cmp_map_compat(const void *m0, const void *m1)

static int cmp_map(const void *m0, const void *m1)
{
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
const struct nfit_set_info *map0 = m0;
const struct nfit_set_info *map1 = m1;

if (map0->region_offset < map1->region_offset)
return -1;
@@ -2322,8 +2306,8 @@ static int cmp_map(const void *m0, const void *m1)

static int cmp_map2(const void *m0, const void *m1)
{
const struct nfit_set_info_map2 *map0 = m0;
const struct nfit_set_info_map2 *map1 = m1;
const struct nfit_set_info2 *map0 = m0;
const struct nfit_set_info2 *map1 = m1;

if (map0->region_offset < map1->region_offset)
return -1;
@@ -2361,22 +2345,22 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
return -ENOMEM;
import_guid(&nd_set->type_guid, spa->range_guid);

info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;

info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
if (!info2)
return -ENOMEM;

for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nfit_set_info_map *map = &info->mapping[i];
struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
spa->range_index, i);
struct nfit_set_info *map = &info[i];
struct nfit_set_info2 *map2 = &info2[i];
struct acpi_nfit_memory_map *memdev =
memdev_from_spa(acpi_desc, spa->range_index, i);
struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

if (!memdev || !nfit_mem->dcr) {
@@ -2395,23 +2379,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}

/* v1.1 namespaces */
sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
cmp_map, NULL);
nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
sort(info, nr, sizeof(*info), cmp_map, NULL);
nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0);

/* v1.2 namespaces */
sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
cmp_map2, NULL);
nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
sort(info2, nr, sizeof(*info2), cmp_map2, NULL);
nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0);

/* support v1.1 namespaces created with the wrong sort order */
sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
cmp_map_compat, NULL);
nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
sort(info, nr, sizeof(*info), cmp_map_compat, NULL);
nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0);

/* record the result of the sort for the mapping position */
for (i = 0; i < nr; i++) {
struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nfit_set_info2 *map2 = &info2[i];
int j;

for (j = 0; j < nr; j++) {
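Worth noting about the NFIT hunk above: the old outer structs held nothing but a zero-length flexible array, so the flattened per-mapping layout is byte-for-byte identical, and devm_kcalloc(dev, nr, sizeof(*info)) allocates exactly what sizeof_nfit_set_info(nr) used to. A minimal userspace sketch of that shape; the names here are illustrative, not the NFIT code:

#include <stdio.h>
#include <stdlib.h>

struct elem {                   /* stands in for the flattened entry */
	unsigned long long region_offset;
	unsigned int serial_number;
	unsigned int pad;
};

int main(void)
{
	size_t nr = 4;
	/* same byte count the old sizeof helper computed, since the
	 * removed header struct contributed zero bytes */
	struct elem *info = calloc(nr, sizeof(*info));

	if (!info)
		return 1;

	/* sort/checksum passes can now walk info[0..nr-1] directly */
	printf("bytes covered: %zu\n", sizeof(*info) * nr);

	free(info);
	return 0;
}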
@@ -26,6 +26,7 @@
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));

memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
domain_string(alloc_domain), ret);
goto err_bo_create;
}
bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;

@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
uint32_t domains;
int ret;

obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}

/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}

@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}

retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);

@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;

/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count) {
if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else

@@ -99,6 +99,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0

#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
@@ -4936,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
uint32_t tcc_disable;

switch (adev->asic_type) {
case CHIP_VANGOGH:
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
break;
default:
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
break;
}

adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |

@@ -1833,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;

if (prev_sink != NULL)
dc_sink_retain(prev_sink);
if (prev_sink)
dc_sink_release(prev_sink);

switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
dc_state->streams[k], &bundle->stream_update, dc_state);
dc_state->streams[k], &bundle->stream_update);
}

cleanup:
@@ -1965,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link)

stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
stream_state->ctx->dc->current_state);
stream_state, &stream_update);
mutex_unlock(&adev->dm.dc_lock);
}

@@ -2330,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
if (aconnector->dc_sink)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
dc_sink_release(aconnector->dc_sink);
}

aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -2347,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect(

drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);

if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -7549,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
uint32_t i;
int i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7590,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_commit_cursors(state);

/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7813,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
&bundle->stream_update);

/**
* Enable or disable the interrupts on the backend.
@@ -8150,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_surface_update dummy_updates[MAX_SURFACES];
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;

memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&surface_updates, 0, sizeof(surface_updates));
memset(&stream_update, 0, sizeof(stream_update));

if (acrtc) {
@@ -8213,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
surface_updates[j].surface = status->plane_states[j];


mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
dummy_updates,
surface_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update,
dc_state);
&stream_update);
mutex_unlock(&dm->dc_lock);
}

@@ -8359,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)

ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
goto err;
goto out;

/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto err;
goto out;

/* force a restore */
crtc_state->mode_changed = true;
@@ -8376,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)

ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
goto err;

goto out;

/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
if (!ret)
return 0;

err:
DRM_ERROR("Restoring old state failed with %i\n", ret);
out:
drm_atomic_state_put(state);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);

return ret;
}

@@ -833,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;

if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;

mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -850,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];

if (stream->timing.flags.DSC == 1)
dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
}

return true;

@@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
struct dc_stream_update *stream_update)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
@@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc,


if (update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES];

memset(new_planes, 0, sizeof(new_planes));

for (i = 0; i < surface_count; i++)
new_planes[i] = srf_updates[i].surface;

/* initialize scratch memory for building context */
context = dc_create_state(dc);
@@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc,
return;
}

dc_resource_state_copy_construct(state, context);
dc_resource_state_copy_construct(
dc->current_state, context);

for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
DC_ERROR("Failed to remove streams for new validate context!\n");
return;
}

/* add surface to context */
if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
DC_ERROR("Failed to add streams for new validate context!\n");
return;
}

}



@@ -892,14 +892,14 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte

switch (dpcd_aux_read_interval) {
case 0x01:
aux_rd_interval_us = 400;
break;
case 0x02:
aux_rd_interval_us = 4000;
break;
case 0x03:
case 0x02:
aux_rd_interval_us = 8000;
break;
case 0x03:
aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
break;

@@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state);
struct dc_stream_update *stream_update);
/*
* Log the current stream state.
*/

@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_PLL3,
DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};

@@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);

pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;


@@ -591,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);

gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],

@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <sound/hdmi-codec.h>

@@ -36,6 +37,7 @@ struct lt9611uxc {
struct mutex ocm_lock;

struct wait_queue_head wq;
struct work_struct work;

struct device_node *dsi0_node;
struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {

bool hpd_supported;
bool edid_read;
/* can be accessed from different threads, so protect this with ocm_lock */
bool hdmi_connected;
uint8_t fw_version;
};

@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);

lt9611uxc_unlock(lt9611uxc);

if (irq_status & BIT(0))
if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));

if (irq_status & BIT(1)) {
if (lt9611uxc->connector.dev)
drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
else
drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
wake_up_all(&lt9611uxc->wq);
}

if (irq_status & BIT(1)) {
lt9611uxc->hdmi_connected = hpd_status & BIT(1);
schedule_work(&lt9611uxc->work);
}

lt9611uxc_unlock(lt9611uxc);

return IRQ_HANDLED;
}

static void lt9611uxc_hpd_work(struct work_struct *work)
{
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
bool connected;

if (lt9611uxc->connector.dev)
drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
else {

mutex_lock(&lt9611uxc->ocm_lock);
connected = lt9611uxc->hdmi_connected;
mutex_unlock(&lt9611uxc->ocm_lock);

drm_bridge_hpd_notify(&lt9611uxc->bridge,
connected ?
connector_status_connected :
connector_status_disconnected);
}
}

static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
int connected = 1;
bool connected = true;

lt9611uxc_lock(lt9611uxc);

if (lt9611uxc->hpd_supported) {
lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
lt9611uxc_unlock(lt9611uxc);

if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
lt9611uxc->hdmi_connected = connected;

lt9611uxc_unlock(lt9611uxc);

return connected ? connector_status_connected :
connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
msecs_to_jiffies(100));
msecs_to_jiffies(500));
}

static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
return ERR_PTR(ret);
return NULL;
} else if (ret == 0) {
dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
return NULL;
}

return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ static int lt9611uxc_probe(struct i2c_client *client,
lt9611uxc->fw_version = ret;

init_waitqueue_head(&lt9611uxc->wq);
INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);

ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);

disable_irq(client->irq);
flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);

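In the reworked lt9611uxc IRQ path above, the thread handler sets edid_read under the lock and wakes lt9611uxc->wq, while lt9611uxc_wait_for_edid() sleeps on that flag with a timeout. A minimal userspace analogue of the handshake, assuming pthreads stand in for the kernel waitqueue (the timeout half is omitted for brevity):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool edid_read;

static void *irq_path(void *arg)   /* plays the role of wake_up_all() */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	edid_read = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)                     /* plays the role of wait_event_*() */
{
	pthread_t t;

	pthread_create(&t, NULL, irq_path, NULL);
	pthread_mutex_lock(&lock);
	while (!edid_read)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}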
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}

static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
/**
* drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
* Calculate the total bandwidth of a MultiStream Transport link. The returned
* value is in units of PBNs/(timeslots/1 MTP). This value can be used to
* convert the number of PBNs required for a given stream to the number of
* timeslots this stream requires in each MTP.
*/
int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
if (dp_link_bw == 0 || dp_link_count == 0)
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
if (link_rate == 0 || link_lane_count == 0)
DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
link_rate, link_lane_count);

return dp_link_bw * dp_link_count / 2;
/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
return link_rate * link_lane_count / 54000;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);

/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}

mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
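The new helper's units make the conversion easy to check by hand: HBR2 is 5.4 Gb/s, i.e. 540000 in 10 kb/s units, so a 4-lane HBR2 link gives 540000 * 4 / 54000 = 40 PBN per timeslot, the same value the old bw-code formula (20 * 4 / 2) produced. A small worked example with a local stand-in for the helper (not the DRM export itself):

#include <stdio.h>

static int vc_payload_bw(int link_rate, int lane_count)
{
	/* link_rate is in 10 kb/s units; result is PBN per timeslot */
	return link_rate * lane_count / 54000;
}

int main(void)
{
	printf("HBR2 x4: %d\n", vc_payload_bw(540000, 4)); /* -> 40 */
	printf("HBR  x2: %d\n", vc_payload_bw(270000, 2)); /* -> 10 */
	return 0;
}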
@@ -2754,6 +2754,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;

if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;

ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
if (level >= n_entries) {
drm_dbg_kms(&dev_priv->drm,
@@ -2890,6 +2893,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;

if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;

ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);

if (level >= n_entries)
@@ -3531,6 +3537,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}

static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(i915, encoder->port);

if (intel_phy_is_combo(i915, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;

intel_combo_phy_power_up_lanes(i915, phy, false,
crtc_state->lane_count,
lane_reversal);
}
}

static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3620,14 +3643,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;

intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
intel_ddi_power_up_lanes(encoder, crtc_state);

/*
* 7.g Configure and enable DDI_BUF_CTL
@@ -3712,14 +3728,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);

if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;

intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
intel_ddi_power_up_lanes(encoder, crtc_state);

intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
@@ -4205,6 +4214,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, reg, val);
}

intel_ddi_power_up_lanes(encoder, crtc_state);

/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.

@@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
@@ -2327,12 +2327,9 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
i915_gem_object_unlock(vma->obj);

i915_vma_unpin(vma);
i915_vma_put(vma);
}

@@ -4807,6 +4804,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}

return plane_color_ctl;

@@ -4637,24 +4637,6 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];

drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT,
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "");

intel_dp->set_signal_levels(intel_dp, crtc_state);
}

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -5703,7 +5685,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,

intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);

intel_dp_set_signal_levels(intel_dp, crtc_state);
intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

intel_dp_phy_pattern_update(intel_dp, crtc_state);

@@ -96,9 +96,6 @@ void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);

@@ -334,6 +334,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
char phy_name[10];

drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT,
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "",
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
intel_dp->set_signal_levels(intel_dp, crtc_state);
}

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -341,7 +362,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, crtc_state);
intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

@@ -355,7 +376,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;

intel_dp_set_signal_levels(intel_dp, crtc_state);
intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);

@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,

@@ -69,7 +69,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,

slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
crtc_state->pbn, 0);
crtc_state->pbn,
drm_dp_get_vc_payload_bw(crtc_state->port_clock,
crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)

@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));

i915_gem_object_unpin_from_display_plane(vma);
i915_vma_unpin(vma);
i915_vma_put(vma);
}

@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;

out_unpin:
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

@@ -618,13 +618,19 @@ skl_program_scaler(struct intel_plane *plane,

/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
#define PREOFF_YUV_TO_RGB_ME 0x1F00
#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800

#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)

/*
* Programs the input color space conversion stage for ICL HDR planes.
* Note that it is assumed that this stage always happens after YUV
* range correction. Thus, the input to this stage is assumed to be
* in full-range YCbCr.
*/
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -672,52 +678,7 @@ icl_program_input_csc(struct intel_plane *plane,
0x0, 0x7800, 0x7F10,
},
};

/* Matrix for Limited Range to Full Range Conversion */
static const u16 input_csc_matrix_lr[][9] = {
/*
* BT.601 Limted range YCbCr -> full range RGB
* The matrix required is :
* [1.164384, 0.000, 1.596027,
* 1.164384, -0.39175, -0.812813,
* 1.164384, 2.017232, 0.0000]
*/
[DRM_COLOR_YCBCR_BT601] = {
0x7CC8, 0x7950, 0x0,
0x8D00, 0x7950, 0x9C88,
0x0, 0x7950, 0x6810,
},
/*
* BT.709 Limited range YCbCr -> full range RGB
* The matrix required is :
* [1.164384, 0.000, 1.792741,
* 1.164384, -0.213249, -0.532909,
* 1.164384, 2.112402, 0.0000]
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7E58, 0x7950, 0x0,
0x8888, 0x7950, 0xADA8,
0x0, 0x7950, 0x6870,
},
/*
* BT.2020 Limited range YCbCr -> full range RGB
* The matrix required is :
* [1.164, 0.000, 1.678,
* 1.164, -0.1873, -0.6504,
* 1.164, 2.1417, 0.0000]
*/
[DRM_COLOR_YCBCR_BT2020] = {
0x7D70, 0x7950, 0x0,
0x8A68, 0x7950, 0xAC00,
0x0, 0x7950, 0x6890,
},
};
const u16 *csc;

if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
csc = input_csc_matrix[plane_state->hw.color_encoding];
else
csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];

intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +695,8 @@ icl_program_input_csc(struct intel_plane *plane,

intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
intel_de_write_fw(dev_priv,
PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
0);
else
intel_de_write_fw(dev_priv,
PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,

@@ -387,48 +387,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return vma;
}

static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;

if (list_empty(&obj->vma.list))
return;

mutex_lock(&i915->ggtt.vm.mutex);
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (!drm_mm_node_allocated(&vma->node))
continue;

GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
spin_unlock(&obj->vma.lock);
mutex_unlock(&i915->ggtt.vm.mutex);

if (i915_gem_object_is_shrinkable(obj)) {
unsigned long flags;

spin_lock_irqsave(&i915->mm.obj_lock, flags);

if (obj->mm.madv == I915_MADV_WILLNEED &&
!atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);

spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(vma->obj);

i915_vma_unpin(vma);
}

/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

/* And bump the LRU for this access */
i915_gem_object_bump_inactive_ggtt(obj);

i915_gem_object_unlock(obj);

if (write_domain)

@@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);

@@ -187,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}

static bool __signal_request(struct i915_request *rq)
{
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));

if (!__dma_fence_signal(&rq->fence)) {
i915_request_put(rq);
return false;
}

return true;
}

static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
@@ -269,9 +257,11 @@ static void signal_irq_work(struct irq_work *work)
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);

if (__signal_request(rq))
if (__dma_fence_signal(&rq->fence))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
else
i915_request_put(rq);

if (release) {
add_retire(b, ce->timeline);
@@ -358,6 +348,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}

static void irq_signal_request(struct i915_request *rq,
struct intel_breadcrumbs *b)
{
if (!__dma_fence_signal(&rq->fence))
return;

i915_request_get(rq);
if (llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
}

static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -367,17 +368,13 @@ static void insert_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;

i915_request_get(rq);

/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
if (__i915_request_is_complete(rq)) {
if (__signal_request(rq) &&
llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
irq_signal_request(rq, b);
return;
}

@@ -408,6 +405,8 @@ static void insert_breadcrumb(struct i915_request *rq)
break;
}
}

i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -448,19 +447,25 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)

void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;

if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;

spin_lock(&ce->signal_lock);
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
spin_unlock(&ce->signal_lock);
return;
}

list_del_rcu(&rq->signal_link);
release = remove_signaling_context(rq->engine->breadcrumbs, ce);
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);

if (__i915_request_is_complete(rq))
irq_signal_request(rq, b);

i915_request_put(rq);
}

@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
int i, j;

if (!ttm_dma)
return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;

for (i = 0; i < ttm_dma->num_pages; i++)
for (i = 0; i < ttm_dma->num_pages; ++i) {
struct page *p = ttm_dma->pages[i];
size_t num_pages = 1;

for (j = i + 1; j < ttm_dma->num_pages; ++j) {
if (++p != ttm_dma->pages[j])
break;

++num_pages;
}
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
PAGE_SIZE, DMA_TO_DEVICE);
num_pages * PAGE_SIZE, DMA_TO_DEVICE);
i += num_pages;
}
}

void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
int i, j;

if (!ttm_dma)
return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;

for (i = 0; i < ttm_dma->num_pages; i++)
for (i = 0; i < ttm_dma->num_pages; ++i) {
struct page *p = ttm_dma->pages[i];
size_t num_pages = 1;

for (j = i + 1; j < ttm_dma->num_pages; ++j) {
if (++p != ttm_dma->pages[j])
break;

++num_pages;
}

dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_FROM_DEVICE);
num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
i += num_pages;
}
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
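The nouveau hunks above batch runs of physically contiguous pages into a single dma_sync_single_for_*() call instead of one call per page. A minimal sketch of the same coalescing idea over a plain array of page frame numbers; sync_range() is a hypothetical stand-in for the DMA API call, and this is an illustration of the loop shape, not the driver code:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void sync_range(unsigned long addr, size_t len)
{
	printf("sync %#lx len %zu\n", addr, len);
}

static void sync_pages(const unsigned long *pfn, size_t n)
{
	for (size_t i = 0; i < n; ++i) {
		size_t run = 1;

		/* extend the run while the next page follows directly */
		while (i + run < n && pfn[i + run] == pfn[i] + run)
			++run;

		sync_range(pfn[i] * PAGE_SIZE, run * PAGE_SIZE);
		i += run - 1;   /* the loop's ++i steps past the run */
	}
}

int main(void)
{
	unsigned long pfn[] = { 10, 11, 12, 40, 41, 99 };

	sync_pages(pfn, sizeof(pfn) / sizeof(pfn[0]));
	return 0;
}

For the sample input this issues three calls (pages 10-12, 40-41, and 99) instead of six.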
@ -84,7 +84,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
|
|||
* put_page() on a TTM allocated page is illegal.
|
||||
*/
|
||||
if (order)
|
||||
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
|
||||
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
|
||||
__GFP_KSWAPD_RECLAIM;
|
||||
|
||||
if (!pool->use_dma_alloc) {
|
||||
|
|
|
@ -215,9 +215,17 @@ static const struct xpad_device {
|
|||
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
|
||||
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
|
||||
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
|
||||
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
|
||||
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
|
||||
|
@ -296,6 +304,9 @@ static const struct xpad_device {
|
|||
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
|
||||
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
|
||||
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
|
||||
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
|
||||
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
|
||||
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
|
||||
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
|
||||
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
|
||||
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
|
||||
|
@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
|
|||
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
|
||||
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
|
||||
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
|
||||
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
|
||||
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
|
||||
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
|
||||
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
|
||||
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
|
||||
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
|
||||
{ }
|
||||
};
|
||||
|
||||
|
|
|
@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
|
||||
|
||||
static const struct spi_device_id ariel_pwrbutton_id_table[] = {
|
||||
{ "wyse-ariel-ec-input", 0 },
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
|
||||
|
||||
static struct spi_driver ariel_pwrbutton_driver = {
|
||||
.driver = {
|
||||
.name = "dell-wyse-ariel-ec-input",
|
||||
|
|
|
@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
|
|||
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
|
||||
|
|
|
@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
|
|||
{ .id = "5663", .data = >1x_chip_data },
|
||||
{ .id = "5688", .data = >1x_chip_data },
|
||||
{ .id = "917S", .data = >1x_chip_data },
|
||||
{ .id = "9286", .data = >1x_chip_data },
|
||||
|
||||
{ .id = "911", .data = >911_chip_data },
|
||||
{ .id = "9271", .data = >911_chip_data },
|
||||
|
@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = {
|
|||
{ .compatible = "goodix,gt927" },
|
||||
{ .compatible = "goodix,gt9271" },
|
||||
{ .compatible = "goodix,gt928" },
|
||||
{ .compatible = "goodix,gt9286" },
|
||||
{ .compatible = "goodix,gt967" },
|
||||
{ }
|
||||
};
|
||||
|
|
|
@ -29,11 +29,13 @@ struct ili2xxx_chip {
|
|||
void *buf, size_t len);
|
||||
int (*get_touch_data)(struct i2c_client *client, u8 *data);
|
||||
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
|
||||
unsigned int *x, unsigned int *y);
|
||||
unsigned int *x, unsigned int *y,
|
||||
unsigned int *z);
|
||||
bool (*continue_polling)(const u8 *data, bool touch);
|
||||
unsigned int max_touches;
|
||||
unsigned int resolution;
|
||||
bool has_calibrate_reg;
|
||||
bool has_pressure_reg;
|
||||
};
|
||||
|
||||
struct ili210x {
|
||||
|
@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
|
|||
|
||||
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
|
||||
unsigned int finger,
|
||||
unsigned int *x, unsigned int *y)
|
||||
unsigned int *x, unsigned int *y,
|
||||
unsigned int *z)
|
||||
{
|
||||
if (touchdata[0] & BIT(finger))
|
||||
return false;
|
||||
|
@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
|
|||
|
||||
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
|
||||
unsigned int finger,
|
||||
unsigned int *x, unsigned int *y)
|
||||
unsigned int *x, unsigned int *y,
|
||||
unsigned int *z)
|
||||
{
|
||||
u32 data;
|
||||
|
||||
|
@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
|
|||
|
||||
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
|
||||
unsigned int finger,
|
||||
unsigned int *x, unsigned int *y)
|
||||
unsigned int *x, unsigned int *y,
|
||||
unsigned int *z)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
|
@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
|
|||
|
||||
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
|
||||
unsigned int finger,
|
||||
unsigned int *x, unsigned int *y)
|
||||
unsigned int *x, unsigned int *y,
|
||||
unsigned int *z)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
|
@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
|
|||
|
||||
*x = val & 0x3fff;
|
||||
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
|
||||
*z = touchdata[1 + (finger * 5) + 4];
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
|
|||
.continue_polling = ili251x_check_continue_polling,
|
||||
.max_touches = 10,
|
||||
.has_calibrate_reg = true,
|
||||
.has_pressure_reg = true,
|
||||
};
|
||||
|
||||
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
|
||||
|
@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
|
|||
struct input_dev *input = priv->input;
|
||||
int i;
|
||||
bool contact = false, touch;
|
||||
unsigned int x = 0, y = 0;
|
||||
unsigned int x = 0, y = 0, z = 0;
|
||||
|
||||
for (i = 0; i < priv->chip->max_touches; i++) {
|
||||
touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
|
||||
touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
|
||||
|
||||
input_mt_slot(input, i);
|
||||
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
|
||||
touchscreen_report_pos(input, &priv->prop, x, y, true);
|
||||
if (priv->chip->has_pressure_reg)
|
||||
input_report_abs(input, ABS_MT_PRESSURE, z);
|
||||
contact = true;
|
||||
}
|
||||
}
|
||||
|
@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
|
|||
max_xy = (chip->resolution ?: SZ_64K) - 1;
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
|
||||
if (priv->chip->has_pressure_reg)
|
||||
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
|
||||
touchscreen_parse_properties(input, true, &priv->prop);
|
||||
|
||||
error = input_mt_init_slots(input, priv->chip->max_touches,
|
||||
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -26,6 +26,20 @@
 #define ST1232_TS_NAME	"st1232-ts"
 #define ST1633_TS_NAME	"st1633-ts"
 
+#define REG_STATUS		0x01	/* Device Status | Error Code */
+
+#define STATUS_NORMAL		0x00
+#define STATUS_INIT		0x01
+#define STATUS_ERROR		0x02
+#define STATUS_AUTO_TUNING	0x03
+#define STATUS_IDLE		0x04
+#define STATUS_POWER_DOWN	0x05
+
+#define ERROR_NONE		0x00
+#define ERROR_INVALID_ADDRESS	0x10
+#define ERROR_INVALID_VALUE	0x20
+#define ERROR_INVALID_PLATFORM	0x30
+
 #define REG_XY_RESOLUTION	0x04
 #define REG_XY_COORDINATES	0x12
 #define ST_TS_MAX_FINGERS	10
@@ -47,7 +61,8 @@ struct st1232_ts_data {
 	u8 *read_buf;
 };
 
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+			       unsigned int n)
 {
 	struct i2c_client *client = ts->client;
 	struct i2c_msg msg[] = {
@@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
 		{
 			.addr	= client->addr,
 			.flags	= I2C_M_RD | I2C_M_DMA_SAFE,
-			.len	= ts->read_buf_len,
+			.len	= n,
 			.buf	= ts->read_buf,
 		}
 	};
@@ -72,6 +87,22 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
 	return 0;
 }
 
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+	unsigned int retries;
+	int error;
+
+	for (retries = 10; retries; retries--) {
+		error = st1232_ts_read_data(ts, REG_STATUS, 1);
+		if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	return -ENXIO;
+}
+
 static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
 				     u16 *max_y)
 {
@@ -79,14 +110,14 @@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
 	int error;
 
 	/* select resolution register */
-	error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+	error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
 	if (error)
 		return error;
 
 	buf = ts->read_buf;
 
-	*max_x = ((buf[0] & 0x0070) << 4) | buf[1];
-	*max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+	*max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+	*max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
 
 	return 0;
 }
@@ -140,7 +171,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
 	int count;
 	int error;
 
-	error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+	error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
 	if (error)
 		goto out;
 
@@ -251,6 +282,11 @@ static int st1232_ts_probe(struct i2c_client *client,
 	input_dev->name = "st1232-touchscreen";
 	input_dev->id.bustype = BUS_I2C;
 
+	/* Wait until device is ready */
+	error = st1232_ts_wait_ready(ts);
+	if (error)
+		return error;
+
 	/* Read resolution from the chip */
 	error = st1232_ts_read_resolution(ts, &max_x, &max_y);
 	if (error) {
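st1232_ts_wait_ready() above is a classic bounded poll: re-read a status register a fixed number of times with a short sleep in between, and fail with -ENXIO if the controller never reports ready. A user-space approximation of the same control flow follows; read_status() is an invented stand-in for the driver's I2C transfer, and the "ready on the third poll" behaviour is simulated.

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define STATUS_NORMAL	0x00
#define ERROR_NONE	0x00

/* Invented stand-in for the I2C status read; "ready" on the third poll. */
static int read_status(unsigned char *status)
{
	static int polls;

	*status = (++polls >= 3) ? (STATUS_NORMAL | ERROR_NONE) : 0x03;
	return 0;
}

static int wait_ready(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
	unsigned char status;
	unsigned int retries;

	for (retries = 10; retries; retries--) {
		if (!read_status(&status) &&
		    status == (STATUS_NORMAL | ERROR_NONE))
			return 0;

		nanosleep(&delay, NULL);  /* ~2 ms, like usleep_range(1000, 2000) */
	}

	return -ENXIO;  /* device never reported ready */
}

int main(void)
{
	printf("wait_ready() = %d\n", wait_ready());
	return 0;
}

Capping the retries rather than looping forever keeps probe from hanging when the hardware is absent, which is exactly why the driver does it this way at probe time.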
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -20,6 +20,8 @@
 #include "sdio_cis.h"
 #include "sdio_ops.h"
 
+#define SDIO_READ_CIS_TIMEOUT_MS  (10 * 1000) /* 10s */
+
 static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
 			 const unsigned char *buf, unsigned size)
 {
@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 
 	do {
 		unsigned char tpl_code, tpl_link;
+		unsigned long timeout = jiffies +
+			msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
 		if (ret)
@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 			prev = &this->next;
 
 			if (ret == -ENOENT) {
+				if (time_after(jiffies, timeout))
+					break;
 				/* warn about unknown tuples */
 				pr_warn_ratelimited("%s: queuing unknown"
 						    " CIS tuple 0x%02x (%u bytes)\n",
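The sdio_cis.c change bounds a potentially endless tuple scan with a deadline computed once up front and checked with time_after(). The same deadline idiom translated from jiffies to CLOCK_MONOTONIC for a user-space sketch; fetch_tuple() is a made-up stand-in for mmc_io_rw_direct() that succeeds after a few attempts so the program exits quickly.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define READ_CIS_TIMEOUT_MS	(10 * 1000)	/* 10s, as in the patch */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Invented stand-in for mmc_io_rw_direct(): succeeds on the fifth call. */
static bool fetch_tuple(void)
{
	static int calls;

	return ++calls >= 5;
}

int main(void)
{
	long long timeout = now_ms() + READ_CIS_TIMEOUT_MS;

	while (!fetch_tuple()) {
		/* same role as time_after(jiffies, timeout) in the hunk */
		if (now_ms() > timeout) {
			fprintf(stderr, "CIS scan timed out\n");
			return 1;
		}
	}

	printf("tuple found before the deadline\n");
	return 0;
}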
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
 	return host->private;
 }
 
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
 int sdhci_pltfm_suspend(struct device *dev);
 int sdhci_pltfm_resume(struct device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
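The sdhci-pltfm.h hunk is the usual kernel header idiom for optional functionality: real declarations under #ifdef CONFIG_PM_SLEEP, no-op static inline stubs otherwise, so call sites compile unchanged in either configuration. A minimal compilable sketch of the idiom, with HAVE_SLEEP standing in for the Kconfig symbol and pltfm_suspend()/pltfm_resume() as illustrative names:

#include <stdio.h>

#define HAVE_SLEEP 0	/* flip to 1 to use the "real" implementations */

#if HAVE_SLEEP
int pltfm_suspend(void) { printf("real suspend\n"); return 0; }
int pltfm_resume(void)  { printf("real resume\n");  return 0; }
#else
static inline int pltfm_suspend(void) { return 0; }	/* no-op stub */
static inline int pltfm_resume(void)  { return 0; }	/* no-op stub */
#endif

int main(void)
{
	/* Callers need no #ifdef of their own. */
	printf("suspend=%d resume=%d\n", pltfm_suspend(), pltfm_resume());
	return 0;
}

The stubs cost nothing at run time (they inline to nothing) and avoid the link errors that motivated this fix, where a driver referenced the suspend/resume symbols in a !CONFIG_PM_SLEEP build.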
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(state);
 
-static ssize_t available_slots_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
+static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
 {
-	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+	struct device *dev;
 	ssize_t rc;
 	u32 nfree;
 
 	if (!ndd)
 		return -ENXIO;
 
+	dev = ndd->dev;
 	nvdimm_bus_lock(dev);
 	nfree = nd_label_nfree(ndd);
 	if (nfree - 1 > nfree) {
@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev,
 	nvdimm_bus_unlock(dev);
 	return rc;
 }
+
+static ssize_t available_slots_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	ssize_t rc;
+
+	nd_device_lock(dev);
+	rc = __available_slots_show(dev_get_drvdata(dev), buf);
+	nd_device_unlock(dev);
+
+	return rc;
+}
 static DEVICE_ATTR_RO(available_slots);
 
 __weak ssize_t security_show(struct device *dev,
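Apart from the locking split, note the `if (nfree - 1 > nfree)` context line in the first hunk: it looks odd but is a deliberate unsigned-underflow guard. Subtracting 1 from a u32 holding 0 wraps to 0xffffffff, so the comparison is true exactly when there are no free label slots. A tiny self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vals[] = { 0, 1, 42 };

	for (unsigned int i = 0; i < 3; i++) {
		uint32_t nfree = vals[i];

		if (nfree - 1 > nfree)	/* wrapped: nfree was 0 */
			printf("nfree=%u: underflow detected\n", nfree);
		else
			printf("nfree=%u: ok, nfree-1=%u\n", nfree, nfree - 1);
	}
	return 0;
}

The guard relies on unsigned arithmetic being well defined modulo 2^32; the same trick with a signed type would be undefined behaviour.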
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj,
 		return a->mode;
 	}
 
-	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
-			|| a == &dev_attr_holder.attr
-			|| a == &dev_attr_holder_class.attr
-			|| a == &dev_attr_force_raw.attr
-			|| a == &dev_attr_mode.attr)
+	/* base is_namespace_io() attributes */
+	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
+	    a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
+	    a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
+	    a == &dev_attr_resource.attr)
 		return a->mode;
 
 	return 0;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -23,7 +23,6 @@
 #include <linux/uio.h>
 #include <linux/dax.h>
 #include <linux/nd.h>
-#include <linux/backing-dev.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include "pmem.h"
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3242,6 +3242,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
@@ -3259,6 +3261,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
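The two pci.c hunks simply grow nvme_id_table, pairing new vendor/device IDs with quirk flags carried in driver_data. The sketch below shows the table-plus-sentinel lookup shape such quirk tables rely on; the flag values and the lookup_quirks() helper are invented for illustration, and only the two IDs come from the hunks above.

#include <stdio.h>

#define QUIRK_IGNORE_DEV_SUBNQN	(1u << 0)
#define QUIRK_NO_DEEPEST_PS	(1u << 1)

struct pci_id {
	unsigned short vendor, device;
	unsigned int driver_data;
};

static const struct pci_id id_table[] = {
	{ 0x1987, 0x5016, QUIRK_IGNORE_DEV_SUBNQN },	/* Phison E16 */
	{ 0x2646, 0x2263, QUIRK_NO_DEEPEST_PS },	/* Kingston A2000 */
	{ 0, 0, 0 },					/* sentinel ends the table */
};

static unsigned int lookup_quirks(unsigned short vendor, unsigned short device)
{
	for (const struct pci_id *id = id_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id->driver_data;
	return 0;	/* unknown device: no quirks */
}

int main(void)
{
	printf("A2000 quirks: %#x\n", lookup_quirks(0x2646, 0x2263));
	printf("unknown dev:  %#x\n", lookup_quirks(0x1234, 0x5678));
	return 0;
}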
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 	length = cmd->pdu_len;
 	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
 	offset = cmd->rbytes_done;
-	cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+	cmd->sg_idx = offset / PAGE_SIZE;
 	sg_offset = offset % PAGE_SIZE;
 	sg = &cmd->req.sg[cmd->sg_idx];
 
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 		length -= iov_len;
 		sg = sg_next(sg);
 		iov++;
+		sg_offset = 0;
 	}
 
 	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
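The tcp.c fix is an off-by-one in page arithmetic: the scatterlist element containing byte `offset` is index offset / PAGE_SIZE (floor division), whereas DIV_ROUND_UP lands one element too far whenever offset is not page aligned; the second hunk's sg_offset reset fixes the related carry-over into subsequent elements. A quick self-contained check of the two formulas:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int offsets[] = { 0, 1, 4095, 4096, 4097, 8192 };

	for (unsigned int i = 0; i < 6; i++) {
		unsigned int off = offsets[i];

		/* floor is the page that actually contains `off`;
		 * round-up overshoots for every unaligned offset. */
		printf("offset %5u: floor=%u round_up=%u\n", off,
		       off / PAGE_SIZE, DIV_ROUND_UP(off, PAGE_SIZE));
	}
	return 0;
}

For offset 4095 the byte lives in page 0, but DIV_ROUND_UP says page 1, which is exactly the kind of misdirected mapping the patch removes.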
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -805,8 +805,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
 
 	spin_lock_irq(&rtc_lock);
 
-	/* Ensure that the RTC is accessible. Bit 0-6 must be 0! */
-	if ((CMOS_READ(RTC_VALID) & 0x7f) != 0) {
+	/* Ensure that the RTC is accessible. Bit 6 must be 0! */
+	if ((CMOS_READ(RTC_VALID) & 0x40) != 0) {
 		spin_unlock_irq(&rtc_lock);
 		dev_warn(dev, "not accessible\n");
 		retval = -ENXIO;
Some files were not shown because too many files have changed in this diff.