Merge 5.14-rc3 into driver-core-next

We need the driver-core fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2021-07-27 09:22:08 +02:00
commit bdac4d8abb
308 changed files with 3430 additions and 1980 deletions


@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
tagged pointers in this context is allowed with the exception of
``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
tagged pointers in this context is allowed with these exceptions:
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- The ``range.start``, ``start`` and ``dst`` arguments to the
``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
``userfaultfd()``, as fault addresses subsequently obtained by reading
the file descriptor will be untagged, which may otherwise confuse
tag-unaware programs.
NOTE: This behaviour changed in v5.14 and so some earlier kernels may
incorrectly accept valid tagged pointers for this system call.
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
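For background on the ABI relaxation in item 2: a thread opts in via prctl(). A minimal user-space sketch, assuming an arm64 kernel with the tagged address ABI; the fallback #defines mirror the UAPI values in case older headers lack them:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #endif
    #ifndef PR_TAGGED_ADDR_ENABLE
    #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
    #endif

    int main(void)
    {
            /* Opt this thread in to passing tagged pointers to syscalls. */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                    perror("PR_SET_TAGGED_ADDR_CTRL");
            return 0;
    }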


@ -1,56 +0,0 @@
IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP.
This file documents platform glue layer for IMX.
Please see stmmac.txt for the other unchanged properties.
The device node has following properties.
Required properties:
- compatible: Should be "nxp,imx8mp-dwmac-eqos" to select glue layer
and "snps,dwmac-5.10a" to select IP version.
- clocks: Must contain a phandle for each entry in clock-names.
- clock-names: Should be "stmmaceth" for the host clock.
Should be "pclk" for the MAC apb clock.
Should be "ptp_ref" for the MAC timer clock.
Should be "tx" for the MAC RGMII TX clock:
Should be "mem" for EQOS MEM clock.
- "mem" clock is required for imx8dxl platform.
- "mem" clock is not required for imx8mp platform.
- interrupt-names: Should contain a list of interrupt names corresponding to
the interrupts in the interrupts property, if available.
Should be "macirq" for the main MAC IRQ
Should be "eth_wake_irq" for the IT which wake up system
- intf_mode: Should be a phandle/offset pair. The phandle to the syscon node which
encompasses the GPR register, and the offset of the GPR register.
- required for imx8mp platform.
- is optional for imx8dxl platform.
Optional properties:
- intf_mode: is optional for imx8dxl platform.
- snps,rmii_refclk_ext: to select an external RMII reference clock.
Example:
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eth_wake_irq", "macirq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
<&clk IMX8MP_SYS_PLL2_100M>,
<&clk IMX8MP_SYS_PLL2_125M>;
assigned-clock-rates = <0>, <100000000>, <125000000>;
nvmem-cells = <&eth_mac0>;
nvmem-cell-names = "mac-address";
nvmem_macaddr_swap;
intf_mode = <&gpr 0x4>;
status = "disabled";
};


@ -0,0 +1,93 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP i.MX8 DWMAC glue layer Device Tree Bindings
maintainers:
- Joakim Zhang <qiangqing.zhang@nxp.com>
# We need a select here so we don't match all nodes with 'snps,dwmac'
select:
properties:
compatible:
contains:
enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
required:
- compatible
allOf:
- $ref: "snps,dwmac.yaml#"
properties:
compatible:
oneOf:
- items:
- enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
- const: snps,dwmac-5.10a
clocks:
minItems: 3
maxItems: 5
items:
- description: MAC host clock
- description: MAC apb clock
- description: MAC timer clock
- description: MAC RGMII TX clock
- description: EQOS MEM clock
clock-names:
minItems: 3
maxItems: 5
contains:
enum:
- stmmaceth
- pclk
- ptp_ref
- tx
- mem
intf_mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Should be a phandle/offset pair. The phandle to the syscon node which
encompasses the GPR register, and the offset of the GPR register.
snps,rmii_refclk_ext:
$ref: /schemas/types.yaml#/definitions/flag
description:
To select an external RMII reference clock.
required:
- compatible
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/imx8mp-clock.h>
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos","snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
phy-mode = "rgmii";
status = "disabled";
};


@ -28,6 +28,7 @@ select:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@ -82,6 +83,7 @@ properties:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@ -375,6 +377,7 @@ allOf:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac


@ -57,12 +57,14 @@ properties:
maxItems: 1
power-domains:
deprecated: true
description:
Power domain to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain.
maxItems: 1
required-opps:
deprecated: true
description:
Performance state to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain. The


@ -114,7 +114,7 @@ properties:
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
patternProperties:
port(@[0-9a-f]+)?:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false


@ -243,8 +243,8 @@ Configuration Flags and Socket Options
These are the various configuration flags that can be used to control
and monitor the behavior of AF_XDP sockets.
XDP_COPY and XDP_ZERO_COPY bind flags
-------------------------------------
XDP_COPY and XDP_ZEROCOPY bind flags
------------------------------------
When you bind to a socket, the kernel will first try to use zero-copy
mode. If zero-copy is not supported, it will fall back on using copy
@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
like to force a certain mode, you can use the following flags. If you
pass the XDP_COPY flag to the bind call, the kernel will force the
socket into copy mode. If it cannot use copy mode, the bind call will
fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
socket into zero-copy mode or fail.
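To make the fallback concrete, here is a hedged sketch of a bind helper; it assumes an AF_XDP socket whose UMEM and rings are already registered, with flags from <linux/if_xdp.h>:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    /* Try zero-copy first, then fall back to copy mode. */
    static int xsk_bind(int fd, int ifindex, unsigned int queue_id)
    {
            struct sockaddr_xdp sxdp;

            memset(&sxdp, 0, sizeof(sxdp));
            sxdp.sxdp_family = AF_XDP;
            sxdp.sxdp_ifindex = ifindex;
            sxdp.sxdp_queue_id = queue_id;

            sxdp.sxdp_flags = XDP_ZEROCOPY;
            if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0)
                    return 0;

            sxdp.sxdp_flags = XDP_COPY;
            return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
    }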
XDP_SHARED_UMEM bind flag


@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
initial value when the blackhole issue goes away.
0 to disable the blackhole detection.
By default, it is set to 1hr.
By default, it is set to 0 (feature is disabled).
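The tunable lives under /proc/sys/net/ipv4/. A small sketch that reads the current value (path as documented here; minimal error handling):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_blackhole_timeout_sec", "r");
            int sec;

            if (f && fscanf(f, "%d", &sec) == 1)
                    printf("blackhole timeout: %d s (0 = detection disabled)\n", sec);
            if (f)
                    fclose(f);
            return 0;
    }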
tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
The list consists of a primary key and an optional backup key. The


@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
cpu int the cpu on which the event occurred.
common_cpu int the cpu on which the event occurred.
====================== ==== =======================================
Extended error information


@ -445,7 +445,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
ACRN HYPERVISOR SERVICE MODULE
M: Shuo Liu <shuo.a.liu@intel.com>
M: Fei Li <fei1.li@intel.com>
L: acrn-dev@lists.projectacrn.org (subscribers-only)
S: Supported
W: https://projectacrn.org
@ -11758,6 +11758,7 @@ F: drivers/char/hw_random/mtk-rng.c
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
M: Landen Chao <Landen.Chao@mediatek.com>
M: DENG Qingfang <dqfext@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mt7530.*
@ -19122,7 +19123,7 @@ M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
F: drivers/phy/hisilicon/phy-kirin970-usb3.c
F: drivers/phy/hisilicon/phy-hi3670-usb3.c
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
@ -19800,6 +19801,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
VMWARE VMCI DRIVER
M: Jorgen Hansen <jhansen@vmware.com>
M: Vishnu Dasa <vdasa@vmware.com>
L: linux-kernel@vger.kernel.org
L: pv-drivers@vmware.com (private)
S: Maintained
F: drivers/misc/vmw_vmci/
VMWARE VMMOUSE SUBDRIVER
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Opossums on Parade
# *DOCUMENTATION*


@ -821,9 +821,9 @@ fec: ethernet@30be0000 {
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eth_wake_irq", "macirq";
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,


@ -1063,7 +1063,7 @@ &usb2 {
status = "okay";
extcon = <&usb2_id>;
usb@7600000 {
dwc3@7600000 {
extcon = <&usb2_id>;
dr_mode = "otg";
maximum-speed = "high-speed";
@ -1074,7 +1074,7 @@ &usb3 {
status = "okay";
extcon = <&usb3_id>;
usb@6a00000 {
dwc3@6a00000 {
extcon = <&usb3_id>;
dr_mode = "otg";
};


@ -443,7 +443,7 @@ usb_0: usb@8af8800 {
resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
dwc_0: usb@8a00000 {
dwc_0: dwc3@8a00000 {
compatible = "snps,dwc3";
reg = <0x8a00000 0xcd00>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
@ -484,7 +484,7 @@ usb_1: usb@8cf8800 {
resets = <&gcc GCC_USB1_BCR>;
status = "disabled";
dwc_1: usb@8c00000 {
dwc_1: dwc3@8c00000 {
compatible = "snps,dwc3";
reg = <0x8c00000 0xcd00>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;


@ -2566,7 +2566,7 @@ usb3: usb@6af8800 {
power-domains = <&gcc USB30_GDSC>;
status = "disabled";
usb@6a00000 {
dwc3@6a00000 {
compatible = "snps,dwc3";
reg = <0x06a00000 0xcc00>;
interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
@ -2873,7 +2873,7 @@ usb2: usb@76f8800 {
qcom,select-utmi-as-pipe-clk;
status = "disabled";
usb@7600000 {
dwc3@7600000 {
compatible = "snps,dwc3";
reg = <0x07600000 0xcc00>;
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;


@ -1964,7 +1964,7 @@ usb3: usb@a8f8800 {
resets = <&gcc GCC_USB_30_BCR>;
usb3_dwc3: usb@a800000 {
usb3_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xcd00>;
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;


@ -337,7 +337,7 @@ &usb2_phy_sec {
&usb3 {
status = "okay";
usb@7580000 {
dwc3@7580000 {
dr_mode = "host";
};
};


@ -544,7 +544,7 @@ usb3: usb@7678800 {
assigned-clock-rates = <19200000>, <200000000>;
status = "disabled";
usb@7580000 {
dwc3@7580000 {
compatible = "snps,dwc3";
reg = <0x07580000 0xcd00>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@ -573,7 +573,7 @@ usb2: usb@79b8800 {
assigned-clock-rates = <19200000>, <133333333>;
status = "disabled";
usb@78c0000 {
dwc3@78c0000 {
compatible = "snps,dwc3";
reg = <0x078c0000 0xcc00>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;


@ -2756,7 +2756,7 @@ usb_1: usb@a6f8800 {
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;


@ -3781,7 +3781,7 @@ usb_1: usb@a6f8800 {
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
@ -3829,7 +3829,7 @@ usb_2: usb@a8f8800 {
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_2_dwc3: usb@a800000 {
usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;


@ -2344,7 +2344,7 @@ usb_1: usb@a6f8800 {
resets = <&gcc GCC_USB30_PRIM_BCR>;
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;


@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check)
EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
stp x29, x30, [sp, #-16]!
mov x29, sp
alternative_if ARM64_SVE
bl __arm_smccc_sve_check
alternative_else_nop_endif
\instr #0
ldr x4, [sp]
ldr x4, [sp, #16]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
ldr x4, [sp, #8]
ldr x4, [sp, #24]
cbz x4, 1f /* no quirk structure */
ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
b.ne 1f
str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1: ret
1: ldp x29, x30, [sp], #16
ret
.endm
/*


@ -1339,7 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
#if CONFIG_PGTABLE_LEVELS > 3
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@ -1354,16 +1353,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
return 1;
}
int pud_clear_huge(pud_t *pudp)
{
if (!pud_sect(READ_ONCE(*pudp)))
return 0;
pud_clear(pudp);
return 1;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@ -1378,6 +1367,14 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
return 1;
}
int pud_clear_huge(pud_t *pudp)
{
if (!pud_sect(READ_ONCE(*pudp)))
return 0;
pud_clear(pudp);
return 1;
}
int pmd_clear_huge(pmd_t *pmdp)
{
if (!pmd_sect(READ_ONCE(*pmdp)))
@ -1385,7 +1382,6 @@ int pmd_clear_huge(pmd_t *pmdp)
pmd_clear(pmdp);
return 1;
}
#endif
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{


@ -33,6 +33,7 @@ config MAC
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
select HAVE_PATA_PLATFORM
select LEGACY_TIMER_TICK
help
This option enables support for the Apple Macintosh series of


@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}


@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;


@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
return H_BAD_MODE;
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;
/*
* L1 must have set up a suspended state to enter the L2 in a
* transactional state, and only in that case. These have to be
* filtered out here to prevent causing a TM Bad Thing in the
* host HRFID. We could synthesize a TM Bad Thing back to the L1
* here but there doesn't seem like much point.
*/
if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
if (!MSR_TM_ACTIVE(l2_regs.msr))
return H_BAD_MODE;
} else {
if (l2_regs.msr & MSR_TS_MASK)
return H_BAD_MODE;
if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
return H_BAD_MODE;
}
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)


@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_return_to_guest:
#endif
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* is in real suspend mode and is trying to transition to
* transactional mode.
*/
if (local_paca->kvm_hstate.fake_suspend &&
if (!local_paca->kvm_hstate.fake_suspend &&
(vcpu->arch.shregs.msr & MSR_TS_S)) {
if (kvmhv_p9_tm_emulation_early(vcpu)) {
/* Prevent it being handled again. */
trap = 0;
/*
* Go straight back into the guest with the
* new NIP/MSR as set by TM emulation.
*/
mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
/*
* tm_return_to_guest re-loads SRR0/1, DAR,
* DSISR after RI is cleared, in case they had
* been clobbered by a MCE.
*/
__mtmsrd(0, 1); /* clear RI */
goto tm_return_to_guest;
}
}
#endif
@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
vcpu->arch.shregs.msr & MSR_TS_MASK)
msr |= MSR_TS_S;
__mtmsrd(msr, 0);
end_timing(vcpu);


@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
/*
* Don't overflow our args array: ensure there is room for
* at least rets[0] (even if the call specifies 0 nret).
*
* Each handler must then check for the correct nargs and nret
* values, but they may always return failure in rets[0].
*/
rc = -EINVAL;
goto fail;
}
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
* args pointer. That means we can't get to the args, and so we
* can't fail the RTAS call. So fail right out to userspace,
* which should kill the guest.
* args pointer or nargs/nret values that would overflow the
* array. That means we can't get to the args, and so we can't
* fail the RTAS call. So fail right out to userspace, which
* should kill the guest.
*
* SLOF should actually pass the hcall return value from the
* rtas handler call in r3, so enter_rtas could be modified to
* return a failure indication in r3 and we could return such
* errors to the guest rather than failing to host userspace.
* However old guests that don't test for failure could then
* continue silently after errors, so for now we won't do this.
*/
return rc;
}
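The shape of the new check, as an illustrative user-space sketch (hypothetical names, not the kernel's types): rets aliases the tail of a fixed args[] array, so nargs must be validated before the pointer is formed:

    #include <errno.h>

    #define MAX_ARGS 16 /* stand-in for ARRAY_SIZE(args.args) */

    struct rtas_like_args {
            unsigned int nargs;
            unsigned int nret;
            unsigned int args[MAX_ARGS];
            unsigned int *rets;
    };

    /* Reject calls whose nargs would leave no room for rets[0]. */
    static int check_args(struct rtas_like_args *a)
    {
            if (a->nargs >= MAX_ARGS)
                    return -EINVAL;
            a->rets = &a->args[a->nargs];
            return 0;
    }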


@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;
@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;


@ -240,3 +240,13 @@ void __init setup_kuap(bool disabled)
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif
int pud_clear_huge(pud_t *pud)
{
return 0;
}
int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}


@ -42,6 +42,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take


@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
/* Load initrd at enough distance from DRAM start */
/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
return image_addr + SZ_256M;
return ULONG_MAX;
}
#define alloc_screen_info(x...) (&screen_info)


@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
if (likely(task && task != current && !task_is_running(task)))
if (likely(task && task != current && !task_is_running(task))) {
if (!try_get_task_stack(task))
return 0;
walk_stackframe(task, NULL, save_wchan, &pc);
put_task_stack(task);
}
return pc;
}


@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
* t0 - end of uncopied dst
*/
add t0, a0, a2
bgtu a0, t0, 5f
/*
* Use byte copy only if too small.
* SZREG holds 4 for RV32 and 8 for RV64
*/
li a3, 8*SZREG /* size must be larger than size in word_copy */
li a3, 9*SZREG /* size must be larger than size in word_copy */
bltu a2, a3, .Lbyte_copy_tail
/*
* Copy first bytes until dst is align to word boundary.
* Copy first bytes until dst is aligned to word boundary.
* a0 - start of dst
* t1 - start of aligned dst
*/
addi t1, a0, SZREG-1
andi t1, t1, ~(SZREG-1)
/* dst is already aligned, skip */
beq a0, t1, .Lskip_first_bytes
beq a0, t1, .Lskip_align_dst
1:
/* a5 - one byte for copying data */
fixup lb a5, 0(a1), 10f
@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t1, 1b /* t1 - start of aligned dst */
.Lskip_first_bytes:
.Lskip_align_dst:
/*
* Now dst is aligned.
* Use shift-copy if src is misaligned.
@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
*
* a0 - start of aligned dst
* a1 - start of aligned src
* a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst
*/
addi t0, t0, -(8*SZREG-1) /* not to over run */
addi t0, t0, -(8*SZREG) /* not to over run */
2:
fixup REG_L a4, 0(a1), 10f
fixup REG_L a5, SZREG(a1), 10f
@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
addi a1, a1, 8*SZREG
bltu a0, t0, 2b
addi t0, t0, 8*SZREG-1 /* revert to original value */
addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail
.Lshift_copy:
@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
* For misaligned copy we still perform aligned word copy, but
* we need to use the value fetched from the previous iteration and
* do some shifts.
* This is safe because reading less than a word size.
* This is safe because reading is less than a word size.
*
* a0 - start of aligned dst
* a1 - start of src
@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
*/
/* calculating aligned word boundary for dst */
andi t1, t0, ~(SZREG-1)
/* Converting unaligned src to aligned arc */
/* Converting unaligned src to aligned src */
andi a1, a1, ~(SZREG-1)
/*
@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
* t3 - prev shift
* t4 - current shift
*/
slli t3, a3, LGREG
slli t3, a3, 3 /* converting bytes in a3 to bits */
li a5, SZREG*8
sub t4, a5, t3
/* Load the first word to combine with seceond word */
/* Load the first word to combine with second word */
fixup REG_L a5, 0(a1), 10f
3:
@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
* a1 - start of remaining src
* t0 - end of remaining dst
*/
bgeu a0, t0, 5f
bgeu a0, t0, .Lout_copy_user /* check if end of copy */
4:
fixup lb a5, 0(a1), 10f
addi a1, a1, 1 /* src */
@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t0, 4b /* t0 - end of dst */
5:
.Lout_copy_user:
/* Disable access to user memory */
csrc CSR_STATUS, t6
li a0, 0
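A C model of the shift-copy loop above: aligned word loads from a misaligned source are combined with the previous word, with the byte offset converted to bits (the slli t3, a3, 3 fix). Sketch only: little-endian, and it assumes a truly misaligned source (offset 1..SZREG-1), since an aligned source takes the plain word-copy path:

    #include <stddef.h>
    #include <stdint.h>

    void shift_copy(unsigned long *dst, const unsigned char *src, size_t words)
    {
            const unsigned long *s =
                    (const void *)((uintptr_t)src & ~(sizeof(long) - 1));
            unsigned int prev = ((uintptr_t)src & (sizeof(long) - 1)) * 8;
            unsigned int cur = 8 * sizeof(long) - prev;
            unsigned long lo = *s++; /* first word, combined with the second */

            while (words--) {
                    unsigned long hi = *s++;
                    *dst++ = (lo >> prev) | (hi << cur);
                    lo = hi;
            }
    }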


@ -127,10 +127,17 @@ void __init mem_init(void)
}
/*
* The default maximal physical memory size is -PAGE_OFFSET,
* limit the memory size via mem.
* The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
* whereas for 64-bit kernel, the end of the virtual address space is occupied
* by the modules/BPF/kernel mappings which reduces the available size of the
* linear mapping.
* Limit the memory size via mem.
*/
#ifdef CONFIG_64BIT
static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
#endif
static int __init early_mem(char *p)
{
@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
phys_addr_t max_mapped_addr = __pa(~(ulong)0);
phys_addr_t __maybe_unused max_mapped_addr;
phys_addr_t dram_end;
#ifdef CONFIG_XIP_KERNEL
@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
dram_end = memblock_end_of_DRAM();
#ifndef CONFIG_64BIT
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
* if end of dram is equal to maximum addressable memory.
* if end of dram is equal to maximum addressable memory. For 64-bit
* kernel, this problem can't happen here as the end of the virtual
* address space is occupied by the kernel mapping then this check must
* be done in create_kernel_page_table.
*/
max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (dram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
#endif
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % map_size) != 0);
#ifdef CONFIG_64BIT
/*
* The last 4K bytes of the addressable memory can not be mapped because
* of IS_ERR_VALUE macro.
*/
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {


@ -9,16 +9,6 @@
#include <asm/errno.h>
#include <asm/sigp.h>
#ifdef CC_USING_EXPOLINE
.pushsection .dma.text.__s390_indirect_jump_r14,"axG"
__dma__s390_indirect_jump_r14:
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.popsection
#endif
.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,
@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
#ifdef CC_USING_EXPOLINE
jg __dma__s390_indirect_jump_r14
#else
br %r14
#endif
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.endm
/*


@ -5,7 +5,12 @@ CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@ -28,14 +33,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_MISC=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@ -76,6 +80,7 @@ CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
@ -95,6 +100,7 @@ CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
@ -158,6 +164,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
CONFIG_MPTCP=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@ -280,6 +287,7 @@ CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@ -384,12 +392,11 @@ CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
@ -436,7 +443,7 @@ CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@ -453,6 +460,7 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
@ -495,6 +503,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@ -551,7 +560,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
CONFIG_PPS=m
@ -574,7 +582,6 @@ CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
@ -619,6 +626,7 @@ CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_NETFS_STATS=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@ -654,7 +662,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
@ -682,6 +689,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
@ -696,6 +704,7 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@ -843,7 +852,6 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_MIN_HEAP=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
@ -853,3 +861,4 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m


@ -4,6 +4,11 @@ CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@ -26,14 +31,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_MISC=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@ -70,6 +74,7 @@ CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
@ -87,6 +92,7 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
@ -149,6 +155,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
CONFIG_MPTCP=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@ -271,6 +278,7 @@ CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@ -374,11 +382,10 @@ CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
@ -427,7 +434,7 @@ CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@ -444,6 +451,7 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
@ -487,6 +495,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@ -543,7 +552,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
# CONFIG_PTP_1588_CLOCK is not set
@ -566,7 +574,6 @@ CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
@ -607,6 +614,7 @@ CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_NETFS_STATS=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@ -642,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
@ -669,6 +676,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
@ -684,6 +692,7 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@ -754,6 +763,7 @@ CONFIG_CRC8=m
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
@ -781,3 +791,4 @@ CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m


@ -29,9 +29,9 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
# CONFIG_PCPU_DEV_REFCNT is not set
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
@ -51,7 +51,6 @@ CONFIG_ZFCP=y
# CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HMC_DRV is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set


@ -19,6 +19,7 @@ void ftrace_caller(void);
extern char ftrace_graph_caller_end;
extern unsigned long ftrace_plt;
extern void *ftrace_func;
struct dyn_arch_ftrace { };


@ -40,6 +40,7 @@
* trampoline (ftrace_plt), which clobbers also r1.
*/
void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
return 0;
}


@ -59,13 +59,13 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_trace_function
lgrl %r1,ftrace_func
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
larl %r1,ftrace_trace_function
larl %r1,ftrace_func
lg %r1,0(%r1)
#endif
lgr %r3,%r14


@ -745,7 +745,7 @@ static int __init cpumf_pmu_init(void)
if (!cf_dbg) {
pr_err("Registration of s390dbf(cpum_cf) failed\n");
return -ENOMEM;
};
}
debug_register_view(cf_dbg, &debug_sprintf_view);
cpumf_pmu.attr_groups = cpumf_cf_event_group();


@ -29,6 +29,7 @@ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
obj-y += vdso32_wrapper.o
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
# Disable gcov profiling, ubsan and kasan for VDSO code


@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];
if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
jit->seen_reg[r1] = 1;
}
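The point of the reorder: && evaluates left to right, so the range test must run before the array access it guards. An illustrative sketch (not the JIT's real types):

    static void mark_seen(int seen[16], int r)
    {
            if (r >= 6 && r <= 15 && !seen[r]) /* bounds check, then index */
                    seen[r] = 1;
    }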


@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
for_each_present_cpu(i) {
if (i == 0)
continue;
ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
BUG_ON(ret);
}


@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
static __always_inline void
__jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
const struct jump_label_patch jlp = __jump_label_patch(entry, type);


@ -682,7 +682,6 @@ int p4d_clear_huge(p4d_t *p4d)
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
/**
* pud_set_huge - setup kernel PUD mapping
*
@ -721,23 +720,6 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
return 1;
}
/**
* pud_clear_huge - clear kernel PUD mapping when it is set
*
* Returns 1 on success and 0 on failure (no PUD map is found).
*/
int pud_clear_huge(pud_t *pud)
{
if (pud_large(*pud)) {
pud_clear(pud);
return 1;
}
return 0;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
/**
* pmd_set_huge - setup kernel PMD mapping
*
@ -768,6 +750,21 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
return 1;
}
/**
* pud_clear_huge - clear kernel PUD mapping when it is set
*
* Returns 1 on success and 0 on failure (no PUD map is found).
*/
int pud_clear_huge(pud_t *pud)
{
if (pud_large(*pud)) {
pud_clear(pud);
return 1;
}
return 0;
}
/**
* pmd_clear_huge - clear kernel PMD mapping when it is set
*
@ -782,7 +779,6 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
#endif
#ifdef CONFIG_X86_64
/**


@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
bool "Override ACPI tables from built-in initrd"
depends on ACPI_TABLE_UPGRADE
depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
help
This option provides functionality to override arbitrary ACPI tables
from built-in uncompressed initrd.


@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
* FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
* in the case of a hotplug event. That said, the caller should ensure that
* this will never happen.
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
* On the other hand the function invokes acpi_dev_put() on the given @adev
* assuming that its reference counter had been increased beforehand.
*
* See additional information in acpi_dev_present() as well.
*/
@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
acpi_dev_put(adev);
return dev ? to_acpi_device(dev) : NULL;
}
EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
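Given the clarified reference semantics, a typical consumer loop looks like this sketch (kernel context with <linux/acpi.h>; the HID "ABCD0000" is hypothetical):

    struct acpi_device *adev;

    adev = acpi_dev_get_first_match_dev("ABCD0000", NULL, -1);
    while (adev) {
            /* ... use adev; the next call drops this reference ... */
            adev = acpi_dev_get_next_match_dev(adev, "ABCD0000", NULL, -1);
    }
    /* No trailing acpi_dev_put(): the final call returned NULL. */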


@ -229,6 +229,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
{
int ret;
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
@ -244,7 +246,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
auxdrv->driver.bus = &auxiliary_bus_type;
auxdrv->driver.mod_name = modname;
return driver_register(&auxdrv->driver);
ret = driver_register(&auxdrv->driver);
if (ret)
kfree(auxdrv->driver.name);
return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
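For context, a minimal caller of this interface; probe and id_table are the members the WARN_ON above insists on (sketch with hypothetical names, assuming <linux/auxiliary_bus.h> and <linux/module.h>):

    static const struct auxiliary_device_id my_id_table[] = {
            { .name = "parent_mod.unit" }, /* "<module>.<device>" match */
            {}
    };
    MODULE_DEVICE_TABLE(auxiliary, my_id_table);

    static int my_probe(struct auxiliary_device *adev,
                        const struct auxiliary_device_id *id)
    {
            return 0;
    }

    static struct auxiliary_driver my_driver = {
            .probe = my_probe,
            .id_table = my_id_table,
    };
    module_auxiliary_driver(my_driver);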


@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
if (device_is_registered(con)) {
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
}
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);


@ -4100,8 +4100,6 @@ static void rbd_acquire_lock(struct work_struct *work)
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
bool need_wait;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
need_wait = !list_empty(&rbd_dev->running_list);
downgrade_write(&rbd_dev->lock_rwsem);
if (need_wait)
wait_for_completion(&rbd_dev->releasing_wait);
up_read(&rbd_dev->lock_rwsem);
if (list_empty(&rbd_dev->running_list))
return true;
up_write(&rbd_dev->lock_rwsem);
wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
/*
* we already know that the remote client is
* the owner
*/
up_write(&rbd_dev->lock_rwsem);
return;
dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
__func__, rbd_dev, cid.gid, cid.handle);
} else {
rbd_set_owner_cid(rbd_dev, &cid);
}
rbd_set_owner_cid(rbd_dev, &cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
up_write(&rbd_dev->lock_rwsem);
return;
} else {
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
}
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */


@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);
if (chan < mhi_cntrl->max_chan &&
mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);
} else {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Completion packet for invalid channel ID: %d\n", chan);
}
mhi_del_ring_element(mhi_cntrl, mhi_ring);
}


@ -32,6 +32,8 @@
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
@ -40,6 +42,7 @@ struct mhi_pci_dev_info {
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
bool sideband_wake;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@ -72,6 +75,22 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
.auto_queue = true, \
}
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
.num_elements = el_count, \
@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.edl = "qcom/sdx65m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = true,
};
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = true,
};
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};
static const struct pci_device_id mhi_pci_id_table[] = {
@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)


@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
break;
if (!adev->pnp.unique_id && node->acpi.uid == 0)
break;
acpi_dev_put(adev);
}
if (!adev)
return -ENODEV;


@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
struct resource *res, *parent;
int ret;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)
@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
/* we expect a conflict with a 'System RAM' region */
parent = request_resource_conflict(&iomem_resource, res);
return parent ? request_resource(parent, res) : 0;
ret = parent ? request_resource(parent, res) : 0;
/*
* Given that efi_mem_reserve_iomem() can be called at any
* time, only call memblock_reserve() if the architecture
* keeps the infrastructure around.
*/
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
memblock_reserve(addr, size);
return ret;
}
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)


@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @load_addr: pointer to loaded initrd
* @load_size: size of loaded initrd
* @soft_limit: preferred size of allocated memory for loading the initrd
* @hard_limit: minimum size of allocated memory
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
*
* Return: status code
*/


@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
pr_err("EFI MOKvar config table is not valid\n");
return;
}
efi_mem_reserve(efi.mokvar_table, map_size_needed);
if (md.type == EFI_BOOT_SERVICES_DATA)
efi_mem_reserve(efi.mokvar_table, map_size_needed);
efi_mokvar_table_size = map_size_needed;
}


@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
goto out;
} else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}


@ -619,6 +619,13 @@ struct amdgpu_video_codec_info {
u32 max_level;
};
#define codec_info_build(type, width, height, level) \
.codec_type = type,\
.max_width = width,\
.max_height = height,\
.max_pixels_per_frame = height * width,\
.max_level = level,
struct amdgpu_video_codecs {
const u32 codec_count;
const struct amdgpu_video_codec_info *codec_array;

View File

@ -1190,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = {
/* Van Gogh */
{0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
/* Yellow Carp */
{0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
{0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
/* Navy_Flounder */
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},


@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
* for debugger access to invisible VRAM. Should have used MAP_SHARED
* instead. Clearing VM_MAYWRITE prevents the mapping from ever
* becoming writable and makes is_cow_mapping(vm_flags) false.
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma->vm_flags &= ~VM_MAYWRITE;
return drm_gem_ttm_mmap(obj, vma);
}


@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};
@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)


@ -64,32 +64,13 @@
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#define codec_info_build(type, width, height, level) \
.codec_type = type,\
.max_width = width,\
.max_height = height,\
.max_pixels_per_frame = height * width,\
.max_level = level,
static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode =
@ -101,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_decode =
@@ -161,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_decode =
@@ -228,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
@@ -333,6 +164,19 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
.codec_array = NULL,
};
/* Yellow Carp */
static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs yc_video_codecs_decode = {
.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
.codec_array = yc_video_codecs_decode_array,
};
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -353,12 +197,17 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
return 0;
case CHIP_YELLOW_CARP:
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
case CHIP_BEIGE_GOBY:
if (encode)
*codecs = &bg_video_codecs_encode;
@@ -1387,7 +1236,10 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x01;
if (adev->pdev->device == 0x1681)
adev->external_rev_id = adev->rev_id + 0x19;
else
adev->external_rev_id = adev->rev_id + 0x01;
break;
default:
/* FIXME: not supported yet */

View File

@@ -88,20 +88,8 @@
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs rv_video_codecs_decode =
@@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs rn_video_codecs_decode =

View File

@@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
/* SOCCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_levels);
// DPREFCLK ???
/* DISPCLK */

View File

@@ -48,6 +48,21 @@
#include "dc_dmub_srv.h"
#include "yellow_carp_offset.h"
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
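With the definitions above, REG(CLK1_CLK_PLL_REQ) resolves to
CLK_BASE.instance[0].segment[regCLK1_CLK_PLL_REQ_BASE_IDX] + regCLK1_CLK_PLL_REQ
/* i.e. segment 0 of the CLK aperture plus offset 0x0237 */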
@@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
* also if safe to lower is false, we just go in the higher state
*/
if (safe_to_lower) {
if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
clk_mgr_base->clks.z9_support = new_clocks->z9_support;
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
@@ -148,10 +163,10 @@
}
}
} else {
if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
clk_mgr_base->clks.z9_support = new_clocks->z9_support;
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
@@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
return 0;
/* get FbMult value */
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
/*
 * The fbmult register value is in 8.16 fixed-point format; convert it to
 * 31.32 to leverage the fixed-point operations available in the driver.
 */
REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part */
REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
pll_req = dc_fixpt_from_int(fbmult_int_val);
/*
 * since the fractional part is only 16 bits in the register definition but
 * 32 bits in our fixed-point definition, shift left by 16 to obtain the
 * correct value
 */
pll_req.value |= fbmult_frac_val << 16;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
/* integer part is now VCO frequency in kHz */
return dc_fixpt_floor(pll_req);
}
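A minimal standalone sketch of the conversion above, with illustrative register values (the FbMult numbers are assumptions; dfs_ref_freq_khz = 48000 matches the construct() change below):
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	uint32_t fbmult_int = 0x10a;	/* integer part (9-bit field) = 266 */
	uint32_t fbmult_frac = 0x8000;	/* 16-bit fractional part = 0.5 */
	uint64_t ref_khz = 48000;	/* dfs_ref_freq_khz */
	/* 31.32 fixed point: integer in the high word, fraction in the low */
	uint64_t pll_req = ((uint64_t)fbmult_int << 32) |
			   ((uint64_t)fbmult_frac << 16);
	/* multiply by the reference clock in kHz; the integer part (floor)
	 * is the VCO frequency: 266.5 * 48000 = 12792000 kHz */
	uint64_t vco_khz = (pll_req * ref_khz) >> 32;
	printf("VCO = %llu kHz\n", (unsigned long long)vco_khz);
	return 0;
}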
static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
@@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
else if (a->z9_support != b->z9_support)
else if (a->zstate_support != b->zstate_support)
return false;
else if (a->dtbclk_en != b->dtbclk_en)
return false;
@@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,

View File

@@ -27,60 +27,6 @@
#define __DCN31_CLK_MGR_H__
#include "clk_mgr_internal.h"
//CLK1_CLK_PLL_REQ
#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
//CLK1_CLK0_DFS_CNTL
#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT 0x0
#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK 0x0000007FL
/*DPREF clock related*/
#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
//CLK3_0_CLK3_CLK_PLL_REQ
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
#define mmCLK0_CLK3_DFS_CNTL 0x16C60
#define mmCLK00_CLK0_CLK3_DFS_CNTL 0x16C60
#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E60
#define mmCLK02_CLK0_CLK3_DFS_CNTL 0x17060
#define mmCLK03_CLK0_CLK3_DFS_CNTL 0x17260
#define mmCLK0_CLK_PLL_REQ 0x16C10
#define mmCLK00_CLK0_CLK_PLL_REQ 0x16C10
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E10
#define mmCLK02_CLK0_CLK_PLL_REQ 0x17010
#define mmCLK03_CLK0_CLK_PLL_REQ 0x17210
#define mmCLK1_CLK_PLL_REQ 0x1B00D
#define mmCLK10_CLK1_CLK_PLL_REQ 0x1B00D
#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
#define mmCLK12_CLK1_CLK_PLL_REQ 0x1B40D
#define mmCLK13_CLK1_CLK_PLL_REQ 0x1B60D
#define mmCLK2_CLK_PLL_REQ 0x17E0D
/*AMCLK*/
#define mmCLK11_CLK1_CLK0_DFS_CNTL 0x1B23F
#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
#endif
struct dcn31_watermarks;
struct dcn31_smu_watermark_set {

View File

@@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries(
*/
panel_mode = DP_PANEL_MODE_DEFAULT;
}
} else
panel_mode = DP_PANEL_MODE_DEFAULT;
}
}
#endif
@@ -4650,7 +4649,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
}
}
if (link->dpcd_caps.panel_mode_edp) {
if (link->dpcd_caps.panel_mode_edp &&
(link->connector_signal == SIGNAL_TYPE_EDP ||
(link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
link->is_internal_display))) {
return DP_PANEL_MODE_EDP;
}
@@ -4914,9 +4916,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
{
uint32_t default_backlight;
if (link &&
(link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!dc_link_read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if < 5 nits or > 5000, it might be a wrong readback

View File

@@ -1062,7 +1062,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
* did not show such problems, so this seems to be the exception.
*/
if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;

View File

@@ -354,10 +354,10 @@ enum dcn_pwr_state {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_z9_support_state {
DCN_Z9_SUPPORT_UNKNOWN,
DCN_Z9_SUPPORT_ALLOW,
DCN_Z9_SUPPORT_DISALLOW,
enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_UNKNOWN,
DCN_ZSTATE_SUPPORT_ALLOW,
DCN_ZSTATE_SUPPORT_DISALLOW,
};
#endif
/*
@@ -378,7 +378,7 @@ struct dc_clocks {
int dramclk_khz;
bool p_state_change_support;
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_z9_support_state z9_support;
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
#endif
enum dcn_pwr_state pwr_state;

View File

@@ -636,6 +636,7 @@ struct dce_hwseq_registers {
uint32_t ODM_MEM_PWR_CTRL3;
uint32_t DMU_MEM_PWR_CNTL;
uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1110,7 +1111,8 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_FORCEON;\
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;
type HPO_HDMISTREAMCLK_G_GATE_DIS;\
type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)

View File

@@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb(
const struct line_buffer_params *lb_params,
enum lb_memory_config mem_size_config)
{
uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
/* LB */
if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL caps: pixel data processed in fixed format */
@@ -239,9 +241,12 @@
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
if (dpp->base.caps->max_lb_partitions == 31)
max_partitions = 31;
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
LB_MAX_PARTITIONS, 63);
LB_MAX_PARTITIONS, max_partitions);
}
static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)

View File

@@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context(
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.hactive =
timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipes[pipe_cnt].pipe.dest.vactive =
timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
return false;
}
static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context)
{
int plane_count;
int i;
plane_count = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
/*
 * Zstate is allowed in the following scenarios:
 * 1. Single eDP with PSR enabled
 * 2. 0 planes (no memory requests)
 * 3. Single eDP without PSR but > 5 ms stutter period
 */
if (plane_count == 0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
|| context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else
return DCN_ZSTATE_SUPPORT_DISALLOW;
} else
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
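A standalone model of the policy above (illustrative names, not driver code): zstate is allowed with no planes, or for a single eDP stream whose link 0 has PSR enabled or whose stutter period exceeds 5 ms.
#include <stdbool.h>
enum zstate_model { ZSTATE_MODEL_ALLOW, ZSTATE_MODEL_DISALLOW };
static enum zstate_model decide_zstate_model(int plane_count, int stream_count,
					     bool edp, bool psr_on_link0,
					     double stutter_period_us)
{
	if (plane_count == 0)
		return ZSTATE_MODEL_ALLOW;	/* no memory requests */
	if (stream_count == 1 && edp &&
	    (psr_on_link0 || stutter_period_us > 5000.0))
		return ZSTATE_MODEL_ALLOW;
	return ZSTATE_MODEL_DISALLOW;
}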
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params(
int vlevel)
{
int i, pipe_idx;
int plane_count;
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3102,17 +3134,7 @@
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
plane_count = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
if (plane_count == 0)
context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);

View File

@@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
/* Some ASICs does not support FP16 scaling, so we reject modes require this*/
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
scl_data->format == PIXEL_FORMAT_FP16)
return false;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
@@ -1440,15 +1433,6 @@ bool dpp3_construct(
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
LB_PIXEL_DEPTH_30BPP |
LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
return true;
}

View File

@@ -154,6 +154,7 @@
SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
SRI(CURSOR_CONTROL, CURSOR0_, id),\
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
#define DPP_REG_LIST_DCN30(id)\
@@ -163,8 +164,6 @@
SRI(CM_SHAPER_LUT_DATA, CM, id),\
SRI(CM_MEM_PWR_CTRL2, CM, id), \
SRI(CM_MEM_PWR_STATUS2, CM, id), \
SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\

View File

@@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */

View File

@@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_param
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */

View File

@@ -47,6 +47,7 @@
#include "dce/dmub_outbox.h"
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"
#include "dcn10/dcn10_hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -594,3 +595,20 @@ bool dcn31_is_abm_supported(struct dc *dc,
}
return false;
}
static void apply_riommu_invalidation_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
if (!hws->wa.early_riommu_invalidation)
return;
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
}
void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
{
dcn10_init_pipes(dc, context);
apply_riommu_invalidation_wa(dc);
}

View File

@@ -52,5 +52,6 @@ void dcn31_reset_hw_ctx_wrap(
struct dc_state *context);
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN31_H__ */

View File

@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -104,7 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
.init_pipes = dcn10_init_pipes,
.init_pipes = dcn31_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,

View File

@@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.sr_exit_z8_time_us = 402.0,
.sr_enter_plus_exit_z8_time_us = 520.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = {
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
@@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = {
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
@@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.early_riommu_invalidation = true;
}
return hws;
}

View File

@@ -4889,7 +4889,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
|| mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];

View File

@@ -289,6 +289,9 @@ struct dpp_caps {
/* DSCL processing pixel data in fixed or float format */
enum dscl_data_processing_format dscl_data_proc_format;
/* max LB partitions */
unsigned int max_lb_partitions;
/* Calculates the number of partitions in the line buffer.
* The implementation of this function is overloaded for
* different versions of DSCL LB.

View File

@@ -41,6 +41,7 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
bool early_riommu_invalidation;
};
struct hwseq_wa_state {

View File

@@ -101,7 +101,8 @@
#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
#define PPSMC_Message_Count 0x43
#define PPSMC_MSG_BoardPowerCalibration 0x43
#define PPSMC_Message_Count 0x44
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00

View File

@@ -225,7 +225,8 @@
__SMU_DUMMY_MAP(DisableDeterminism), \
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
__SMU_DUMMY_MAP(GfxDriverResetRecovery),
__SMU_DUMMY_MAP(GfxDriverResetRecovery), \
__SMU_DUMMY_MAP(BoardPowerCalibration),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type

View File

@@ -34,7 +34,7 @@
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
/* MP Apertures */
#define MP0_Public 0x03800000

View File

@@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
return ret;
}
static bool aldebaran_is_primary(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
return adev->smuio.funcs->get_die_id(adev) == 0;
return true;
}
static int aldebaran_run_board_btc(struct smu_context *smu)
{
u32 smu_version;
int ret;
if (!aldebaran_is_primary(smu))
return 0;
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
return ret;
}
if (smu_version <= 0x00441d00)
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
if (ret)
dev_err(smu->adev->dev, "Board power calibration failed!\n");
return ret;
}
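The firmware gate above compares raw SMU version words. Assuming the usual one-byte-per-field packing (an assumption, not taken from this diff), the 0x00441d00 threshold decodes as firmware 68.29.0:
#include <stdio.h>
int main(void)
{
	unsigned int v = 0x00441d00;	/* threshold used above */
	/* assumed packing: 0x00MMmmdd -> major.minor.debug */
	printf("%u.%u.%u\n", (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}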
static int aldebaran_run_btc(struct smu_context *smu)
{
int ret;
@@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
if (ret)
dev_err(smu->adev->dev, "RunDcBtc failed!\n");
else
ret = aldebaran_run_board_btc(smu);
return ret;
}
@@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
return (abs(frequency1 - frequency2) <= EPSILON);
}
static bool aldebaran_is_primary(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
return adev->smuio.funcs->get_die_id(adev) == 0;
return true;
}
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)

View File

@@ -834,6 +834,9 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {

View File

@@ -25,10 +25,8 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_memcpy.h"
struct eb_vma {
struct i915_vma *vma;
@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
int err;
struct intel_engine_cs *engine = eb->engine;
/* If we need to copy for the cmdparser, we will stall anyway */
if (eb_use_cmdparser(eb))
return ERR_PTR(-EWOULDBLOCK);
if (!reloc_can_use_engine(engine)) {
engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
if (!engine)
@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
return vma;
}
struct eb_parse_work {
struct dma_fence_work base;
struct intel_engine_cs *engine;
struct i915_vma *batch;
struct i915_vma *shadow;
struct i915_vma *trampoline;
unsigned long batch_offset;
unsigned long batch_length;
unsigned long *jump_whitelist;
const void *batch_map;
void *shadow_map;
};
static int __eb_parse(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
int ret;
bool cookie;
cookie = dma_fence_begin_signalling();
ret = intel_engine_cmd_parser(pw->engine,
pw->batch,
pw->batch_offset,
pw->batch_length,
pw->shadow,
pw->jump_whitelist,
pw->shadow_map,
pw->batch_map);
dma_fence_end_signalling(cookie);
return ret;
}
static void __eb_parse_release(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
if (!IS_ERR_OR_NULL(pw->jump_whitelist))
kfree(pw->jump_whitelist);
if (pw->batch_map)
i915_gem_object_unpin_map(pw->batch->obj);
else
i915_gem_object_unpin_pages(pw->batch->obj);
i915_gem_object_unpin_map(pw->shadow->obj);
if (pw->trampoline)
i915_active_release(&pw->trampoline->active);
i915_active_release(&pw->shadow->active);
i915_active_release(&pw->batch->active);
}
static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
.release = __eb_parse_release,
};
static inline int
__parser_mark_active(struct i915_vma *vma,
struct intel_timeline *tl,
struct dma_fence *fence)
{
struct intel_gt_buffer_pool_node *node = vma->private;
return i915_active_ref(&node->active, tl->fence_context, fence);
}
static int
parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
{
int err;
mutex_lock(&tl->mutex);
err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
if (err)
goto unlock;
if (pw->trampoline) {
err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
if (err)
goto unlock;
}
unlock:
mutex_unlock(&tl->mutex);
return err;
}
static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *shadow,
struct i915_vma *trampoline)
{
struct eb_parse_work *pw;
struct drm_i915_gem_object *batch = eb->batch->vma->obj;
bool needs_clflush;
int err;
GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
pw = kzalloc(sizeof(*pw), GFP_KERNEL);
if (!pw)
return -ENOMEM;
err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;
err = i915_active_acquire(&shadow->active);
if (err)
goto err_batch;
if (trampoline) {
err = i915_active_acquire(&trampoline->active);
if (err)
goto err_shadow;
}
pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
if (IS_ERR(pw->shadow_map)) {
err = PTR_ERR(pw->shadow_map);
goto err_trampoline;
}
needs_clflush =
!(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
pw->batch_map = ERR_PTR(-ENODEV);
if (needs_clflush && i915_has_memcpy_from_wc())
pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
if (IS_ERR(pw->batch_map)) {
err = i915_gem_object_pin_pages(batch);
if (err)
goto err_unmap_shadow;
pw->batch_map = NULL;
}
pw->jump_whitelist =
intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
trampoline);
if (IS_ERR(pw->jump_whitelist)) {
err = PTR_ERR(pw->jump_whitelist);
goto err_unmap_batch;
}
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
pw->trampoline = trampoline;
/* Mark active refs early for this worker, in case we get interrupted */
err = parser_mark_active(pw, eb->context->timeline);
if (err)
goto err_commit;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
goto err_commit;
err = dma_resv_reserve_shared(shadow->resv, 1);
if (err)
goto err_commit;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
goto err_commit;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
/* Force execution to wait for completion of the parser */
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
dma_fence_work_commit_imm(&pw->base);
return 0;
err_commit:
i915_sw_fence_set_error_once(&pw->base.chain, err);
dma_fence_work_commit_imm(&pw->base);
return err;
err_unmap_batch:
if (pw->batch_map)
i915_gem_object_unpin_map(batch);
else
i915_gem_object_unpin_pages(batch);
err_unmap_shadow:
i915_gem_object_unpin_map(shadow->obj);
err_trampoline:
if (trampoline)
i915_active_release(&trampoline->active);
err_shadow:
i915_active_release(&shadow->active);
err_batch:
i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
}
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
{
/*
@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err_trampoline;
}
err = eb_parse_pipeline(eb, shadow, trampoline);
err = dma_resv_reserve_shared(shadow->resv, 1);
if (err)
goto err_trampoline;
err = intel_engine_cmd_parser(eb->engine,
eb->batch->vma,
eb->batch_start_offset,
eb->batch_len,
shadow, trampoline);
if (err)
goto err_unpin_batch;

View File

@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
intel_gt_pm_get(&eb.i915->gt);
for_each_uabi_engine(eb.engine, eb.i915) {
if (intel_engine_requires_cmd_parser(eb.engine) ||
intel_engine_using_cmd_parser(eb.engine))
continue;
reloc_cache_init(&eb.reloc_cache, eb.i915);
memset(map, POISON_INUSE, 4096);

View File

@@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
/*
 * d3_entered is used to indicate that PPGTT invalidation can be
 * skipped on vGPU reset; it is set on a D0->D3 PCI config write and
 * cleared after vGPU reset when resuming.
 * On S0ix exit the device power state also transitions from D3 to D0,
 * as in S3 resume, but no vGPU reset is triggered (by the QEMU device
 * model). After S0ix exit all engines continue to work, yet d3_entered
 * remains set, which breaks the next vGPU reset logic (the expected
 * PPGTT invalidation is missed).
 * Engines can only work in D0, so the 1st elsp write gives GVT a
 * chance to clear d3_entered.
 */
if (vgpu->d3_entered)
vgpu->d3_entered = false;
execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;

View File

@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
unsigned long offset, unsigned long length,
void *dst, const void *src)
bool *needs_clflush_after)
{
bool needs_clflush =
!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
unsigned int src_needs_clflush;
unsigned int dst_needs_clflush;
void *dst, *src;
int ret;
if (src) {
GEM_BUG_ON(!needs_clflush);
i915_unaligned_memcpy_from_wc(dst, src + offset, length);
} else {
struct scatterlist *sg;
ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret)
return ERR_PTR(ret);
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
return dst;
ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}
src = ERR_PTR(-ENODEV);
if (src_needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
i915_unaligned_memcpy_from_wc(dst,
src + offset,
length);
i915_gem_object_unpin_map(src_obj);
}
}
if (IS_ERR(src)) {
unsigned long x, n, remain;
void *ptr;
unsigned int x, sg_ofs;
unsigned long remain;
/*
* We can avoid clflushing partial cachelines before the write
@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* validate up to the end of the batch.
*/
remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
if (dst_needs_clflush & CLFLUSH_BEFORE)
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
while (remain) {
unsigned long sg_max = sg->length >> PAGE_SHIFT;
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (src_needs_clflush)
drm_clflush_virt_range(src + x, len);
memcpy(ptr, src + x, len);
kunmap_atomic(src);
for (; remain && sg_ofs < sg_max; sg_ofs++) {
unsigned long len = min(remain, PAGE_SIZE - x);
void *map;
map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
if (needs_clflush)
drm_clflush_virt_range(map + x, len);
memcpy(ptr, map + x, len);
kunmap_atomic(map);
ptr += len;
remain -= len;
x = 0;
}
sg_ofs = 0;
sg = sg_next(sg);
ptr += len;
remain -= len;
x = 0;
}
}
i915_gem_object_finish_access(src_obj);
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
return dst;
}
@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
if (target_cmd_index == offset)
return 0;
if (IS_ERR(jump_whitelist))
return PTR_ERR(jump_whitelist);
if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
@@ -1369,28 +1388,10 @@
return 0;
}
/**
* intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
* @batch_length: length of the commands in batch_obj
* @trampoline: Whether jump trampolines are used.
*
* Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
* This has to be preallocated, because the command parser runs in signaling context,
* and may not allocate any memory.
*
* Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
* IS_ERR() to check for errors. Must be freed with kfree().
*
* NULL is a valid value, meaning no allocation was required.
*/
unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
bool trampoline)
static unsigned long *alloc_whitelist(u32 batch_length)
{
unsigned long *jmp;
if (trampoline)
return NULL;
/*
* We expect batch_length to be less than 256KiB for known users,
* i.e. we need at most an 8KiB bitmap allocation which should be
@@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
* @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
* @shadow_map: mapping to @shadow vma
* @batch_map: mapping to @batch vma
* @trampoline: true if we need to trampoline into privileged execution
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
unsigned long *jump_whitelist,
void *shadow_map,
const void *batch_map)
bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool needs_clflush_after = false;
unsigned long *jump_whitelist;
u64 batch_addr, shadow_addr;
int ret = 0;
bool trampoline = !jump_whitelist;
GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
batch->size));
GEM_BUG_ON(!batch_length);
cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
shadow_map, batch_map);
cmd = copy_batch(shadow->obj, batch->obj,
batch_offset, batch_length,
&needs_clflush_after);
if (IS_ERR(cmd)) {
DRM_DEBUG("CMD: Failed to copy batch\n");
return PTR_ERR(cmd);
}
jump_whitelist = NULL;
if (!trampoline)
/* Defer failure until attempted use */
jump_whitelist = alloc_whitelist(batch_length);
shadow_addr = gen8_canonical_addr(shadow->node.start);
batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
i915_gem_object_unpin_map(shadow->obj);
return ret;
}

View File

@@ -1906,17 +1906,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
bool trampoline);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
unsigned long *jump_whitelist,
void *shadow_map,
const void *batch_map);
bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */

View File

@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
i915_sw_fence_set_error_once(&rq->submit, fence->error);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
}
if (fence->context == rq->fence.context)
continue;
@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
i915_sw_fence_set_error_once(&rq->submit, fence->error);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
}
/*
* Requests on the same timeline are explicitly ordered, along

View File

@@ -149,6 +149,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
*/
if (bo->base.dev)
drm_gem_object_release(&bo->base);
else
dma_resv_fini(&bo->base._resv);
kfree(nvbo);
}
@@ -330,6 +332,10 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.size = size;
dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node);
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;

View File

@@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
kfree(ts->dsi);
return 0;
}

Some files were not shown because too many files have changed in this diff