Merge 4.20-rc6 into staging-next

We want the staging fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2018-12-10 09:23:50 +01:00
commit d3086550fa
294 changed files with 3084 additions and 1266 deletions

View File

@ -1,4 +1,4 @@
What: /sys/class/net/<iface>/tagging
What: /sys/class/net/<iface>/dsa/tagging
Date: August 2018
KernelVersion: 4.20
Contact: netdev@vger.kernel.org

View File

@ -168,3 +168,19 @@ a shared clock is forbidden.
Configuration of common clocks, which affect multiple consumer devices, can
be similarly specified in the clock provider node.
==Protected clocks==
Some platforms or firmware may not fully expose all the clocks to the OS, for
example when those clocks are used by drivers running at ARM secure execution
levels. Such a configuration can be specified in the device tree with the
protected-clocks property, in the form of a clock specifier list. This
property should only be specified in the node that is providing the clocks
being protected:
clock-controller@a000f000 {
compatible = "vendor,clk95;
reg = <0xa000f000 0x1000>
#clocks-cells = <1>;
...
protected-clocks = <UART3_CLK>, <SPI5_CLK>;
};
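
On the provider side, one way to honor the property is to drop the protected
indices before registering the clocks. The sketch below is illustrative
rather than a specific driver: it assumes the provider keeps an
index-addressable table of clocks and uses the generic
of_property_for_each_u32() iterator from <linux/of.h> (the qcom common clock
code in this merge takes the same approach):

	/* Illustrative only: never register a clock index listed in
	 * "protected-clocks". @clks is the provider's index-addressable
	 * table of @num_clks entries. */
	static void demo_drop_protected_clocks(struct device_node *np,
					       struct clk_hw **clks,
					       u32 num_clks)
	{
		struct property *prop;
		const __be32 *p;
		u32 i;

		of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
			if (i >= num_clks)
				continue;
			clks[i] = NULL;
		}
	}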

View File

@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
a set of keys.
Required property:
sysrq-reset-seq: array of Linux keycodes, one keycode per cell.
keyset: array of Linux keycodes, one keycode per cell.
Optional property:
timeout-ms: duration keys must be pressed together in milliseconds before

View File

@ -1,29 +0,0 @@
device-tree bindings for rockchip VPU codec
Rockchip VPU (Video Processing Unit) present in various Rockchip platforms,
such as RK3288 and RK3399.
Required properties:
- compatible: value should be one of the following
"rockchip,rk3288-vpu";
"rockchip,rk3399-vpu";
- interrupts: encoding and decoding interrupt specifiers
- interrupt-names: should be "vepu" and "vdpu"
- clocks: phandle to VPU aclk, hclk clocks
- clock-names: should be "aclk" and "hclk"
- power-domains: phandle to power domain node
- iommus: phandle to an iommu node
Example:
SoC-specific DT entry:
vpu: video-codec@ff9a0000 {
compatible = "rockchip,rk3288-vpu";
reg = <0x0 0xff9a0000 0x0 0x800>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vepu", "vdpu";
clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
clock-names = "aclk", "hclk";
power-domains = <&power RK3288_PD_VIDEO>;
iommus = <&vpu_mmu>;
};

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_request_alloc:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_request_ioc_queue:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_request_ioc_reinit:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media-request-api:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-close:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-ioctl:

View File

@ -1,4 +1,28 @@
.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. This file is dual-licensed: you can use it either under the terms
.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
.. dual licensing only applies to this file, and not this project as a
.. whole.
..
.. a) This file is free software; you can redistribute it and/or
.. modify it under the terms of the GNU General Public License as
.. published by the Free Software Foundation; either version 2 of
.. the License, or (at your option) any later version.
..
.. This file is distributed in the hope that it will be useful,
.. but WITHOUT ANY WARRANTY; without even the implied warranty of
.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
.. GNU General Public License for more details.
..
.. Or, alternatively,
..
.. b) Permission is granted to copy, distribute and/or modify this
.. document under the terms of the GNU Free Documentation License,
.. Version 1.1 or any later version published by the Free Software
.. Foundation, with no Invariant Sections, no Front-Cover Texts
.. and no Back-Cover Texts. A copy of the license is included at
.. Documentation/media/uapi/fdl-appendix.rst.
..
.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-poll:

View File

@ -1480,6 +1480,7 @@ F: drivers/clk/sirf/
F: drivers/clocksource/timer-prima2.c
F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
X: drivers/gnss
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@armlinux.org.uk>
@ -3279,11 +3280,16 @@ S: Maintained
F: sound/pci/oxygen/
C-SKY ARCHITECTURE
M: Guo Ren <ren_guo@c-sky.com>
M: Guo Ren <guoren@kernel.org>
T: git https://github.com/c-sky/csky-linux.git
S: Supported
F: arch/csky/
F: Documentation/devicetree/bindings/csky/
F: drivers/irqchip/irq-csky-*
F: Documentation/devicetree/bindings/interrupt-controller/csky,*
F: drivers/clocksource/timer-gx6605s.c
F: drivers/clocksource/timer-mp-csky.c
F: Documentation/devicetree/bindings/timer/csky,*
K: csky
N: csky
@ -6324,6 +6330,7 @@ F: include/uapi/linux/gigaset_dev.h
GNSS SUBSYSTEM
M: Johan Hovold <johan@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
S: Maintained
F: Documentation/ABI/testing/sysfs-class-gnss
F: Documentation/devicetree/bindings/gnss/
@ -13899,6 +13906,13 @@ F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
SOCIONEXT (SNI) AVE NETWORK DRIVER
M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/socionext/sni_ave.c
F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
SOCIONEXT (SNI) NETSEC NETWORK DRIVER
M: Jassi Brar <jaswinder.singh@linaro.org>
L: netdev@vger.kernel.org

View File

@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 20
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Shy Crocodile
# *DOCUMENTATION*

View File

@ -109,7 +109,7 @@ endmenu
choice
prompt "ARC Instruction Set"
default ISA_ARCOMPACT
default ISA_ARCV2
config ISA_ARCOMPACT
bool "ARCompact ISA"
@ -176,13 +176,11 @@ endchoice
config CPU_BIG_ENDIAN
bool "Enable Big Endian Mode"
default n
help
Build kernel for Big Endian Mode of ARC CPU
config SMP
bool "Symmetric Multi-Processing"
default n
select ARC_MCIP if ISA_ARCV2
help
This enables support for systems with more than one CPU.
@ -254,7 +252,6 @@ config ARC_CACHE_PAGES
config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
default n
endif #ARC_CACHE
@ -262,7 +259,6 @@ config ARC_HAS_ICCM
bool "Use ICCM"
help
Single Cycle RAMs to store Fast Path Code
default n
config ARC_ICCM_SZ
int "ICCM Size in KB"
@ -273,7 +269,6 @@ config ARC_HAS_DCCM
bool "Use DCCM"
help
Single Cycle RAMs to store Fast Path Data
default n
config ARC_DCCM_SZ
int "DCCM Size in KB"
@ -366,13 +361,11 @@ if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
bool "Setup Timer IRQ as high Priority"
default n
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
default n
help
Double Precision Floating Point unit has dedicated regs which
need to be saved/restored across context-switch.
@ -453,7 +446,6 @@ config HIGHMEM
config ARC_HAS_PAE40
bool "Support for the 40-bit Physical Address Extension"
default n
depends on ISA_ARCV2
select HIGHMEM
select PHYS_ADDR_T_64BIT
@ -496,7 +488,6 @@ config HZ
config ARC_METAWARE_HLINK
bool "Support for Metaware debugger assisted Host access"
default n
help
This option allows Linux userland apps to directly access the
host file system (open/creat/read/write etc.) with help from
@ -524,13 +515,11 @@ config ARC_DW2_UNWIND
config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
endif
config ARC_UBOOT_SUPPORT
bool "Support uboot arg Handling"
default n
help
ARC Linux by default checks for uboot-provided args as pointers to
an external cmdline or DTB. This however breaks in the absence of uboot,

View File

@ -6,7 +6,7 @@
# published by the Free Software Foundation.
#
KBUILD_DEFCONFIG := nsim_700_defconfig
KBUILD_DEFCONFIG := nsim_hs_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7

View File

@ -222,6 +222,21 @@ mmc@a000 {
bus-width = <4>;
dma-coherent;
};
gpio: gpio@3000 {
compatible = "snps,dw-apb-gpio";
reg = <0x3000 0x20>;
#address-cells = <1>;
#size-cells = <0>;
gpio_port_a: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <24>;
reg = <0>;
};
};
};
memory@80000000 {

View File

@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set

View File

@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set

View File

@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set

View File

@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FTRACE=y

View File

@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y

View File

@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

View File

@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end;
/* IO coherency related Auxiliary registers */
#define ARC_REG_IO_COH_ENABLE 0x500
#define ARC_IO_COH_ENABLE_BIT BIT(0)
#define ARC_REG_IO_COH_PARTIAL 0x501
#define ARC_IO_COH_PARTIAL_BIT BIT(0)
#define ARC_REG_IO_COH_AP0_BASE 0x508
#define ARC_REG_IO_COH_AP0_SIZE 0x509

View File

@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
/*
* {read,write}s{b,w,l}() repeatedly access the same IO address in
* native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
* @count times
*/
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr, \
void *ptr, unsigned int count) \
{ \
bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
u##t *buf = ptr; \
\
if (!count) \
return; \
\
/* Some ARC CPUs don't support unaligned accesses */ \
if (is_aligned) { \
do { \
u##t x = __raw_read##f(addr); \
*buf++ = x; \
} while (--count); \
} else { \
do { \
u##t x = __raw_read##f(addr); \
put_unaligned(x, buf++); \
} while (--count); \
} \
}
#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
#define __raw_writesx(t,f) \
static inline void __raw_writes##f(volatile void __iomem *addr, \
const void *ptr, unsigned int count) \
{ \
bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
const u##t *buf = ptr; \
\
if (!count) \
return; \
\
/* Some ARC CPUs don't support unaligned accesses */ \
if (is_aligned) { \
do { \
__raw_write##f(*buf++, addr); \
} while (--count); \
} else { \
do { \
__raw_write##f(get_unaligned(buf++), addr); \
} while (--count); \
} \
}
#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
/*
* Relaxed API for drivers which can handle barrier ordering themselves
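
/*
 * Consumer-side sketch (hypothetical helper, not part of this header):
 * the string accessors above are for draining or filling a fixed FIFO
 * register, not for walking an address range.
 */
static void demo_drain_rx_fifo(void __iomem *fifo_reg, u32 *buf,
			       unsigned int count)
{
	/* readsl() reads the same register each iteration; the
	 * implementation above copes with an unaligned @buf via
	 * put_unaligned(). */
	readsl(fifo_reg, buf, count);
}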

View File

@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
int i, n = 0;
int i, n = 0, ua = 0;
FIX_PTR(cpu);
@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
#ifdef __ARC_UNALIGNED__
ua = 1;
#endif
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
if (i)
n += scnprintf(buf + n, len - n, "\n\t\t: ");

View File

@ -1144,6 +1144,20 @@ noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
/*
* If IOC was already enabled (due to bootloader), it technically needs to
* be reconfigured with an aperture base/size corresponding to the Linux
* memory map, which will certainly differ from u-boot's. But disabling and
* re-enabling IOC while DMA might be active is tricky business. To avoid
* random memory issues later, just panic here and ask the user to upgrade
* the bootloader to one which doesn't enable IOC
*/
if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
panic("IOC already enabled, please upgrade bootloader!\n");
if (!ioc_enable)
return;
/*
* As for today we don't support both IOC and ZONE_HIGHMEM enabled
* simultaneously. This happens because as of today IOC aperture covers
@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void)
panic("IOC Aperture start must be aligned to the size of the aperture");
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
/* Re-enable L1 dcache */
__dc_enable();
@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void)
if (is_isa_arcv2() && l2_line_sz && !slc_enable)
arc_slc_disable();
if (is_isa_arcv2() && ioc_enable)
if (is_isa_arcv2() && ioc_exists)
arc_ioc_setup();
if (is_isa_arcv2() && l2_line_sz && slc_enable) {

View File

@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int si_code;
int si_code = 0;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */

View File

@ -360,14 +360,16 @@ v7_dma_inv_range:
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
addne r0, r0, r2
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
cmp r0, r1
1:
mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
addlo r0, r0, r2
cmplo r0, r1
blo 1b
dsb st
ret lr

View File

@ -73,9 +73,11 @@
/*
* dcimvac: Invalidate data cache line by MVA to PoC
*/
.macro dcimvac, rt, tmp
v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr
/*
* dccmvau: Clean data cache line by MVA to PoU
@ -369,14 +371,16 @@ v7m_dma_inv_range:
tst r0, r3
bic r0, r0, r3
dccimvacne r0, r3
addne r0, r0, r2
subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
tst r1, r3
bic r1, r1, r3
dccimvacne r1, r3
1:
dcimvac r0, r3
add r0, r0, r2
cmp r0, r1
1:
dcimvaclo r0, r3
addlo r0, r0, r2
cmplo r0, r1
blo 1b
dsb st
ret lr

View File

@ -829,7 +829,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
int ret;
int ret = -ENXIO;
unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);

View File

@ -274,6 +274,13 @@
.endm
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
/*
* If we are building for big.LITTLE with branch predictor hardening,
* we need the processor function tables to remain available after boot.
*/
#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
.section ".rodata"
#endif
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
.endif
.size \name\()_processor_functions, . - \name\()_processor_functions
#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
.previous
#endif
.endm
.macro define_cache_functions name:req

View File

@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
memcpy(code, &optprobe_template_entry,
memcpy(code, (unsigned char *)optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */

View File

@ -343,6 +343,12 @@ vreg_s3c_0p6: smps3 {
};
};
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
};
&i2c10 {
status = "okay";
clock-frequency = <400000>;

View File

@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
}
memcpy((void *)dst, src_start, length);
flush_icache_range(dst, dst + length);
__flush_icache_range(dst, dst + length);
pgdp = pgd_offset_raw(allocator(mask), dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {

View File

@ -16,7 +16,7 @@
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
pgd &= ~(1<<31);
pgd -= PAGE_OFFSET;
pgd += PHYS_OFFSET;
pgd |= 1;
setup_pgd(pgd, kernel);
@ -29,7 +29,7 @@ static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
static inline unsigned long tlb_get_pgd(void)
{
return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
}
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])

View File

@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
KBUILD_CFLAGS_KERNEL += -mlong-calls
endif
# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
# for branches to reach stubs. And multiple .text sections trigger a warning
# when creating the sysfs module information section.
ifndef CONFIG_64BIT
KBUILD_CFLAGS_MODULE += -ffunction-sections
endif
# select which processor to optimise for
cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200

View File

@ -891,6 +891,55 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
return 0;
}
/* Fix the branch target addresses for subprog calls */
static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx, u32 *addrs)
{
const struct bpf_insn *insn = fp->insnsi;
bool func_addr_fixed;
u64 func_addr;
u32 tmp_idx;
int i, ret;
for (i = 0; i < fp->len; i++) {
/*
* During the extra pass, only the branch target addresses for
* the subprog calls need to be fixed. All other instructions
* can be left untouched.
*
* The JITed image length does not change because we already
* ensure that the JITed instruction sequences for these calls
* are of fixed length by padding them with NOPs.
*/
if (insn[i].code == (BPF_JMP | BPF_CALL) &&
insn[i].src_reg == BPF_PSEUDO_CALL) {
ret = bpf_jit_get_func_addr(fp, &insn[i], true,
&func_addr,
&func_addr_fixed);
if (ret < 0)
return ret;
/*
* Save ctx->idx as this would currently point to the
* end of the JITed image and set it to the offset of
* the instruction sequence corresponding to the
* subprog call temporarily.
*/
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
bpf_jit_emit_func_call_rel(image, ctx, func_addr);
/*
* Restore ctx->idx here. This is safe as the length
* of the JITed sequence remains unchanged.
*/
ctx->idx = tmp_idx;
}
}
return 0;
}
struct powerpc64_jit_data {
struct bpf_binary_header *header;
u32 *addrs;
@ -989,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
if (extra_pass) {
/*
* Do not touch the prologue and epilogue as they will remain
* unchanged. Only fix the branch target address for subprog
* calls in the body.
*
* This does not change the offsets and lengths of the subprog
* call instruction sequences and hence, the size of the JITed
* image as well.
*/
bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
/* There is no need to perform the usual passes. */
goto skip_codegen_passes;
}
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
@ -1002,6 +1067,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
proglen - (cgctx.idx * 4), cgctx.seen);
}
skip_codegen_passes:
if (bpf_jit_enable > 1)
/*
* Note that we output the base address of the code_base

View File

@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->tbl.map)
return -ENOMEM;
memset(iommu->tbl.map, 0, sz);
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
(tlb_type != hypervisor ? iommu_flushall : NULL),

View File

@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
/* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;

View File

@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
/* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;

View File

@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
/* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;

View File

@ -220,9 +220,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
$(error You are building kernel with non-retpoline compiler, please update your compiler.)
endif
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
endif
@ -307,6 +304,13 @@ ifndef CC_HAVE_ASM_GOTO
@echo Compiler lacks asm-goto support.
@exit 1
endif
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
@echo "You are building kernel with non-retpoline compiler." >&2
@echo "Please update your compiler." >&2
@false
endif
endif
archclean:
$(Q)rm -rf $(objtree)/arch/i386

View File

@ -1,3 +1,4 @@
/* -----------------------------------------------------------------------
*
* Copyright 2011 Intel Corporation; author Matt Fleming
@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}
static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
unsigned long map_size, desc_size, buff_size;
struct efi_boot_memmap boot_map;
efi_memory_desc_t *map;
efi_status_t status;
__u32 nr_desc;
boot_map.map = &map;
boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table, &boot_map);
if (status != EFI_SUCCESS)
return status;
nr_desc = buff_size / desc_size;
if (nr_desc > ARRAY_SIZE(params->e820_table)) {
u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
if (status != EFI_SUCCESS)
return status;
}
return EFI_SUCCESS;
}
struct exit_boot_struct {
struct boot_params *boot_params;
struct efi_info *efi;
struct setup_data *e820ext;
__u32 e820ext_size;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
static bool first = true;
const char *signature;
__u32 nr_desc;
efi_status_t status;
struct exit_boot_struct *p = priv;
if (first) {
nr_desc = *map->buff_size / *map->desc_size;
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
u32 nr_e820ext = nr_desc -
ARRAY_SIZE(p->boot_params->e820_table);
status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);
if (status != EFI_SUCCESS)
return status;
}
first = false;
}
signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
: EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
struct setup_data *e820ext;
__u32 e820ext_size;
struct setup_data *e820ext = NULL;
__u32 e820ext_size = 0;
efi_status_t status;
__u32 desc_version;
struct efi_boot_memmap map;
@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
priv.e820ext = NULL;
priv.e820ext_size = 0;
status = allocate_e820(boot_params, &e820ext, &e820ext_size);
if (status != EFI_SUCCESS)
return status;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
if (status != EFI_SUCCESS)
return status;
e820ext = priv.e820ext;
e820ext_size = priv.e820ext_size;
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;

View File

@ -566,6 +566,7 @@ ENTRY(interrupt_entry)
ret
END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)
/* Interrupt entry/exit. */
@ -766,6 +767,7 @@ native_irq_return_ldt:
jmp native_irq_return_iret
#endif
END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)
/*
* APIC interrupts.
@ -780,6 +782,7 @@ ENTRY(\sym)
call \do_sym /* rdi points to pt_regs */
jmp ret_from_intr
END(\sym)
_ASM_NOKPROBE(\sym)
.endm
/* Make sure APIC interrupt handlers end up in the irqentry section: */
@ -960,6 +963,7 @@ ENTRY(\sym)
jmp error_exit
.endif
_ASM_NOKPROBE(\sym)
END(\sym)
.endm

View File

@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
-z max-page-size=4096 -z common-page-size=4096
-z max-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
-z max-page-size=4096 -z common-page-size=4096
-z max-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)

View File

@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
boot_params->acpi_rsdp_addr = 0;
memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
(char *)&boot_params->ext_ramdisk_image);

View File

@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
int len = 0, ret;
while (len < RELATIVEJUMP_SIZE) {
ret = __copy_instruction(dest + len, src + len, real, &insn);
ret = __copy_instruction(dest + len, src + len, real + len, &insn);
if (!ret || !can_boost(&insn, src + len))
return -EINVAL;
len += ret;

View File

@ -183,7 +183,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
num--;
}
if (efi_x >= si->lfb_width) {
if (efi_x + font->width > si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}

View File

@ -638,7 +638,7 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
) ||
(bfqd->num_active_groups > 0
(bfqd->num_groups_with_pending_reqs > 0
#endif
);
}
@ -802,7 +802,21 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
*/
break;
}
bfqd->num_active_groups--;
/*
* The decrement of num_groups_with_pending_reqs is
* not performed immediately upon the deactivation of
* entity, but it is delayed to when it also happens
* that the first leaf descendant bfqq of entity gets
* all its pending requests completed. The following
* instructions perform this delayed decrement, if
* needed. See the comments on
* num_groups_with_pending_reqs for details.
*/
if (entity->in_groups_with_pending_reqs) {
entity->in_groups_with_pending_reqs = false;
bfqd->num_groups_with_pending_reqs--;
}
}
}
@ -3529,27 +3543,44 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* fact, if there are active groups, then, for condition (i)
* to become false, it is enough that an active group contains
* more active processes or sub-groups than some other active
* group. We address this issue with the following bi-modal
* behavior, implemented in the function
* group. More precisely, for condition (i) to hold because of
* such a group, it is not even necessary that the group is
* (still) active: it is sufficient that, even if the group
* has become inactive, some of its descendant processes still
* have some request already dispatched but still waiting for
* completion. In fact, requests have still to be guaranteed
* their share of the throughput even after being
* dispatched. In this respect, it is easy to show that, if a
* group frequently becomes inactive while still having
* in-flight requests, and if, when this happens, the group is
* not considered in the calculation of whether the scenario
* is asymmetric, then the group may fail to be guaranteed its
* fair share of the throughput (basically because idling may
* not be performed for the descendant processes of the group,
* but it had to be). We address this issue with the
* following bi-modal behavior, implemented in the function
* bfq_symmetric_scenario().
*
* If there are active groups, then the scenario is tagged as
* If there are groups with requests waiting for completion
* (as commented above, some of these groups may even be
* already inactive), then the scenario is tagged as
* asymmetric, conservatively, without checking any of the
* conditions (i) and (ii). So the device is idled for bfqq.
* This behavior matches also the fact that groups are created
* exactly if controlling I/O (to preserve bandwidth and
* latency guarantees) is a primary concern.
* exactly if controlling I/O is a primary concern (to
* preserve bandwidth and latency guarantees).
*
* On the opposite end, if there are no active groups, then
* only condition (i) is actually controlled, i.e., provided
* that condition (i) holds, idling is not performed,
* regardless of whether condition (ii) holds. In other words,
* only if condition (i) does not hold, then idling is
* allowed, and the device tends to be prevented from queueing
* many requests, possibly of several processes. Since there
* are no active groups, then, to control condition (i) it is
* enough to check whether all active queues have the same
* weight.
* On the opposite end, if there are no groups with requests
* waiting for completion, then only condition (i) is actually
* controlled, i.e., provided that condition (i) holds, idling
* is not performed, regardless of whether condition (ii)
* holds. In other words, only if condition (i) does not hold,
* then idling is allowed, and the device tends to be
* prevented from queueing many requests, possibly of several
* processes. Since there are no groups with requests waiting
* for completion, then, to control condition (i) it is enough
* to check just whether all the queues with requests waiting
* for completion also have the same weight.
*
* Not checking condition (ii) evidently exposes bfqq to the
* risk of getting less throughput than its fair share.
@ -3607,10 +3638,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* bfqq is weight-raised is checked explicitly here. More
* precisely, the compound condition below takes into account
* also the fact that, even if bfqq is being weight-raised,
* the scenario is still symmetric if all active queues happen
* to be weight-raised. Actually, we should be even more
* precise here, and differentiate between interactive weight
* raising and soft real-time weight raising.
* the scenario is still symmetric if all queues with requests
* waiting for completion happen to be
* weight-raised. Actually, we should be even more precise
* here, and differentiate between interactive weight raising
* and soft real-time weight raising.
*
* As a side note, it is worth considering that the above
* device-idling countermeasures may however fail in the
@ -5417,7 +5449,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT;
bfqd->num_active_groups = 0;
bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);

View File

@ -196,6 +196,9 @@ struct bfq_entity {
/* flag, set to request a weight, ioprio or ioprio_class change */
int prio_changed;
/* flag, set if the entity is counted in groups_with_pending_reqs */
bool in_groups_with_pending_reqs;
};
struct bfq_group;
@ -448,10 +451,54 @@ struct bfq_data {
* bfq_weights_tree_[add|remove] for further details).
*/
struct rb_root queue_weights_tree;
/*
* number of groups with requests still waiting for completion
* Number of groups with at least one descendant process that
* has at least one request waiting for completion. Note that
* this accounts for also requests already dispatched, but not
* yet completed. Therefore this number of groups may differ
* (be larger) than the number of active groups, as a group is
* considered active only if its corresponding entity has
* descendant queues with at least one request queued. This
* number is used to decide whether a scenario is symmetric.
* For a detailed explanation see comments on the computation
* of the variable asymmetric_scenario in the function
* bfq_better_to_idle().
*
* However, it is hard to compute this number exactly, for
* groups with multiple descendant processes. Consider a group
* that is inactive, i.e., that has no descendant process with
* pending I/O inside BFQ queues. Then suppose that
* num_groups_with_pending_reqs is still accounting for this
* group, because the group has descendant processes with some
* I/O request still in flight. num_groups_with_pending_reqs
* should be decremented when the in-flight request of the
* last descendant process is finally completed (assuming that
* nothing else has changed for the group in the meantime, in
* terms of composition of the group and active/inactive state of child
* groups and processes). To accomplish this, an additional
* pending-request counter must be added to entities, and must
* be updated correctly. To avoid this additional field and operations,
* we resort to the following tradeoff between simplicity and
* accuracy: for an inactive group that is still counted in
* num_groups_with_pending_reqs, we decrement
* num_groups_with_pending_reqs when the first descendant
* process of the group remains with no request waiting for
* completion.
*
* Even this simpler decrement strategy requires a little
* carefulness: to avoid multiple decrements, we flag a group,
* more precisely an entity representing a group, as still
* counted in num_groups_with_pending_reqs when it becomes
* inactive. Then, when the first descendant queue of the
* entity remains with no request waiting for completion,
* num_groups_with_pending_reqs is decremented, and this flag
* is reset. After this flag is reset for the entity,
* num_groups_with_pending_reqs won't be decremented any
* longer in case a new descendant queue of the entity remains
* with no request waiting for completion.
*/
unsigned int num_active_groups;
unsigned int num_groups_with_pending_reqs;
/*
* Number of bfq_queues containing requests (including the
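
/*
 * Distilled sketch of the flag-guarded accounting described above
 * (simplified; locking and the surrounding scheduler state omitted):
 */
static void demo_group_gains_pending_reqs(struct bfq_data *bfqd,
					  struct bfq_entity *entity)
{
	if (!entity->in_groups_with_pending_reqs) {
		entity->in_groups_with_pending_reqs = true;
		bfqd->num_groups_with_pending_reqs++;	/* count group once */
	}
}

static void demo_last_pending_req_completed(struct bfq_data *bfqd,
					    struct bfq_entity *entity)
{
	if (entity->in_groups_with_pending_reqs) {
		entity->in_groups_with_pending_reqs = false;
		bfqd->num_groups_with_pending_reqs--;	/* delayed, once */
	}
}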

View File

@ -1012,7 +1012,10 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
container_of(entity, struct bfq_group, entity);
struct bfq_data *bfqd = bfqg->bfqd;
bfqd->num_active_groups++;
if (!entity->in_groups_with_pending_reqs) {
entity->in_groups_with_pending_reqs = true;
bfqd->num_groups_with_pending_reqs++;
}
}
#endif

View File

@ -1764,7 +1764,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (bypass_insert)
return BLK_STS_RESOURCE;
blk_mq_sched_insert_request(rq, false, run_queue, false);
blk_mq_request_bypass_insert(rq, run_queue);
return BLK_STS_OK;
}
@ -1780,7 +1780,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_sched_insert_request(rq, false, true, false);
blk_mq_request_bypass_insert(rq, true);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
@ -1815,7 +1815,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_STS_OK) {
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
list_add(&rq->queuelist, list);
blk_mq_request_bypass_insert(rq,
list_empty(list));
break;
}
blk_mq_end_request(rq, ret);

View File

@ -1812,7 +1812,7 @@ config CRYPTO_USER_API_AEAD
cipher algorithms.
config CRYPTO_STATS
bool "Crypto usage statistics for User-space"
bool
help
This option enables the gathering of crypto stats.
This will collect:

View File

@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
crypto_mod_put(alg);
if (err)
goto err_free_inst;
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
if (err)
@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;

View File

@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
crypto_mod_put(alg);
if (err)
goto err_free_inst;
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
if (err)
@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;

View File

@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
crypto_mod_put(alg);
if (err)
goto err_free_inst;
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
if (err)
@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;

View File

@ -1308,7 +1308,7 @@ static ssize_t scrub_store(struct device *dev,
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
rc = acpi_nfit_ars_rescan(acpi_desc, 0);
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
device_unlock(dev);
if (rc)

View File

@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*

View File

@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
pr_err("CLK %d has invalid pointer %p\n", id, clk);
return;
}
if (id > unit->nr_clks) {
if (id >= unit->nr_clks) {
pr_err("CLK %d is invalid\n", id);
return;
}

View File

@ -200,11 +200,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
unsigned int idx = clkspec->args[1];
if (type == CP110_CLK_TYPE_CORE) {
if (idx > CP110_MAX_CORE_CLOCKS)
if (idx >= CP110_MAX_CORE_CLOCKS)
return ERR_PTR(-EINVAL);
return clk_data->hws[idx];
} else if (type == CP110_CLK_TYPE_GATABLE) {
if (idx > CP110_MAX_GATABLE_CLOCKS)
if (idx >= CP110_MAX_GATABLE_CLOCKS)
return ERR_PTR(-EINVAL);
return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
}

View File

@ -191,6 +191,22 @@ int qcom_cc_register_sleep_clk(struct device *dev)
}
EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
/* Drop 'protected-clocks' from the list of clocks to register */
static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
{
struct device_node *np = dev->of_node;
struct property *prop;
const __be32 *p;
u32 i;
of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
if (i >= cc->num_rclks)
continue;
cc->rclks[i] = NULL;
}
}
static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@ -251,6 +267,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
cc->rclks = rclks;
cc->num_rclks = num_clks;
qcom_cc_drop_protected(dev, cc);
for (i = 0; i < num_clks; i++) {
if (!rclks[i])
continue;

View File

@ -128,7 +128,7 @@ static const struct zynqmp_eemi_ops *eemi_ops;
*/
static inline int zynqmp_is_valid_clock(u32 clk_id)
{
if (clk_id > clock_max_idx)
if (clk_id >= clock_max_idx)
return -ENODEV;
return clock[clk_id].valid;
@ -279,6 +279,9 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.arg1 = clk_id;
ret = eemi_ops->query_data(qdata, ret_payload);
if (ret)
return ERR_PTR(ret);
mult = ret_payload[1];
div = ret_payload[2];

View File

@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
/*
* Program FIFO size of channels.
*
* By default full FIFO (1024 bytes) is assigned to channel 0. Here we
* By default full FIFO (512 bytes) is assigned to channel 0. Here we
* slice FIFO on equal parts between channels.
*/
static void idma32_fifo_partition(struct dw_dma *dw)
{
u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
IDMA32C_FP_UPDATE;
u64 fifo_partition = 0;
@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
fifo_partition |= value << 32;
/* Program FIFO Partition registers - 128 bytes for each channel */
/* Program FIFO Partition registers - 64 bytes per channel */
idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
}

View File

@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@ -33,6 +32,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
@ -376,7 +376,7 @@ struct sdma_channel {
u32 shp_addr, per_addr;
enum dma_status status;
struct imx_dma_data data;
struct dma_pool *bd_pool;
struct work_struct terminate_worker;
};
#define IMX_DMA_SG_LOOP BIT(0)
@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
static void sdma_channel_terminate_work(struct work_struct *work)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
terminate_worker);
unsigned long flags;
LIST_HEAD(head);
sdma_disable_channel(chan);
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
/*
* According to NXP R&D team a delay of one BD SDMA cost time
* (maximum is 1ms) should be added after disable of the channel
* bit, to ensure SDMA core has really been stopped after SDMA
* clients call .device_terminate_all.
*/
mdelay(1);
usleep_range(1000, 2000);
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
}
static int sdma_disable_channel_async(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
sdma_disable_channel(chan);
if (sdmac->desc)
schedule_work(&sdmac->terminate_worker);
return 0;
}
static void sdma_channel_synchronize(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
vchan_synchronize(&sdmac->vc);
flush_work(&sdmac->terminate_worker);
}
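/*
 * For context, a dmaengine client is expected to pair the asynchronous
 * terminate above with a synchronize before freeing its buffers. A
 * minimal sketch using the generic dmaengine wrappers (channel setup
 * omitted; the helper name is illustrative, not part of this driver):
 */
static void demo_stop_channel(struct dma_chan *chan)
{
	/* Reaches sdma_disable_channel_async(): stops the channel and
	 * defers descriptor cleanup to terminate_worker. */
	dmaengine_terminate_async(chan);

	/* Reaches sdma_channel_synchronize(): waits for the worker,
	 * after which DMA buffers can be freed safely. */
	dmaengine_synchronize(chan);
}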
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@ -1192,10 +1210,11 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
static int sdma_alloc_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
&desc->bd_phys);
desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@ -1206,7 +1225,9 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
static void sdma_free_bd(struct sdma_desc *desc)
{
dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto disable_clk_ahb;
sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
sizeof(struct sdma_buffer_descriptor),
32, 0);
return 0;
disable_clk_ahb:
@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
sdma_disable_channel_with_delay(chan);
sdma_disable_channel_async(chan);
sdma_channel_synchronize(chan);
if (sdmac->event_id0)
sdma_event_disable(sdmac, sdmac->event_id0);
@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
dma_pool_destroy(sdmac->bd_pool);
sdmac->bd_pool = NULL;
}
static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
sdmac->channel = i;
sdmac->vc.desc_free = sdma_desc_free;
INIT_WORK(&sdmac->terminate_worker,
sdma_channel_terminate_work);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;


@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
if (!cdd->chan_busy[desc_num])
if (!cdd->chan_busy[desc_num]) {
struct cppi41_channel *cc, *_ct;
/*
* Channels might still be in the pending list if
* cppi41_dma_issue_pending() is called after
* cppi41_runtime_suspend() is called
*/
list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
if (cc != c)
continue;
list_del(&cc->node);
break;
}
return 0;
}
ret = cppi41_tear_down_chan(c);
if (ret)
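
The new block uses the _safe list iterator because it unlinks the matching channel while walking the pending list. A self-contained sketch of why the saved next pointer matters (a plain singly linked list standing in for the kernel's struct list_head):

#include <stdio.h>

struct channel {
	int id;
	struct channel *next;
};

/* Unlink 'victim' while walking the list. Saving the next
 * pointer before any unlink is the "safe" part, the same idea
 * as list_for_each_entry_safe() in the kernel. */
static void remove_pending(struct channel **head, struct channel *victim)
{
	struct channel **pp = head;
	struct channel *c, *next;

	for (c = *head; c; c = next) {
		next = c->next;      /* saved before unlinking */
		if (c == victim) {
			*pp = next;
			break;
		}
		pp = &c->next;
	}
}

int main(void)
{
	struct channel a = { 0, NULL }, b = { 1, NULL };
	struct channel *head = &a;

	a.next = &b;
	remove_pending(&head, &a);
	printf("head is now channel %d\n", head->id); /* 1 */
	return 0;
}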


@ -168,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active)
else
timeout = SIRF_HIBERNATE_TIMEOUT;
while (retries-- > 0) {
do {
sirf_pulse_on_off(data);
ret = sirf_wait_for_power_state(data, active, timeout);
if (ret < 0) {
@ -179,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active)
}
break;
}
} while (retries--);
if (retries == 0)
if (retries < 0)
return -ETIMEDOUT;
return 0;
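
The loop rewrite is about where the post-decrement leaves the counter. With while (retries-- > 0), the counter ends at -1 once the budget is exhausted, so the old retries == 0 test could never report a timeout, and a success on the final attempt could even be misreported as one. A small demonstration of the counter values (attempt() is a stand-in that always fails):

#include <stdio.h>

#define RETRIES 3

static int attempt(void) { return -1; }

int main(void)
{
	int retries;

	/* Old shape: exits with retries == -1, so a
	 * 'retries == 0' timeout test never fires. */
	retries = RETRIES;
	while (retries-- > 0)
		attempt();
	printf("while-loop leaves retries = %d\n", retries); /* -1 */

	/* New shape: 'retries < 0' correctly detects exhaustion,
	 * and a success that breaks out leaves retries >= 0. */
	retries = RETRIES;
	do {
		if (attempt() == 0)
			break;
	} while (retries--);
	printf("do-loop leaves retries = %d (timeout: %s)\n",
	       retries, retries < 0 ? "yes" : "no");
	return 0;
}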


@ -233,7 +233,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 20
#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,


@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)


@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);


@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@ -224,12 +227,38 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
(adev->pdev->revision == 0xef) ||
(adev->pdev->revision == 0xff))))
chip_name = "polaris11_k";
else if ((adev->pdev->device == 0x67ef) &&
(adev->pdev->revision == 0xe2))
chip_name = "polaris11_k";
else
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
if ((adev->pdev->device == 0x67df) &&
((adev->pdev->revision == 0xe1) ||
(adev->pdev->revision == 0xf7)))
chip_name = "polaris10_k";
else
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
if (((adev->pdev->device == 0x6987) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc3))) ||
((adev->pdev->device == 0x6981) &&
((adev->pdev->revision == 0x00) ||
(adev->pdev->revision == 0x01) ||
(adev->pdev->revision == 0x10))))
chip_name = "polaris12_k";
else
chip_name = "polaris12";
break;
case CHIP_FIJI:
@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
u32 data, vbios_version;
u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
vbios_version = data & 0xf;
if (vbios_version == 0)
return 0;
if (!adev->gmc.fw)
return -EINVAL;


@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_stop(adev);
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->ready = false;


@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
strncpy(audio_info->display_name,
strscpy(audio_info->display_name,
edid_caps->display_name,
AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->freesync_capable = state->freesync_capable;
new_state->freesync_enable = state->freesync_enable;
new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
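
Both hunks above swap strncpy() for strscpy(): strncpy() does not NUL-terminate when the source fills the buffer (which is why callers passed size - 1 and terminated by hand), while strscpy() takes the full buffer size, always terminates, and reports truncation. A userspace sketch of strscpy-like semantics (the kernel's implementation differs in detail):

#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)

/* Minimal strscpy-style copy: always NUL-terminates, returns the
 * number of characters copied, or a negative value on truncation. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return E2BIG_ERR;
	len = strnlen(src, size);
	if (len == size) {           /* src does not fit */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
		return E2BIG_ERR;
	}
	memcpy(dst, src, len + 1);   /* includes the NUL */
	return (long)len;
}

int main(void)
{
	char name[8];

	/* strncpy(name, "a-very-long-name", 8) would fill all 8
	 * bytes with no terminating NUL; this cannot. */
	my_strscpy(name, "a-very-long-name", sizeof(name));
	printf("%s\n", name); /* "a-very-" */
	return 0;
}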


@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;


@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
/* Skip for suspend/resume case */
if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
&& adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}


@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = phm_pre_display_configuration_changed(hwmgr);
if (ret)
return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;


@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
phm_pre_display_configuration_changed(hwmgr);
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)


@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
if (sclk > sclk_table->dpm_levels[i-1].value) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
sclk_table->dpm_levels[i-1].value = sclk;
}
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
@ -3607,9 +3609,11 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
if (mclk > mclk_table->dpm_levels[i-1].value) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
mclk_table->dpm_levels[i-1].value = mclk;
}
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;


@ -3266,9 +3266,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
if (sclk > sclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
sclk_table->dpm_levels[i-1].value = sclk;
}
}
for (i = 0; i < mclk_table->count; i++) {
if (mclk == mclk_table->dpm_levels[i].value)
@ -3276,9 +3278,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
if (mclk > mclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
mclk_table->dpm_levels[i-1].value = mclk;
}
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;


@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
if (data->smu_features[GNLD_DPM_UCLK].enabled &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
if (data->smu_features[GNLD_DPM_UVD].enabled &&
(feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
if (data->smu_features[GNLD_DPM_VCE].enabled &&
(feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
if (data->smu_features[GNLD_DPM_UCLK].enabled &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
if (data->smu_features[GNLD_DPM_UVD].enabled &&
(feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
if (data->smu_features[GNLD_DPM_VCE].enabled &&
(feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
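
The new feature_mask parameter lets callers scope the upload: 0xFFFFFFFF preserves the old touch-everything behavior, while a single-bit mask such as FEATURE_DPM_GFXCLK_MASK limits the SMC messages to one clock domain. The gate reduces to an enabled-AND-selected test; a sketch with illustrative mask values (the real FEATURE_DPM_*_MASK definitions live in the vega20 headers):

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit assignments only. */
#define FEATURE_DPM_GFXCLK_MASK (1u << 0)
#define FEATURE_DPM_UCLK_MASK   (1u << 1)

struct domain {
	const char *name;
	int enabled;
	uint32_t mask;
};

static void upload_min_levels(struct domain *d, int n, uint32_t feature_mask)
{
	/* A domain is touched only if it is both enabled and selected. */
	for (int i = 0; i < n; i++)
		if (d[i].enabled && (d[i].mask & feature_mask))
			printf("uploading min level for %s\n", d[i].name);
}

int main(void)
{
	struct domain domains[] = {
		{ "gfxclk", 1, FEATURE_DPM_GFXCLK_MASK },
		{ "uclk",   1, FEATURE_DPM_UCLK_MASK },
	};

	upload_min_levels(domains, 2, 0xFFFFFFFF);            /* both */
	upload_min_levels(domains, 2, FEATURE_DPM_UCLK_MASK); /* uclk only */
	return 0;
}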


@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
drm_crtc_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {


@ -54,7 +54,7 @@
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
#define AUX_CMD_SEND BIT(1)
#define AUX_CMD_SEND BIT(0)
#define AUX_CMD_REQ(x) ((x) << 4)
#define SN_AUX_RDATA_REG(x) (0x79 + (x))
#define SN_SSC_CONFIG_REG 0x93
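
The AUX fix is a single bit position: BIT(n) expands to 1 << n, so BIT(1) is 0x02 while the SEND flag actually sits in bit 0 (0x01), meaning the old define never triggered the transaction. A quick demonstration (the 0x8 request code is just an example value):

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define AUX_CMD_SEND    BIT(0)
#define AUX_CMD_REQ(x)  ((unsigned long)(x) << 4)

int main(void)
{
	/* Example: request code 0x8 plus the SEND bit. */
	unsigned long cmd = AUX_CMD_REQ(0x8) | AUX_CMD_SEND;

	printf("BIT(0)=0x%lx BIT(1)=0x%lx\n", BIT(0), BIT(1));
	printf("cmd register value: 0x%lx\n", cmd); /* 0x81 */
	return 0;
}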


@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem = false;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(fbdev_emulation,
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif


@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);


@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
if (master->lessor) {
/* Tell the master to check the lessee list */
drm_sysfs_hotplug_event(dev);
drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}


@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
connector->kdev = NULL;
}
void drm_sysfs_lease_event(struct drm_device *dev)
{
char *event_string = "LEASE=1";
char *envp[] = { event_string, NULL };
DRM_DEBUG("generating lease event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
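
kobject_uevent_env() takes a NULL-terminated array of KEY=VALUE strings, which udev delivers to userspace as environment variables so listeners can match on LEASE=1. The array shape is plain C; a sketch that builds and walks one (emit_uevent() is a stand-in for the kernel call):

#include <stdio.h>

/* A NULL-terminated KEY=VALUE vector, the same shape
 * kobject_uevent_env() expects for its envp argument. */
static void emit_uevent(char *envp[])
{
	for (int i = 0; envp[i]; i++)
		printf("uevent var: %s\n", envp[i]);
}

int main(void)
{
	char *event_string = "LEASE=1";
	char *envp[] = { event_string, NULL };

	emit_uevent(envp);
	return 0;
}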
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device


@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
plane->crtc = crtc;
/* save a user-friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);


@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
kfree(dpu_enc);
}
void dpu_encoder_helper_split_config(


@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,


@ -39,6 +39,8 @@
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
#define VCO_REF_CLK_RATE 19200000
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
pll_10nm->vco_ref_clk_rate = parent_rate;
pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_10nm);


@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
ret = msm_hdmi_hpd_enable(hdmi->connector);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
goto fail;
}
encoder->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;


@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
void msm_hdmi_connector_irq(struct drm_connector *connector);
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
int msm_hdmi_hpd_enable(struct drm_connector *connector);
/*
* i2c adapter for ddc:


@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
static int hpd_enable(struct hdmi_connector *hdmi_connector)
int msm_hdmi_hpd_enable(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector)
@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
ret = hpd_enable(hdmi_connector);
if (ret) {
dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
return ERR_PTR(ret);
}
drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;


@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
if (drm_crtc_vblank_get(crtc))
continue;
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
drm_crtc_vblank_put(crtc);
}
}


@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
kfree(show_priv);
return ret;
goto free_priv;
}
show_priv->dev = dev;
return single_open(file, msm_gpu_show, show_priv);
ret = single_open(file, msm_gpu_show, show_priv);
if (ret)
goto free_priv;
return 0;
free_priv:
kfree(show_priv);
return ret;
}
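
The reworked open path follows the canonical goto-cleanup shape: every failure after the allocation funnels through one label that frees it, so no early return can leak show_priv. The pattern in isolation (error numbers and steps are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct show_priv { int dummy; };

static int fake_open(int fail_step)
{
	struct show_priv *priv;
	int ret = 0;

	priv = malloc(sizeof(*priv));
	if (!priv)
		return -12; /* -ENOMEM */

	if (fail_step == 1) { ret = -4; goto free_priv; } /* lock failed */
	if (fail_step == 2) { ret = -5; goto free_priv; } /* state failed */

	printf("open succeeded\n");
	return 0;

free_priv:
	/* Single cleanup point: every error path frees exactly once. */
	free(priv);
	return ret;
}

int main(void)
{
	printf("ret=%d\n", fake_open(1));
	printf("ret=%d\n", fake_open(0));
	return 0;
}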
static const struct file_operations msm_gpu_fops = {

Some files were not shown because too many files have changed in this diff Show More