From 4920b1f7676d55dcebdf55f2b0431a328acb4abe Mon Sep 17 00:00:00 2001
From: Punit Agrawal
Date: Fri, 2 Nov 2018 16:57:52 +0000
Subject: [PATCH 1/4] mailmap: Update email for Punit Agrawal

As I'll no longer be working with Arm, add a mailmap entry so any mail
directed towards me reaches the appropriate mailbox.

Acked-by: Will Deacon
Signed-off-by: Punit Agrawal
Signed-off-by: Catalin Marinas
---
 .mailmap | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.mailmap b/.mailmap
index a76be45fef6c..28fecafa6506 100644
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Peter Oruba
 Peter Oruba
 Pratyush Anand
 Praveen BP
+Punit Agrawal
 Qais Yousef
 Oleksij Rempel
 Oleksij Rempel

From 313a06e636808387822af24c507cba92703568b1 Mon Sep 17 00:00:00 2001
From: Jeremy Linton
Date: Mon, 5 Nov 2018 18:14:41 -0600
Subject: [PATCH 2/4] lib/raid6: Fix arm64 test build

The lib/raid6/test fails to build the neon objects on arm64 because
the correct machine type is 'aarch64'. Once this is correctly enabled,
the neon recovery objects need to be added to the build.

Reviewed-by: Ard Biesheuvel
Signed-off-by: Jeremy Linton
Signed-off-by: Catalin Marinas
---
 lib/raid6/test/Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 5d73f5cb4d8a..79777645cac9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
         CFLAGS += -I../../../arch/arm/include -mfpu=neon
         HAS_NEON = yes
 endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
         CFLAGS += -I../../../arch/arm64/include
         HAS_NEON = yes
 endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
                     gcc -c -x assembler - >&/dev/null && \
                     rm ./-.o && echo -DCONFIG_AS_AVX512=1)
 else ifeq ($(HAS_NEON),yes)
-        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
         CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\

From 26a4676faa1ad5d99317e0cd701e5d6f3e716b77 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 7 Nov 2018 18:10:38 +0100
Subject: [PATCH 3/4] arm64: mm: define NET_IP_ALIGN to 0

On arm64, there is no need to add 2 bytes of padding to the start of
each network buffer just to make the IP header appear 32-bit aligned.

Since this might actually adversely affect DMA performance on some
platforms, let's override NET_IP_ALIGN to 0 to get rid of this
padding.

Acked-by: Ilias Apalodimas
Tested-by: Ilias Apalodimas
Acked-by: Mark Rutland
Acked-by: Will Deacon
Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/processor.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3e2091708b8e..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,6 +24,14 @@
 #define KERNEL_DS	UL(-1)
 #define USER_DS		(TASK_SIZE_64 - 1)
 
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN	0
+
 #ifndef __ASSEMBLY__
 #ifdef __KERNEL__
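For context on NET_IP_ALIGN (this note and sketch are not part of the
series): network drivers reserve NET_IP_ALIGN bytes of headroom in front
of each receive buffer so that the IP header following the 14-byte
Ethernet header lands on a 32-bit boundary. include/linux/skbuff.h
supplies the generic default of 2, which an architecture header such as
the one patched above may override. A minimal sketch of the usual
receive-path pattern follows; the helper name is hypothetical, but
netdev_alloc_skb() and skb_reserve() are the real APIs involved:

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  /* Hypothetical helper showing the typical RX buffer allocation
   * pattern. NET_IP_ALIGN comes from skbuff.h (default 2) unless the
   * architecture overrides it, as the arm64 patch above now does.
   */
  static struct sk_buff *rx_alloc_sketch(struct net_device *dev,
  					unsigned int len)
  {
  	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

  	if (skb)
  		/* Shift the Ethernet header by NET_IP_ALIGN bytes so
  		 * the IP header behind it becomes 4-byte aligned; this
  		 * is a no-op once NET_IP_ALIGN is 0.
  		 */
  		skb_reserve(skb, NET_IP_ALIGN);
  	return skb;
  }

With the arm64 override in place, the skb_reserve() call above does
nothing, so buffers stay exactly where the allocator put them and any
DMA alignment requirements of the platform are preserved.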
From 24cc61d8cb5a9232fadf21a830061853c1268fdd Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 7 Nov 2018 15:16:06 +0100
Subject: [PATCH 4/4] arm64: memblock: don't permit memblock resizing until
 linear mapping is up

Bhupesh reports that having numerous memblock reservations at early
boot may result in the following crash:

  Unable to handle kernel paging request at virtual address ffff80003ffe0000
  ...
  Call trace:
   __memcpy+0x110/0x180
   memblock_add_range+0x134/0x2e8
   memblock_reserve+0x70/0xb8
   memblock_alloc_base_nid+0x6c/0x88
   __memblock_alloc_base+0x3c/0x4c
   memblock_alloc_base+0x28/0x4c
   memblock_alloc+0x2c/0x38
   early_pgtable_alloc+0x20/0xb0
   paging_init+0x28/0x7f8

This is caused by the fact that we permit memblock resizing before the
linear mapping is up, and so the memblock.reserved array is moved into
memory that is not mapped yet.

So let's ensure that this crash can no longer occur, by deferring the
call to memblock_allow_resize() to after the linear mapping has been
created.

Reported-by: Bhupesh Sharma
Acked-by: Will Deacon
Tested-by: Marc Zyngier
Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 2 --
 arch/arm64/mm/mmu.c  | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9d9582cac6c4..9b432d9fcada 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
 	dma_contiguous_reserve(arm64_dma_phys_limit);
-
-	memblock_allow_resize();
 }
 
 void __init bootmem_init(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 394b8d554def..d1d6601b385d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -659,6 +659,8 @@ void __init paging_init(void)
 
 	memblock_free(__pa_symbol(init_pg_dir),
 		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+	memblock_allow_resize();
 }
 
 /*
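To see why deferring memblock_allow_resize() fixes the crash, it helps
to look at what resizing does. Once resizing is allowed and the static
regions array fills up, memblock allocates a doubled replacement array
from memblock-managed memory and copies the old contents across through
a linear-map virtual address. The sketch below is a heavily condensed
paraphrase of memblock_double_array() in mm/memblock.c: the function
name, memblock_find_in_range() and the memcpy() are real, but the body
is abbreviated and the error handling omitted for illustration:

  /* Condensed paraphrase of mm/memblock.c:memblock_double_array(). */
  static int memblock_double_array(struct memblock_type *type, ...)
  {
  	struct memblock_region *new_array;
  	phys_addr_t addr;

  	/* Grab physical space for the doubled array from memblock
  	 * itself, then derive its linear-map virtual address.
  	 */
  	addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
  				      new_size, PAGE_SIZE);
  	new_array = __va(addr);

  	/* Before paging_init() has built the linear mapping, this
  	 * virtual address has no page tables behind it, so the copy
  	 * faults -- the __memcpy frame at the top of the call trace
  	 * in the commit message above.
  	 */
  	memcpy(new_array, type->regions, old_size);
  	...
  }

Moving memblock_allow_resize() to the end of paging_init() guarantees
that any such copy only happens after the linear mapping exists, so the
__va() address is always backed by page tables.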