- Fix the vmlinux size check on 64-bit along with adding useful
  clarifications on the topic (Arvind Sankar)

- Remove -m16 workaround now that the GCC versions that need it are
  unsupported (Nick Desaulniers)
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAl/XhwYACgkQEsHwGGHe
 VUqYPQ/9ECDO6Ms8rkvZf4f7+LfKO8XsRYf0h411h5zuVssgPybIbZCvuiQ5Jj9r
 IzTe0pO0AoVoX5/JE+mxbvroBb5hRokNehJayPmdVjZSrzsYWZR3RVW6iSRo/TQ0
 bjzY1cHC4EPmtyYA4FDr+b0JAEEbyPuPOAcqSzBBadsx5FURvwH7s1hUILCZ1nmy
 i8DADZDo7Puk1W8VQkGkjUeo3QVY8KG2VzZegU0iUR/M7Hoek3WBPObYFg0bsKN4
 6hvRWPMt2lFXOSNZOFhG+LjtWXTJ22g7ENU4SHXhHBdxmM8H/OZvh/3UZTes7rdo
 u/LZJ580GfOa3TYHOToGQq9iBxoi9Se3vLXUtt2I9Z7sbBVUGTKx2Llezvt9/QRh
 kJkpvMc2r2b9n7Cu52gHKQ89OrqFzs2scNyjQ2KRU61bPwLjWoGiobugnl8T9q8Y
 Li8LKS2tCD6yo7L1eYt0NDriWVF3/Tv1rebWMNIf3sGSHFkxgHxFviTx3L3SWEBS
 8JMuKLPh20WC9ombXP/tDN2rtTmmL9hEoqZ9uSTG1tRNLOLLNv790ucUh+lp6RFp
 7y8dzl+L3hXsFdcCYooOIGO1phmdThwSLMqZHmbpRWpKNr+8P0wCqZP16twnoXNC
 kO7OiNd6XHVpJg4WnN3C3X70n7TPXSZ7dkpDNFBJaH2QilNEHa8=
 =Qk/S
 -----END PGP SIGNATURE-----

Merge tag 'x86_build_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 build updates from Borislav Petkov:
 "Two x86 build fixes:

   - Fix the vmlinux size check on 64-bit along with adding useful
     clarifications on the topic (Arvind Sankar)

   - Remove -m16 workaround now that the GCC versions that need it are
     unsupported (Nick Desaulniers)"

* tag 'x86_build_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/build: Remove -m16 workaround for unsupported versions of GCC
  x86/build: Fix vmlinux size check on 64-bit
This commit is contained in:
Linus Torvalds 2020-12-14 13:54:50 -08:00
commit 84292fffc2
7 changed files with 30 additions and 55 deletions

View File

@@ -24,14 +24,7 @@ endif
# How to compile the 16-bit code. Note we always compile for -march=i386; # How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient. # that way we can complain to the user if the CPU is insufficient.
# REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
# The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
# older versions of GCC, include an *assembly* header to make sure that
# gcc doesn't play any games behind our back.
CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-mno-mmx -mno-sse -mno-mmx -mno-sse

View File

@@ -1,12 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#
# code16gcc.h
#
# This file is added to the assembler via -Wa when compiling 16-bit C code.
# This is done this way instead via asm() to make sure gcc does not reorder
# things around us.
#
# gcc 4.9+ has a real -m16 option so we can drop this hack long term.
#
.code16gcc

View File

@@ -53,7 +53,13 @@
#define STACK_TOP_MAX STACK_TOP #define STACK_TOP_MAX STACK_TOP
/* /*
* Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
* address for the kernel image, rather than the limit on the size itself. On
* 32-bit, this is not a strict limit, but this value is used to limit the
* link-time virtual address range of the kernel, and by KASLR to limit the
* randomized address from which the kernel is executed. A relocatable kernel
* can be loaded somewhat higher than KERNEL_IMAGE_SIZE as long as enough space
* remains for the vmalloc area.
*/ */
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)

View File

@@ -98,8 +98,10 @@
#define STACK_TOP_MAX TASK_SIZE_MAX #define STACK_TOP_MAX TASK_SIZE_MAX
/* /*
* Maximum kernel image size is limited to 1 GiB, due to the fixmap living * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
* in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). * address for the kernel image, rather than the limit on the size itself.
* This can be at most 1 GiB, due to the fixmap living in the next 1 GiB (see
* level2_kernel_pgt in arch/x86/kernel/head_64.S).
* *
* On KASLR use 1 GiB by default, leaving 1 GiB for modules once the * On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
* page tables are fully set up. * page tables are fully set up.

View File

@@ -57,19 +57,13 @@ do { \
#endif #endif
/* /*
* This is how much memory in addition to the memory covered up to * This is used to calculate the .brk reservation for initial pagetables.
* and including _end we need mapped initially. * Enough space is reserved to allocate pagetables sufficient to cover all
* We need: * of LOWMEM_PAGES, which is an upper bound on the size of the direct map of
* (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE) * lowmem.
* (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
* *
* Modulo rounding, each megabyte assigned here requires a kilobyte of * With PAE paging (PTRS_PER_PMD > 1), we allocate PTRS_PER_PGD == 4 pages for
* memory, which is currently unreclaimed. * the PMD's in addition to the pages required for the last level pagetables.
*
* This should be a multiple of a page.
*
* KERNEL_IMAGE_SIZE should be greater than pa(_end)
* and small than max_low_pfn, otherwise will waste some page table entries
*/ */
#if PTRS_PER_PMD > 1 #if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)

View File

@@ -531,21 +531,19 @@ SYM_DATA_END(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
/* /*
* 512 MB kernel mapping. We spend a full page on this pagetable * Kernel high mapping.
* anyway.
* *
* The kernel code+data+bss must not be bigger than that. * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
* virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
* 512 MiB otherwise.
* *
* (NOTE: at +512MB starts the module area, see MODULES_VADDR. * (NOTE: after that starts the module area, see MODULES_VADDR.)
* If you want to increase this then increase MODULES_VADDR
* too.)
* *
* This table is eventually used by the kernel during normal * This table is eventually used by the kernel during normal runtime.
* runtime. Care must be taken to clear out undesired bits * Care must be taken to clear out undesired bits later, like _PAGE_RW
* later, like _PAGE_RW or _PAGE_GLOBAL in some cases. * or _PAGE_GLOBAL in some cases.
*/ */
PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt) SYM_DATA_END(level2_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)

View File

@@ -454,13 +454,13 @@ SECTIONS
ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
} }
#ifdef CONFIG_X86_32
/* /*
* The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/ */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE"); "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
#ifdef CONFIG_X86_64
/* /*
* Per-cpu symbols which need to be offset from __per_cpu_load * Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor. * for the boot processor.
@@ -470,18 +470,12 @@ INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data); INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store); INIT_PER_CPU(irq_stack_backing_store);
/*
* Build-time check on the image size:
*/
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0), . = ASSERT((fixed_percpu_data == 0),
"fixed_percpu_data is not at start of per-cpu area"); "fixed_percpu_data is not at start of per-cpu area");
#endif #endif
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_64 */
#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h> #include <asm/kexec.h>