Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc fixes:

   - two boot crash fixes

   - unwinder fixes

   - kexec related kernel direct mappings enhancements/fixes

   - more Clang support quirks

   - minor cleanups

   - Documentation fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/intel_rdt: Fix a typo in Documentation
  x86/build: Don't add -maccumulate-outgoing-args w/o compiler support
  x86/boot/32: Fix UP boot on Quark and possibly other platforms
  x86/mm/32: Set the '__vmalloc_start_set' flag in initmem_init()
  x86/kexec/64: Use gbpages for identity mappings if available
  x86/mm: Add support for gbpages to kernel_ident_mapping_init()
  x86/boot: Declare error() as noreturn
  x86/mm/kaslr: Use the _ASM_MUL macro for multiplication to work around Clang incompatibility
  x86/mm: Fix boot crash caused by incorrect loop count calculation in sync_global_pgds()
  x86/asm: Don't use RBP as a temporary register in csum_partial_copy_generic()
  x86/microcode/AMD: Remove redundant NULL check on mc
commit f1e0527d2d
@@ -295,7 +295,7 @@ kernel and the tasks running there get 50% of the cache. They should
 also get 50% of memory bandwidth assuming that the cores 4-7 are SMT
 siblings and only the real time threads are scheduled on the cores 4-7.
 
-# echo C0 > p0/cpus
+# echo F0 > p0/cpus
 
 4) Locking between applications
 
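The typo being fixed: the resctrl 'cpus' file takes a hexadecimal CPU mask, and cores 4-7 correspond to bits 4-7, i.e. 0xF0; 0xC0 would select only cores 6-7. A trivial C check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0;

        /* Cores 4-7 as a hex cpumask: set bits 4..7. */
        for (int cpu = 4; cpu <= 7; cpu++)
            mask |= 1u << cpu;

        printf("%#x\n", mask);    /* prints 0xf0, not 0xc0 */
        return 0;
    }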
@@ -179,7 +179,8 @@ ifdef CONFIG_JUMP_LABEL
 endif
 
 ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
-	KBUILD_CFLAGS += -maccumulate-outgoing-args
+	# This compiler flag is not supported by Clang:
+	KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
 endif
 
 # Stackpointer is addressed different for 32 bit and 64 bit x86
@@ -1,7 +1,9 @@
 #ifndef BOOT_COMPRESSED_ERROR_H
 #define BOOT_COMPRESSED_ERROR_H
 
+#include <linux/compiler.h>
+
 void warn(char *m);
-void error(char *m);
+void error(char *m) __noreturn;
 
 #endif /* BOOT_COMPRESSED_ERROR_H */
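Declaring error() __noreturn tells the compiler (and the objtool stack checker) that control never comes back, avoiding bogus warnings and bad unwind data in callers. A minimal userspace sketch of the same pattern, with a hypothetical error() body standing in for the boot stub's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in: report and terminate, never return. */
    static void error(const char *m) __attribute__((noreturn));

    static void error(const char *m)
    {
        fprintf(stderr, "error: %s\n", m);
        exit(1);
    }

    int main(int argc, char **argv)
    {
        if (argc < 2)
            error("missing argument");

        /* The compiler knows argv[1] is only reached when argc >= 2. */
        return atoi(argv[1]);
    }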
@@ -70,7 +70,7 @@ static unsigned long level4p;
  * Due to relocation, pointers must be assigned at run time not build time.
  */
 static struct x86_mapping_info mapping_info = {
-	.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+	.page_flag = __PAGE_KERNEL_LARGE_EXEC,
 };
 
 /* Locates and clears a region for a new top level page table. */
@@ -32,6 +32,7 @@
 #define _ASM_ADD	__ASM_SIZE(add)
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
+#define _ASM_MUL	__ASM_SIZE(mul)
 
 #define _ASM_AX		__ASM_REG(ax)
 #define _ASM_BX		__ASM_REG(bx)
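_ASM_MUL follows the neighboring macros: __ASM_SIZE() emits the mnemonic with an explicit operand-size suffix (mull on 32-bit, mulq on 64-bit), so one inline-asm string works for both builds. A standalone sketch of that selection; the real __ASM_SIZE uses token pasting, and this simplified form is an assumption for illustration only:

    /* Pick an explicitly sized multiply mnemonic per target. */
    #ifdef __x86_64__
    # define MY_ASM_MUL "mulq "   /* RDX:RAX = RAX * src */
    #else
    # define MY_ASM_MUL "mull "   /* EDX:EAX = EAX * src */
    #endif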
@@ -4,8 +4,9 @@
 struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
-	unsigned long pmd_flag;		 /* page flag for PMD entry */
+	unsigned long page_flag;	 /* page flag for PMD or PUD entry */
 	unsigned long offset;		 /* ident mapping offset */
+	bool direct_gbpages;		 /* PUD level 1GB page support */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
@@ -352,8 +352,6 @@ void reload_ucode_amd(void)
 	u32 rev, dummy;
 
 	mc = (struct microcode_amd *)amd_ucode_patch;
-	if (!mc)
-		return;
 
 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
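The removed branch was dead code: amd_ucode_patch is a statically allocated buffer, so the cast can never produce a NULL pointer. A compilable sketch of the situation (the declaration below is an assumed shape, not the driver's exact one):

    /* Assumed shape of the buffer declared in the microcode driver. */
    static unsigned char amd_ucode_patch[4096];

    static void reload_sketch(void)
    {
        void *mc = (void *)amd_ucode_patch;

        if (!mc)    /* always false: a static array's address is never NULL */
            return;
    }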
@@ -114,7 +114,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.context	= image,
-		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
+		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 	};
 	unsigned long mstart, mend;
 	pgd_t *level4p;
@@ -123,6 +123,10 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 
 	level4p = (pgd_t *)__va(start_pgtable);
 	clear_page(level4p);
+
+	if (direct_gbpages)
+		info.direct_gbpages = true;
+
 	for (i = 0; i < nr_pfn_mapped; i++) {
 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
 		mend = pfn_mapped[i].end << PAGE_SHIFT;
@@ -1225,6 +1225,21 @@ void __init setup_arch(char **cmdline_p)
 
 	kasan_init();
 
+#ifdef CONFIG_X86_32
+	/* sync back kernel address range */
+	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+
+	/*
+	 * sync back low identity map too. It is used for example
+	 * in the 32-bit EFI stub.
+	 */
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+#endif
+
 	tboot_probe();
 
 	map_vsyscall();
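clone_pgd_range() just copies top-level page-table entries, so the resync amounts to a memcpy of the kernel PGD entries into initial_page_table. A sketch of its behavior, modeled from memory on the x86 inline helper in <asm/pgtable.h>:

    #include <string.h>

    typedef struct { unsigned long pgd; } pgd_t;    /* simplified */

    /* Copy 'count' PGD entries from src to dst. */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
        memcpy(dst, src, count * sizeof(pgd_t));
    }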
@@ -291,11 +291,11 @@ void __init setup_per_cpu_areas(void)
 
 #ifdef CONFIG_X86_32
 	/*
-	 * Sync back kernel address range. We want to make sure that
-	 * all kernel mappings, including percpu mappings, are available
-	 * in the smpboot asm. We can't reliably pick up percpu
-	 * mappings using vmalloc_fault(), because exception dispatch
-	 * needs percpu data.
+	 * Sync back kernel address range again. We already did this in
+	 * setup_arch(), but percpu data also needs to be available in
+	 * the smpboot asm. We can't reliably pick up percpu mappings
+	 * using vmalloc_fault(), because exception dispatch needs
+	 * percpu data.
 	 */
 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
 			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
 	movq %r12, 3*8(%rsp)
 	movq %r14, 4*8(%rsp)
 	movq %r13, 5*8(%rsp)
-	movq %rbp, 6*8(%rsp)
+	movq %r15, 6*8(%rsp)
 
 	movq %r8, (%rsp)
 	movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
 	/* main loop. clear in 64 byte blocks */
 	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
 	/* r11: temp3, rdx: temp4, r12 loopcnt */
-	/* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+	/* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
 	.p2align 4
 .Lloop:
 	source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
 	source
 	movq 32(%rdi), %r10
 	source
-	movq 40(%rdi), %rbp
+	movq 40(%rdi), %r15
 	source
 	movq 48(%rdi), %r14
 	source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
 	adcq %r11, %rax
 	adcq %rdx, %rax
 	adcq %r10, %rax
-	adcq %rbp, %rax
+	adcq %r15, %rax
 	adcq %r14, %rax
 	adcq %r13, %rax
 
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
 	dest
 	movq %r10, 32(%rsi)
 	dest
-	movq %rbp, 40(%rsi)
+	movq %r15, 40(%rsi)
 	dest
 	movq %r14, 48(%rsi)
 	dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
 	movq 3*8(%rsp), %r12
 	movq 4*8(%rsp), %r14
 	movq 5*8(%rsp), %r13
-	movq 6*8(%rsp), %rbp
+	movq 6*8(%rsp), %r15
 	addq $7*8, %rsp
 	ret
 
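Context for the csum_partial_copy_generic() change: with frame pointers enabled, RBP anchors the frame chain the unwinder walks, so using it as scratch corrupts backtraces taken from interrupts that land inside the function; R15 is equally callee-saved, which is why only the register name changes and the save/restore slots stay put. A userspace sketch of the chain RBP maintains (build with -fno-omit-frame-pointer; the walk is deliberately bounded because frames above main are not ours to trust):

    #include <stdio.h>

    /* A frame starts with the caller's saved RBP, then the return
     * address: the chain a frame-pointer unwinder follows. */
    struct frame {
        struct frame *next;
        unsigned long ret;
    };

    static void show_backtrace(void)
    {
        struct frame *f = __builtin_frame_address(0);

        for (int i = 0; f && i < 2; i++, f = f->next)
            printf("frame %d: return address %#lx\n", i, f->ret);
    }

    int main(void)
    {
        show_backtrace();    /* returns into main, then into libc */
        return 0;
    }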
@@ -5,6 +5,7 @@
  * kernel starts. This file is included in the compressed kernel and
  * normally linked in the regular.
  */
+#include <asm/asm.h>
 #include <asm/kaslr.h>
 #include <asm/msr.h>
 #include <asm/archrandom.h>
@@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose)
 	}
 
 	/* Circular multiply for better bit diffusion */
-	asm("mul %3"
+	asm(_ASM_MUL "%3"
 	    : "=a" (random), "=d" (raw)
 	    : "a" (random), "rm" (mix_const));
 	random += raw;
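Clang's integrated assembler rejects a bare 'mul %3' because it refuses to guess the operand size, while GCC infers it from the operands; _ASM_MUL sidesteps that by naming the size explicitly. A standalone 64-bit demo of the same widening-multiply pattern (the mix_const value here is arbitrary, not the kernel's):

    #include <stdio.h>

    int main(void)
    {
        unsigned long random = 0x123456789abcdef0UL;
        unsigned long mix_const = 0x61c8864680b583ebUL;    /* arbitrary */
        unsigned long raw;

        /* Widening multiply: RDX:RAX = RAX * mix_const. The explicit
         * 'mulq' spelling is accepted by both GCC and Clang. */
        asm("mulq %3"
            : "=a" (random), "=d" (raw)
            : "a" (random), "rm" (mix_const));
        random += raw;    /* fold the high half back in */

        printf("%#lx\n", random);
        return 0;
    }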
@@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 		if (pmd_present(*pmd))
 			continue;
 
-		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
+		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
 	}
 }
 
@@ -30,6 +30,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 		if (next > end)
 			next = end;
 
+		if (info->direct_gbpages) {
+			pud_t pudval;
+
+			if (pud_present(*pud))
+				continue;
+
+			addr &= PUD_MASK;
+			pudval = __pud((addr - info->offset) | info->page_flag);
+			set_pud(pud, pudval);
+			continue;
+		}
+
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
 			ident_pmd_init(info, pmd, addr, next);
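With direct_gbpages set, a single PUD entry maps an aligned 1 GiB chunk, so no PMD page has to be allocated and filled with 512 2 MiB entries; the 'addr &= PUD_MASK' truncation provides the required alignment. A quick userspace illustration of that arithmetic:

    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1UL << PUD_SHIFT)    /* 1 GiB */
    #define PUD_MASK  (~(PUD_SIZE - 1))
    #define PMD_SIZE  (1UL << 21)           /* 2 MiB */

    int main(void)
    {
        unsigned long addr = 0x40123000UL;

        /* The 1 GiB entry must start on a 1 GiB boundary. */
        printf("entry base: %#lx\n", addr & PUD_MASK);  /* 0x40000000 */

        /* One gbpage replaces a full PMD page worth of entries. */
        printf("PMD entries avoided: %lu\n", PUD_SIZE / PMD_SIZE);  /* 512 */
        return 0;
    }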
@@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup);
  */
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
-	unsigned long address;
+	unsigned long addr;
 
-	for (address = start; address <= end; address += PGDIR_SIZE) {
-		pgd_t *pgd_ref = pgd_offset_k(address);
+	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd_ref = pgd_offset_k(addr);
 		const p4d_t *p4d_ref;
 		struct page *page;
 
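The boot crash here: with KASLR the range passed to sync_global_pgds() need not be PGDIR-aligned, and stepping by a fixed PGDIR_SIZE from an unaligned start can jump past 'end' before reaching the PGD that covers it, leaving that PGD unsynced. Advancing to the next PGDIR boundary instead visits every covered PGD exactly once. A userspace demonstration (PGDIR_SHIFT as on 4-level x86-64):

    #include <stdio.h>

    #define PGDIR_SHIFT 39
    #define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* A small range straddling a PGDIR boundary. */
        unsigned long start = 2 * PGDIR_SIZE - 0x1000;
        unsigned long end   = 2 * PGDIR_SIZE + 0x1000;
        unsigned long addr;

        /* Old stepping: one iteration only, the second PGD is missed. */
        for (addr = start; addr <= end; addr += PGDIR_SIZE)
            printf("old: pgd %lu\n", addr >> PGDIR_SHIFT);

        /* Fixed stepping: lands on the boundary, both PGDs are visited. */
        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE))
            printf("new: pgd %lu\n", addr >> PGDIR_SHIFT);
        return 0;
    }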
@@ -106,7 +106,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		 * handle synchonization on p4d level.
 		 */
 		BUILD_BUG_ON(pgd_none(*pgd_ref));
-		p4d_ref = p4d_offset(pgd_ref, address);
+		p4d_ref = p4d_offset(pgd_ref, addr);
 
 		if (p4d_none(*p4d_ref))
 			continue;
@@ -117,8 +117,8 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 			p4d_t *p4d;
 			spinlock_t *pgt_lock;
 
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			p4d = p4d_offset(pgd, address);
+			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+			p4d = p4d_offset(pgd, addr);
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
@@ -100,5 +100,6 @@ void __init initmem_init(void)
 	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 
+	__vmalloc_start_set = true;
 	setup_bootmem_allocator();
 }
@@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
 {
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
-		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
+		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;