From 681ff0181bbfb183e32bc6beb6ec076304470479 Mon Sep 17 00:00:00 2001
From: Arvind Sankar
Date: Thu, 5 Mar 2020 10:01:52 -0500
Subject: [PATCH 1/5] x86/mm/init/32: Stop printing the virtual memory layout

For security reasons, don't display the kernel's virtual memory layout.

Kees Cook points out:
"These have been entirely removed on other architectures, so let's just
do the same for ia32 and remove it unconditionally."

071929dbdd86 ("arm64: Stop printing the virtual memory layout")
1c31d4e96b8c ("ARM: 8820/1: mm: Stop printing the virtual memory layout")
31833332f798 ("m68k/mm: Stop printing the virtual memory layout")
fd8d0ca25631 ("parisc: Hide virtual kernel memory layout")
adb1fe9ae2ee ("mm/page_alloc: Remove kernel address exposure in free_reserved_area()")

Signed-off-by: Arvind Sankar
Signed-off-by: Thomas Gleixner
Acked-by: Tycho Andersen
Acked-by: Kees Cook
Link: https://lkml.kernel.org/r/20200305150152.831697-1-nivedita@alum.mit.edu
---
 arch/x86/mm/init_32.c | 38 --------------------------------------
 1 file changed, 38 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 23df4885bbed..8ae0272c1c51 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -788,44 +788,6 @@ void __init mem_init(void)
 	x86_init.hyper.init_after_bootmem();

 	mem_init_print_info(NULL);
-	printk(KERN_INFO "virtual kernel memory layout:\n"
-		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#ifdef CONFIG_HIGHMEM
-		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#endif
-		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
-		FIXADDR_START, FIXADDR_TOP,
-		(FIXADDR_TOP - FIXADDR_START) >> 10,
-
-		CPU_ENTRY_AREA_BASE,
-		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
-		CPU_ENTRY_AREA_MAP_SIZE >> 10,
-
-#ifdef CONFIG_HIGHMEM
-		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-		(LAST_PKMAP*PAGE_SIZE) >> 10,
-#endif
-
-		VMALLOC_START, VMALLOC_END,
-		(VMALLOC_END - VMALLOC_START) >> 20,
-
-		(unsigned long)__va(0), (unsigned long)high_memory,
-		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-
-		(unsigned long)&__init_begin, (unsigned long)&__init_end,
-		((unsigned long)&__init_end -
-		 (unsigned long)&__init_begin) >> 10,
-
-		(unsigned long)&_etext, (unsigned long)&_edata,
-		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-
-		(unsigned long)&_text, (unsigned long)&_etext,
-		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

 	/*
 	 * Check boundaries twice: Some fundamental inconsistencies can

From 6a9feaa8774f3b8210dfe40626a75ca047e4ecae Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 5 Feb 2020 15:34:26 +0100
Subject: [PATCH 2/5] x86/mm/kmmio: Use this_cpu_ptr() instead get_cpu_var() for kmmio_ctx

Both call sites that access kmmio_ctx do so with interrupts disabled.
There is no need to use get_cpu_var(), which additionally disables
preemption. Use this_cpu_ptr() to access the kmmio_ctx variable of the
current CPU.
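
As a rough, illustrative sketch of the difference (not part of the patch;
the example_old()/example_new() helpers are made up for this note):
get_cpu_var() disables preemption and must be paired with put_cpu_var(),
while this_cpu_ptr() only computes the address of this CPU's instance,
which is sufficient here because the callers already run with interrupts
disabled and therefore cannot migrate to another CPU.

	/* illustrative sketch only */
	static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

	static void example_old(void)
	{
		/* implies preempt_disable() */
		struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
		/* ... use ctx ... */
		put_cpu_var(kmmio_ctx);		/* implies preempt_enable() */
	}

	static void example_new(void)
	{
		/* no preemption toggle; fine with IRQs already off */
		struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);
		/* ... use ctx ... */
	}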
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200205143426.2592512-1-bigeasy@linutronix.de
---
 arch/x86/mm/kmmio.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 49d7814b59a9..9994353fb75d 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -260,7 +260,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 		goto no_kmmio;
 	}

-	ctx = &get_cpu_var(kmmio_ctx);
+	ctx = this_cpu_ptr(&kmmio_ctx);
 	if (ctx->active) {
 		if (page_base == ctx->addr) {
 			/*
@@ -285,7 +285,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
 			disarm_kmmio_fault_page(faultpage);
 		}
-		goto no_kmmio_ctx;
+		goto no_kmmio;
 	}
 	ctx->active++;

@@ -314,11 +314,8 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	 * the user should drop to single cpu before tracing.
 	 */

-	put_cpu_var(kmmio_ctx);
 	return 1; /* fault handled */

-no_kmmio_ctx:
-	put_cpu_var(kmmio_ctx);
 no_kmmio:
 	rcu_read_unlock();
 	preempt_enable_no_resched();
@@ -333,7 +330,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
 	int ret = 0;
-	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
+	struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);

 	if (!ctx->active) {
 		/*
@@ -371,7 +368,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (!(regs->flags & X86_EFLAGS_TF))
 		ret = 1;
 out:
-	put_cpu_var(kmmio_ctx);
 	return ret;
 }


From 6db73f17c5f155dbcfd5e48e621c706270b84df0 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 4 Mar 2020 12:45:26 +0100
Subject: [PATCH 3/5] x86: Don't let pgprot_modify() change the page encryption bit

When SEV or SME is enabled and active, vm_get_page_prot() typically
returns with the encryption bit set. This means that users of
pgprot_modify(..., vm_get_page_prot()) (mprotect_fixup(), do_mmap()) end
up with a value of vma->vm_page_prot that is not consistent with the
intended protection of the PTEs.

This is also important for fault handlers that rely on the VMA
vm_page_prot to set the page protection. Fix this by not allowing
pgprot_modify() to change the encryption bit, similar to how it's done
for PAT bits.
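
A minimal worked example of the new masking (a sketch, not part of the
patch; the concrete bit states in the comments are illustrative, and the
real _PAGE_ENC position comes from sme_me_mask and is CPU-dependent).
Assume a driver has already made a VMA's vm_page_prot unencrypted and
mprotect() then recomputes the protection:

	/* illustrative sketch only */
	pgprot_t oldprot = vma->vm_page_prot;		/* _PAGE_ENC clear: unencrypted */
	pgprot_t newprot = vm_get_page_prot(vm_flags);	/* _PAGE_ENC set under SME/SEV  */

	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;  /* keeps the cleared _PAGE_ENC */
	pgprotval_t addbits      = pgprot_val(newprot) & ~_PAGE_CHG_MASK; /* drops newprot's _PAGE_ENC   */
	pgprot_t result = __pgprot(preservebits | addbits);	/* VMA stays unencrypted */

Previously addbits was taken unmasked, so the set _PAGE_ENC coming from
vm_get_page_prot() would silently re-encrypt such a mapping.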
Signed-off-by: Thomas Hellstrom
Signed-off-by: Borislav Petkov
Reviewed-by: Dave Hansen
Acked-by: Tom Lendacky
Link: https://lkml.kernel.org/r/20200304114527.3636-2-thomas_os@shipmail.org
---
 arch/x86/include/asm/pgtable.h       | 7 +++++--
 arch/x86/include/asm/pgtable_types.h | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7e118660bbd9..64a03f226ab7 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -627,12 +627,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	return __pmd(val);
 }

-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 {
 	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
-	pgprotval_t addbits = pgprot_val(newprot);
+	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
 	return __pgprot(preservebits | addbits);
 }

diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0239998d8cdc..65c2ecd730c5 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -118,7 +118,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

 /*

From 17c4a2ae15a7aaefe84bdb271952678c5c9cd8e1 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 4 Mar 2020 12:45:27 +0100
Subject: [PATCH 4/5] dma-mapping: Fix dma_pgprot() for unencrypted coherent pages

When dma_mmap_coherent() sets up a mapping to unencrypted coherent memory
under SEV encryption and sometimes under SME encryption, it will actually
set up an encrypted mapping rather than an unencrypted one, causing
devices that DMA from that memory to read encrypted contents. Fix this.

When force_dma_unencrypted() returns true, the linear kernel map of the
coherent pages has had the encryption bit explicitly cleared and the page
content is unencrypted. Make sure that any additional PTEs we set up to
these pages also have the encryption bit cleared by having dma_pgprot()
return a protection with the encryption bit cleared in this case.

Signed-off-by: Thomas Hellstrom
Signed-off-by: Borislav Petkov
Reviewed-by: Christoph Hellwig
Acked-by: Tom Lendacky
Link: https://lkml.kernel.org/r/20200304114527.3636-3-thomas_os@shipmail.org
---
 kernel/dma/mapping.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 12ff766ec1fa..98e3d873792e 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -154,6 +154,8 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
+	if (force_dma_unencrypted(dev))
+		prot = pgprot_decrypted(prot);
 	if (dev_is_dma_coherent(dev) ||
 	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
 	     (attrs & DMA_ATTR_NON_CONSISTENT)))

From aa61ee7b9ee3cb84c0d3a842b0d17937bf024c46 Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Wed, 11 Mar 2020 09:18:23 +0800
Subject: [PATCH 5/5] x86/mm: Remove the now redundant N_MEMORY check

In commit f70029bbaacb ("mm, memory_hotplug: drop CONFIG_MOVABLE_NODE")
the dependency on CONFIG_MOVABLE_NODE was removed for N_MEMORY.
Before, !CONFIG_HIGHMEM && !CONFIG_MOVABLE_NODE could make
(N_MEMORY == N_NORMAL_MEMORY) be true. After that commit, N_MEMORY
cannot be equal to N_NORMAL_MEMORY. So the conditional check in
paging_init() is not needed anymore, remove it.

[ bp: Massage. ]

Signed-off-by: Baoquan He
Signed-off-by: Borislav Petkov
Reviewed-by: Wei Yang
Acked-by: Michal Hocko
Link: https://lkml.kernel.org/r/20200311011823.27740-1-bhe@redhat.com
---
 arch/x86/mm/init_64.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index abbdecb75fad..0a14711d3a93 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -818,8 +818,7 @@ void __init paging_init(void)
 	 * will not set it back.
 	 */
 	node_clear_state(0, N_MEMORY);
-	if (N_MEMORY != N_NORMAL_MEMORY)
-		node_clear_state(0, N_NORMAL_MEMORY);
+	node_clear_state(0, N_NORMAL_MEMORY);

 	zone_sizes_init();
 }
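
For context, this is roughly how the node_states enum was laid out before
f70029bbaacb (an abbreviated sketch, not a verbatim quote of
include/linux/nodemask.h from that era): only with both CONFIG_HIGHMEM and
CONFIG_MOVABLE_NODE disabled did N_MEMORY alias N_NORMAL_MEMORY, which is
the only case the removed check guarded against. Since that commit,
N_MEMORY is always a distinct state, so the check is always true.

	/* abbreviated sketch of the pre-f70029bbaacb layout */
	enum node_states {
		N_POSSIBLE,
		N_ONLINE,
		N_NORMAL_MEMORY,
	#ifdef CONFIG_HIGHMEM
		N_HIGH_MEMORY,
	#else
		N_HIGH_MEMORY = N_NORMAL_MEMORY,
	#endif
	#ifdef CONFIG_MOVABLE_NODE		/* dropped by f70029bbaacb */
		N_MEMORY,
	#else
		N_MEMORY = N_HIGH_MEMORY,
	#endif
		N_CPU,
		NR_NODE_STATES
	};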