x86/mm: Remove kernel_unmap_pages_in_pgd() and efi_cleanup_page_tables()
kernel_unmap_pages_in_pgd() is dangerous: if a PGD entry in init_mm.pgd
were to be cleared, callers would need to ensure that the pgd entry
hadn't been propagated to any other pgd.

Its only caller was efi_cleanup_page_tables(), and that, in turn, was
unused, so just delete both functions.  This leaves a couple of other
helpers unused, so delete them, too.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-efi@vger.kernel.org
Link: http://lkml.kernel.org/r/77ff20fdde3b75cd393be5559ad8218870520248.1468527351.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
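The hazard described above comes from the fact that the kernel's top-level
(PGD) entries get copied into other page tables when those are created, so a
clear that only hits init_mm.pgd can leave stale copies behind. The following
stand-alone C sketch only simulates that propagation problem; master_pgd,
task_pgd and create_task_pgd() are invented names for illustration, not kernel
code or APIs:

/*
 * Stand-alone illustration (not kernel code): why clearing an entry in a
 * shared top-level table is unsafe once that entry has been copied.
 * All names here are invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define ENTRIES 8

static unsigned long master_pgd[ENTRIES];	/* stand-in for init_mm.pgd */
static unsigned long task_pgd[ENTRIES];		/* stand-in for another pgd */

/* New page tables start out as a copy of the shared top-level entries. */
static void create_task_pgd(void)
{
	memcpy(task_pgd, master_pgd, sizeof(master_pgd));
}

int main(void)
{
	master_pgd[3] = 0xabcdUL;	/* pretend entry 3 points at a pud page */
	create_task_pgd();		/* the entry is now duplicated          */

	/*
	 * Clearing only the master entry (the analogue of what
	 * unmap_pgd_range()/pgd_clear() could do) does nothing about the
	 * copy ...
	 */
	master_pgd[3] = 0;

	/* ... so the copy still holds a stale reference. */
	printf("master entry: %#lx, copied entry: %#lx\n",
	       master_pgd[3], task_pgd[3]);
	return 0;
}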
commit d92fc69cca
parent 360cb4d155
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -125,7 +125,6 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
 extern int __init efi_alloc_page_tables(void);
 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_update_mappings(void);
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -481,8 +481,6 @@ extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 				   unsigned numpages, unsigned long page_flags);
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
-			       unsigned numpages);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_DEFS_H */
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -746,18 +746,6 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
 	return true;
 }
 
-static bool try_to_free_pud_page(pud_t *pud)
-{
-	int i;
-
-	for (i = 0; i < PTRS_PER_PUD; i++)
-		if (!pud_none(pud[i]))
-			return false;
-
-	free_page((unsigned long)pud);
-	return true;
-}
-
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, start);
@@ -871,16 +859,6 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 	 */
 }
 
-static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
-{
-	pgd_t *pgd_entry = root + pgd_index(addr);
-
-	unmap_pud_range(pgd_entry, addr, end);
-
-	if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
-		pgd_clear(pgd_entry);
-}
-
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -1994,12 +1972,6 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 	return retval;
 }
 
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
-			       unsigned numpages)
-{
-	unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
-}
-
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -978,8 +978,6 @@ static void __init __efi_enter_virtual_mode(void)
 	 * EFI mixed mode we need all of memory to be accessible when
 	 * we pass parameters to the EFI runtime services in the
 	 * thunking code.
-	 *
-	 * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
 	 */
 	free_pages((unsigned long)new_memmap, pg_shift);
 
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -49,9 +49,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
 	return 0;
 }
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
-}
 
 void __init efi_map_region(efi_memory_desc_t *md)
 {
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -285,11 +285,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 	return 0;
 }
 
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
-	kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
-}
-
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
 	unsigned long flags = _PAGE_RW;