diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8704153f2675..6dc85d51cd98 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -131,29 +131,18 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
-static int page_size_mask;
-
 early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
 
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages) {
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	} else
-		direct_gbpages = 0;
-}
-
 struct map_range {
 	unsigned long start;
 	unsigned long end;
 	unsigned page_size_mask;
 };
 
+static int page_size_mask;
+
 static void __init probe_page_size_mask(void)
 {
-	init_gbpages();
-
 #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
@@ -173,6 +162,14 @@ static void __init probe_page_size_mask(void)
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
+
+	/* Enable 1 GB linear kernel mappings if available: */
+	if (direct_gbpages && cpu_has_gbpages) {
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	} else {
+		direct_gbpages = 0;
+	}
 }
 
 #ifdef CONFIG_X86_32
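
For context, a minimal user-space sketch (not part of the patch) of the bitmask
idiom the hunks above fold into probe_page_size_mask(): each supported
large-page size sets one bit in page_size_mask, keyed by page-table level.
The enum values mirror the kernel's enum pg_level; the have_* flags are
made-up stand-ins for the real CPU feature checks such as cpu_has_gbpages.

/*
 * Illustrative sketch only -- not kernel code. One bit per supported
 * large-page size, indexed by page-table level, as in page_size_mask.
 */
#include <stdio.h>

enum pg_level {
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
};

int main(void)
{
	int page_size_mask = 0;
	int have_pse = 1;	/* stand-in for the 2 MB page feature check */
	int have_gbpages = 1;	/* stand-in for cpu_has_gbpages             */

	/* Each supported large-page size contributes one bit: */
	if (have_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
	if (have_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;

	/* Consumers test bits to pick the largest usable page size: */
	if (page_size_mask & (1 << PG_LEVEL_1G))
		printf("map with 1 GB pages\n");
	else if (page_size_mask & (1 << PG_LEVEL_2M))
		printf("map with 2 MB pages\n");
	else
		printf("map with 4 KB pages\n");

	return 0;
}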