x86-32, numa: Remove redundant top-down alloc code from remap initialization
memblock_find_in_range() now does top-down allocation by default, so there is no reason for its callers to explicitly implement it by gradually lowering the start address. Remove the redundant top-down allocation logic from initmem_init() and calculate_numa_remap_pages().

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-4-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 5b8443b25c
parent a6c24f7a70
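For context, this is the pattern the patch deletes: each caller wrapped a bottom-up memblock_find_in_range() in a do/while loop that kept lowering the start address until the search succeeded. Below is a minimal userspace sketch of the before/after shapes; the toy find_bottom_up()/find_top_down() helpers and the single hard-coded free region are illustrative stand-ins for the memblock allocator, not the real kernel API.

/*
 * Toy model of the pattern removed by this patch. Stand-in names only;
 * this is not the kernel memblock API.
 */
#include <stdio.h>

#define ERR  (~0UL)	/* stand-in for MEMBLOCK_ERROR */
#define STEP 4UL	/* stand-in for LARGE_PAGE_BYTES / PTRS_PER_PTE */

/* One toy free region: addresses [32, 64). */
static const unsigned long free_lo = 32, free_hi = 64;

/* Bottom-up search: lowest fit at or above 'start'. */
static unsigned long find_bottom_up(unsigned long start, unsigned long end,
				    unsigned long size)
{
	unsigned long lo = start > free_lo ? start : free_lo;
	unsigned long hi = end < free_hi ? end : free_hi;

	return lo + size <= hi ? lo : ERR;
}

/* Top-down search: highest fit below 'end'. */
static unsigned long find_top_down(unsigned long start, unsigned long end,
				   unsigned long size)
{
	unsigned long lo = start > free_lo ? start : free_lo;
	unsigned long hi = end < free_hi ? end : free_hi;

	return lo + size <= hi ? hi - size : ERR;
}

int main(void)
{
	const unsigned long lo = 0, hi = 100, size = 8;
	unsigned long target, found;

	/*
	 * Old shape: the caller fakes top-down allocation by retrying a
	 * bottom-up search with an ever lower start address.
	 */
	target = hi - size;
	do {
		found = find_bottom_up(target, hi, size);
		target -= STEP;
	} while (found == ERR && target > lo);
	printf("caller-side top-down: %lu\n", found);	/* 56 */

	/* New shape: the allocator is top-down by default; one call. */
	found = find_top_down(lo, hi, size);
	printf("allocator top-down:   %lu\n", found);	/* 56 */
	return 0;
}

With the allocator doing the top-down walk itself, each call site shrinks from roughly ten lines to a single call, which is exactly what the hunks below show.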
arch/x86/mm/numa_32.c

@@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 	unsigned long size, reserve_pages = 0;
 
 	for_each_online_node(nid) {
-		u64 node_kva_target;
-		u64 node_kva_final;
+		u64 node_kva;
 
 		/*
 		 * The acpi/srat node info can show hot-add memroy zones
@@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
 
-		node_kva_target = round_down(node_end_pfn[nid] - size,
-						 PTRS_PER_PTE);
-		node_kva_target <<= PAGE_SHIFT;
-		do {
-			node_kva_final = memblock_find_in_range(node_kva_target,
+		node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
 					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
 					((u64)size)<<PAGE_SHIFT,
 					LARGE_PAGE_BYTES);
-			node_kva_target -= LARGE_PAGE_BYTES;
-		} while (node_kva_final == MEMBLOCK_ERROR &&
-			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-		if (node_kva_final == MEMBLOCK_ERROR)
+		if (node_kva == MEMBLOCK_ERROR)
 			panic("Can not get kva ram\n");
 
 		node_remap_size[nid] = size;
@@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		reserve_pages += size;
 		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
 		       " node %d at %llx\n",
-			size, nid, node_kva_final>>PAGE_SHIFT);
+			size, nid, node_kva >> PAGE_SHIFT);
 
 		/*
 		 * prevent kva address below max_low_pfn want it on system
@@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		 * to use it as free.
 		 * So memblock_x86_reserve_range here, hope we don't run out of that array
 		 */
-		memblock_x86_reserve_range(node_kva_final,
-			  node_kva_final+(((u64)size)<<PAGE_SHIFT),
+		memblock_x86_reserve_range(node_kva,
+			  node_kva + (((u64)size)<<PAGE_SHIFT),
 			  "KVA RAM");
 
-		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+		node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
 	}
 	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
 			reserve_pages);
@@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
 void __init initmem_init(void)
 {
 	int nid;
-	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -371,15 +361,10 @@ void __init initmem_init(void)
 
 	kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	do {
-		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-					max_low_pfn<<PAGE_SHIFT,
-					kva_pages<<PAGE_SHIFT,
-					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-		kva_target_pfn -= PTRS_PER_PTE;
-	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
+	kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+				max_low_pfn << PAGE_SHIFT,
+				kva_pages << PAGE_SHIFT,
+				PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
 	if (kva_start_pfn == MEMBLOCK_ERROR)
 		panic("Can not get kva space\n");
 