x86-32, NUMA: Add @start and @end to init_alloc_remap()

Instead of dereferencing node_start/end_pfn[] directly, make
init_alloc_remap() take @start and @end and let the caller be
responsible for making sure the range is sane.  This is to prepare for
use from unified NUMA init code.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
This commit is contained in:
Tejun Heo 2011-05-02 14:18:54 +02:00
parent 38f3e1ca24
commit 99cca492ea
1 changed file with 14 additions and 15 deletions

View File

@@ -265,8 +265,10 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid)
+static __init void init_alloc_remap(int nid, u64 start, u64 end)
 {
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = end >> PAGE_SHIFT;
 	unsigned long size, pfn;
 	u64 node_pa, remap_pa;
 	void *remap_va;
@@ -276,24 +278,15 @@ static __init void init_alloc_remap(int nid, u64 start, u64 end)
 	 * memory could be added but not currently present.
 	 */
 	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-	       nid, node_start_pfn[nid], node_end_pfn[nid]);
-	if (node_start_pfn[nid] > max_pfn)
-		return;
-	if (!node_end_pfn[nid])
-		return;
-	if (node_end_pfn[nid] > max_pfn)
-		node_end_pfn[nid] = max_pfn;
+	       nid, start_pfn, end_pfn);
 
 	/* calculate the necessary space aligned to large page size */
-	size = node_memmap_size_bytes(nid, node_start_pfn[nid],
-				      min(node_end_pfn[nid], max_pfn));
+	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	/* allocate node memory and the lowmem remap area */
-	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					 size, LARGE_PAGE_BYTES);
+	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
 	if (node_pa == MEMBLOCK_ERROR) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
@@ -391,8 +384,14 @@ void __init initmem_init(void)
 	get_memcfg_numa();
 	numa_init_array();
 
-	for_each_online_node(nid)
-		init_alloc_remap(nid);
+	for_each_online_node(nid) {
+		u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
+		u64 end = min((u64)node_end_pfn[nid] << PAGE_SHIFT,
+			      (u64)max_pfn << PAGE_SHIFT);
+
+		if (start < end)
+			init_alloc_remap(nid, start, end);
+	}
 
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;