[PATCH] ARM: Move memmap freeing into init.c
It doesn't make sense for this to be in mm-armv.c now that 26-bit ARM support is no longer integrated into arch/arm.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit a013053d49
parent a343e6075a
@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
        printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order. This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks. "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = (mi->bank[i].start +
                                 mi->bank[i].size) >> PAGE_SHIFT;
        }
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
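The rounding in free_memmap() is what keeps the freeing safe: the physical address of the first struct page covering the hole is rounded up to a page boundary and the last rounded down, so only whole pages of the mem_map array that describe no valid memory are handed back to the bootmem allocator. A minimal standalone userspace sketch of that arithmetic follows; it is not part of the patch, and PAGE_SIZE and the two addresses are made up purely for illustration.

```c
/*
 * Standalone illustration (not kernel code) of the rounding in
 * free_memmap(): trim the struct-page range covering a memory hole
 * to whole pages before freeing it.  The macros mirror the usual
 * kernel definitions; the addresses are invented for the example.
 */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        /* pretend physical addresses of the first/last struct page in a hole */
        unsigned long start_pg = 0xc0012345UL;
        unsigned long end_pg   = 0xc0056789UL;

        unsigned long pg    = PAGE_ALIGN(start_pg); /* round start upwards  */
        unsigned long pgend = end_pg & PAGE_MASK;   /* round end downwards  */

        if (pg < pgend)
                printf("would free %lu bytes of mem_map (0x%lx-0x%lx)\n",
                       pgend - pg, pg, pgend);
        else
                printf("hole too small to free any whole page of mem_map\n");
        return 0;
}
```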
@@ -540,16 +603,12 @@ void __init mem_init(void)
        max_mapnr = virt_to_page(high_memory) - mem_map;
#endif

        /*
         * We may have non-contiguous memory.
         */
        if (meminfo.nr_banks != 1)
                create_memmap_holes(&meminfo);

        /* this will put all unused low memory onto the freelists */
        for_each_online_node(node) {
                pg_data_t *pgdat = NODE_DATA(node);

                free_unused_memmap_node(node, &meminfo);

                if (pgdat->node_spanned_pages != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }
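free_unused_memmap_node() walks the banks belonging to one node in address order and treats the gap between the end of one bank and the start of the next as a hole whose slice of mem_map can be freed. Below is a rough userspace sketch of that walk; the bank structure and the two-bank layout are invented for the example and are not the kernel's meminfo.

```c
/*
 * Userspace sketch of the bank walk in free_unused_memmap_node():
 * banks must be in address order; the pfn gap between the end of one
 * bank and the start of the next is the part of mem_map that could be
 * freed.  The layout below (two 64MB banks with a hole between them)
 * is made up for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

struct bank { unsigned long start, size; };

int main(void)
{
        struct bank banks[] = {
                { 0x00000000UL, 0x04000000UL },  /* 64MB at 0     */
                { 0x08000000UL, 0x04000000UL },  /* 64MB at 128MB */
        };
        unsigned long prev_bank_end = 0;

        for (unsigned int i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
                unsigned long bank_start = banks[i].start >> PAGE_SHIFT;

                if (bank_start < prev_bank_end) {
                        printf("unordered banks, not freeing anything\n");
                        break;
                }
                if (prev_bank_end && prev_bank_end != bank_start)
                        printf("hole: pfn %lu..%lu -> free that slice of mem_map\n",
                               prev_bank_end, bank_start);

                prev_bank_end = (banks[i].start + banks[i].size) >> PAGE_SHIFT;
        }
        return 0;
}
```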
@@ -697,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order. This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks. "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
                                           mi->bank[i].size) >> PAGE_SHIFT;
        }
}

/*
 * The mem_map array can get very big. Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
        int node;

        for_each_online_node(node)
                free_unused_memmap_node(node, mi);
}