memblock: rename memblock_alloc{_nid,_try_nid} to memblock_phys_alloc*
Make it explicit that the caller gets a physical address rather than a
virtual one.

This will also allow using memblock_alloc prefix for memblock
allocations returning virtual address, which is done in the following
patches.

The conversion is done using the following semantic patch:

@@
expression e1, e2, e3;
@@
(
- memblock_alloc(e1, e2)
+ memblock_phys_alloc(e1, e2)
|
- memblock_alloc_nid(e1, e2, e3)
+ memblock_phys_alloc_nid(e1, e2, e3)
|
- memblock_alloc_try_nid(e1, e2, e3)
+ memblock_phys_alloc_try_nid(e1, e2, e3)
)

Link: http://lkml.kernel.org/r/1536927045-23536-7-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
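As background, a minimal sketch of the calling convention the new names make
explicit (this example is not from the commit: early_table_alloc() is an
invented name, while memblock_phys_alloc(), __va(), memset() and panic() are
the real kernel APIs used by the callers below). memblock_phys_alloc*
returns a phys_addr_t, not a usable pointer, so an early-boot caller must
convert the result to a virtual address before touching the memory, exactly
as the converted hunks do:

/* Hypothetical early-boot helper in the style of the callers in this diff. */
static void * __init early_table_alloc(void)
{
	phys_addr_t phys;
	void *virt;

	/* Returns a physical address, not a pointer. */
	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("early_table_alloc: out of memory");

	/* Convert to a virtual address before dereferencing. */
	virt = __va(phys);
	memset(virt, 0, PAGE_SIZE);
	return virt;
}

Since the semantic patch above is purely mechanical, it can be applied
tree-wide with Coccinelle's spatch tool.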
commit 9a8dd708d5
parent b146ada221
@@ -721,7 +721,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);

 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-	void *ptr = __va(memblock_alloc(sz, align));
+	void *ptr = __va(memblock_phys_alloc(sz, align));
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -101,7 +101,7 @@ static phys_addr_t __init early_pgtable_alloc(void)
 	phys_addr_t phys;
 	void *ptr;

-	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

 	/*
 	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
@@ -237,7 +237,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 	if (start_pfn >= end_pfn)
 		pr_info("Initmem setup node %d [<memory-less node>]\n", nid);

-	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 	nd = __va(nd_pa);

 	/* report and initialize */
@@ -135,8 +135,8 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
 	if (dma_size & (PAGE_SIZE - 1))
 		++dma_pages;

-	bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
-				     sizeof(long));
+	bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+					  sizeof(long));

 	dma_bitmap = phys_to_virt(bitmap_phys);
 	memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
@@ -81,7 +81,7 @@ static void __init map_ram(void)
 		}

 		/* Alloc one page for holding PTE's... */
-		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+		pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 		memset(pte, 0, PAGE_SIZE);
 		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));

@@ -114,7 +114,7 @@ static void __init fixedrange_init(void)
 	pgd = swapper_pg_dir + pgd_index(vaddr);
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
-	fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	fixmap_pmd_p = (pmd_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 	memset(fixmap_pmd_p, 0, PAGE_SIZE);
 	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));

@@ -127,7 +127,7 @@ static void __init fixedrange_init(void)
 	pgd = swapper_pg_dir + pgd_index(vaddr);
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
-	pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 	memset(pte, 0, PAGE_SIZE);
 	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
 	pkmap_page_table = pte;
@@ -153,7 +153,7 @@ void __init paging_init(void)
 	fixedrange_init();

 	/* allocate space for empty_zero_page */
-	zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	zero_page = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 	memset(zero_page, 0, PAGE_SIZE);
 	zone_sizes_init();

@@ -106,7 +106,7 @@ static void __init map_ram(void)
 		}

 		/* Alloc one page for holding PTE's... */
-		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+		pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 		set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

 		/* Fill the newly allocated page with PTE'S */
@@ -126,7 +126,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
 	if (likely(mem_init_done)) {
 		pte = (pte_t *) __get_free_page(GFP_KERNEL);
 	} else {
-		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+		pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 	}

 	if (pte)
@@ -1008,9 +1008,7 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 	/* Count and allocate space for cpu features */
 	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
 						&nr_dt_cpu_features);
-	dt_cpu_features = __va(
-		memblock_alloc(sizeof(struct dt_cpu_feature)*
-				nr_dt_cpu_features, PAGE_SIZE));
+	dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));

 	cpufeatures_setup_start(isa);
@@ -198,7 +198,7 @@ void __init allocate_paca_ptrs(void)
 	paca_nr_cpu_ids = nr_cpu_ids;

 	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
-	paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
+	paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, 0));
 	memset(paca_ptrs, 0x88, paca_ptrs_size);
 }

@@ -126,7 +126,7 @@ static void __init move_device_tree(void)
 	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
 			overlaps_crashkernel(start, size) ||
 			overlaps_initrd(start, size)) {
-		p = __va(memblock_alloc(size, PAGE_SIZE));
+		p = __va(memblock_phys_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -460,8 +460,7 @@ void __init smp_setup_cpu_maps(void)

 	DBG("smp_setup_cpu_maps()\n");

-	cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32),
-							__alignof__(u32)));
+	cpu_to_phys_id = __va(memblock_phys_alloc(nr_cpu_ids * sizeof(u32), __alignof__(u32)));
 	memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));

 	for_each_node_by_type(dn, "cpu") {
@@ -206,9 +206,9 @@ void __init irqstack_early_init(void)
 	 * as the memblock is limited to lowmem by default */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }

@@ -227,12 +227,12 @@ void __init exc_lvl_early_init(void)
 #endif

 		critirq_ctx[hw_cpu] = (struct thread_info *)
-			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
 		dbgirq_ctx[hw_cpu] = (struct thread_info *)
-			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[hw_cpu] = (struct thread_info *)
-			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
 	}
 }
@@ -788,7 +788,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 	void *nd;
 	int tnid;

-	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 	nd = __va(nd_pa);

 	/* report and initialize */
@@ -50,7 +50,7 @@ __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	if (slab_is_available()) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	} else {
-		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+		pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
 		if (pte)
 			clear_page(pte);
 	}
@@ -224,7 +224,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(memblock_alloc(Hash_size, Hash_size));
+	Hash = __va(memblock_phys_alloc(Hash_size, Hash_size));
 	memset(Hash, 0, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

@@ -213,7 +213,7 @@ static int __init iob_init(struct device_node *dn)
 	pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);

 	/* Allocate a spare page to map all invalid IOTLB pages. */
-	tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
+	tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
 	if (!tmp)
 		panic("IOBMAP: Cannot allocate spare page!");
 	/* Empty l1 is marked invalid */
@@ -171,7 +171,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 	/*
 	 * Allocate a buffer to hold the MC recoverable ranges.
 	 */
-	mc_recoverable_range =__va(memblock_alloc(size, __alignof__(u64)));
+	mc_recoverable_range =__va(memblock_phys_alloc(size, __alignof__(u64)));
 	memset(mc_recoverable_range, 0, size);

 	for (i = 0; i < mc_recoverable_range_len; i++) {
@@ -261,7 +261,7 @@ static void allocate_dart(void)
 	 * that to work around what looks like a problem with the HT bridge
 	 * prefetching into invalid pages and corrupting data
 	 */
-	tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+	tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);

@@ -61,7 +61,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 {
 	struct save_area *sa;

-	sa = (void *) memblock_alloc(sizeof(*sa), 8);
+	sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
 	if (is_boot_cpu)
 		list_add(&sa->list, &dump_save_areas);
 	else
@@ -967,7 +967,8 @@ static void __init setup_randomness(void)
 {
 	struct sysinfo_3_2_2 *vmms;

-	vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
+							    PAGE_SIZE);
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
@@ -36,7 +36,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)

 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return (void *) memblock_alloc(size, size);
+	return (void *) memblock_phys_alloc(size, size);
 }

 void *vmem_crst_alloc(unsigned long val)
@@ -57,7 +57,7 @@ pte_t __ref *vmem_pte_alloc(void)
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
-		pte = (pte_t *) memblock_alloc(size, size);
+		pte = (pte_t *) memblock_phys_alloc(size, size);
 	if (!pte)
 		return NULL;
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -64,7 +64,7 @@ static __init pg_data_t *alloc_node_data(void)
 {
 	pg_data_t *res;

-	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
+	res = (pg_data_t *) memblock_phys_alloc(sizeof(pg_data_t), 8);
 	memset(res, 0, sizeof(pg_data_t));
 	return res;
 }
@@ -170,7 +170,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
 						 mdesc_size);
 	alloc_size = PAGE_ALIGN(handle_size);

-	paddr = memblock_alloc(alloc_size, PAGE_SIZE);
+	paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);

 	hp = NULL;
 	if (paddr) {
@@ -34,7 +34,7 @@

 void * __init prom_early_alloc(unsigned long size)
 {
-	unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
+	unsigned long paddr = memblock_phys_alloc(size, SMP_CACHE_BYTES);
 	void *ret;

 	if (!paddr) {
@@ -1092,7 +1092,8 @@ static void __init allocate_node_data(int nid)
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 	unsigned long paddr;

-	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
+	paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data),
+					    SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
 		prom_halt();
@@ -1266,8 +1267,8 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
 	if (!count)
 		return -ENOENT;

-	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
-			       SMP_CACHE_BYTES);
+	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
+				    SMP_CACHE_BYTES);
 	if (!paddr)
 		return -ENOMEM;

@@ -1307,8 +1308,8 @@ static int __init grab_mblocks(struct mdesc_handle *md)
 	if (!count)
 		return -ENOENT;

-	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
-			       SMP_CACHE_BYTES);
+	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
+				    SMP_CACHE_BYTES);
 	if (!paddr)
 		return -ENOMEM;

@@ -144,7 +144,7 @@ static void __init build_mem_type_table(void)

 static void __init *early_alloc(unsigned long sz)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
+	void *ptr = __va(memblock_phys_alloc(sz, sz));
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -196,7 +196,7 @@ static void __init alloc_node_data(int nid)
 	 * Allocate node data. Try node-local memory and then any node.
 	 * Never allocate in DMA zone.
 	 */
-	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
 	if (!nd_pa) {
 		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
 					      MEMBLOCK_ALLOC_ACCESSIBLE);
@@ -15,7 +15,7 @@

 static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
 {
-	return memblock_alloc(size, 0);
+	return memblock_phys_alloc(size, 0);
 }

 static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
@@ -300,10 +300,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);

 /*
  * Set the allocation direction to bottom-up or top-down.
@@ -1269,7 +1269,7 @@ phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
 }

-phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	enum memblock_flags flags = choose_memblock_flags();
 	phys_addr_t ret;
@@ -1304,14 +1304,14 @@ phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys
 	return alloc;
 }

-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }

-phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	phys_addr_t res = memblock_alloc_nid(size, align, nid);
+	phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);

 	if (res)
 		return res;