mm: section numbers use the type "unsigned long"
Patch series "mm: Further memory block device cleanups", v1.

Some further cleanups around memory block devices.  Especially, clean up
and simplify walk_memory_range().  Including some other minor cleanups.

This patch (of 6):

We are using a mixture of "int" and "unsigned long".  Let's make this
consistent by using "unsigned long" everywhere.  We'll do the same with
memory block ids next.

While at it, turn the "unsigned long i" in removable_show() into an int
- sections_per_block is an int.

[akpm@linux-foundation.org: s/unsigned long i/unsigned long nr/]
[david@redhat.com: v3]
  Link: http://lkml.kernel.org/r/20190620183139.4352-2-david@redhat.com
Link: http://lkml.kernel.org/r/20190614100114.311-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Arun KS <arunks@codeaurora.org>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2491f0a2c0
parent 756398750e
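For context (not part of the patch): section numbers are derived from page
frame numbers, which are "unsigned long" throughout the kernel, so
"unsigned long" is the natural type for them as well.  Below is a minimal
standalone sketch, using assumed x86-64 defaults for PAGE_SHIFT and
SECTION_SIZE_BITS and a helper that merely mirrors the kernel's
pfn_to_section_nr() macro.

/*
 * Standalone sketch, not from the patch: section numbers come from page
 * frame numbers (pfns), which are "unsigned long" in the kernel, so keeping
 * section numbers "unsigned long" avoids mixing signed and unsigned
 * arithmetic.  PAGE_SHIFT/SECTION_SIZE_BITS are assumed x86-64 defaults,
 * for illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* 128 MiB sections */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

/* Mirrors the kernel's pfn_to_section_nr() macro. */
static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
	/* A pfn near the top of a 46-bit physical address space. */
	unsigned long pfn = (1UL << (46 - PAGE_SHIFT)) - 1;

	printf("pfn %#lx -> section %lu\n", pfn, pfn_to_section_nr(pfn));
	return 0;
}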
drivers/base/memory.c

@@ -34,7 +34,7 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
 
 static int sections_per_block;
 
-static inline int base_memory_block_id(int section_nr)
+static inline int base_memory_block_id(unsigned long section_nr)
 {
 	return section_nr / sections_per_block;
 }
@@ -131,9 +131,9 @@ static ssize_t phys_index_show(struct device *dev,
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
 			      char *buf)
 {
-	unsigned long i, pfn;
-	int ret = 1;
 	struct memory_block *mem = to_memory_block(dev);
+	unsigned long pfn;
+	int ret = 1, i;
 
 	if (mem->state != MEM_ONLINE)
 		goto out;
@@ -691,15 +691,15 @@ static int init_memory_block(struct memory_block **memory, int block_id,
 	return ret;
 }
 
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
 {
+	int ret, section_count = 0;
 	struct memory_block *mem;
-	int i, ret, section_count = 0;
+	unsigned long nr;
 
-	for (i = base_section_nr;
-	     i < base_section_nr + sections_per_block;
-	     i++)
-		if (present_section_nr(i))
+	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+	     nr++)
+		if (present_section_nr(nr))
 			section_count++;
 
 	if (section_count == 0)
@@ -822,10 +822,9 @@ static const struct attribute_group *memory_root_attr_groups[] = {
  */
 int __init memory_dev_init(void)
 {
-	unsigned int i;
 	int ret;
 	int err;
-	unsigned long block_sz;
+	unsigned long block_sz, nr;
 
 	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
 	if (ret)
@@ -839,9 +838,9 @@ int __init memory_dev_init(void)
 	 * during boot and have been initialized
 	 */
 	mutex_lock(&mem_sysfs_mutex);
-	for (i = 0; i <= __highest_present_section_nr;
-	     i += sections_per_block) {
-		err = add_memory_block(i);
+	for (nr = 0; nr <= __highest_present_section_nr;
+	     nr += sections_per_block) {
+		err = add_memory_block(nr);
 		if (!ret)
 			ret = err;
 	}
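To make the driver change above concrete, here is a tiny standalone sketch
(not from the patch) of how a memory block id is derived from a section
number, mirroring base_memory_block_id().  The sections_per_block value is
an assumption, and block ids deliberately stay "int" here, matching this
point in the series.

#include <stdio.h>

/*
 * Assumed value for illustration; the kernel computes this as the
 * architecture's memory block size divided by the section size.
 */
static int sections_per_block = 16;

/*
 * Mirrors base_memory_block_id() after this patch: the section number is
 * "unsigned long", while block ids remain "int" until a later patch.
 */
static int base_memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

int main(void)
{
	unsigned long nr;

	/* Sections 0..15 map to block 0, 16..31 to block 1, and so on. */
	for (nr = 0; nr < 3UL * sections_per_block; nr += sections_per_block)
		printf("section %lu -> block %d\n", nr, base_memory_block_id(nr));
	return 0;
}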
include/linux/mmzone.h

@@ -1219,7 +1219,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
 		return NULL;
 	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
-extern int __section_nr(struct mem_section* ms);
+extern unsigned long __section_nr(struct mem_section *ms);
 extern unsigned long usemap_size(void);
 
 /*
@@ -1291,7 +1291,7 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 	return __nr_to_section(pfn_to_section_nr(pfn));
 }
 
-extern int __highest_present_section_nr;
+extern unsigned long __highest_present_section_nr;
 
 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
mm/sparse.c

@@ -102,7 +102,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-int __section_nr(struct mem_section* ms)
+unsigned long __section_nr(struct mem_section *ms)
 {
 	unsigned long root_nr;
 	struct mem_section *root = NULL;
@@ -121,9 +121,9 @@ int __section_nr(struct mem_section* ms)
 	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
 }
 #else
-int __section_nr(struct mem_section* ms)
+unsigned long __section_nr(struct mem_section *ms)
 {
-	return (int)(ms - mem_section[0]);
+	return (unsigned long)(ms - mem_section[0]);
 }
 #endif
 
@@ -178,10 +178,10 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
  * Keeping track of this gives us an easy way to break out of
  * those loops early.
  */
-int __highest_present_section_nr;
+unsigned long __highest_present_section_nr;
 static void section_mark_present(struct mem_section *ms)
 {
-	int section_nr = __section_nr(ms);
+	unsigned long section_nr = __section_nr(ms);
 
 	if (section_nr > __highest_present_section_nr)
 		__highest_present_section_nr = section_nr;
@@ -189,7 +189,7 @@ static void section_mark_present(struct mem_section *ms)
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 }
 
-static inline int next_present_section_nr(int section_nr)
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
 {
 	do {
 		section_nr++;
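As a rough usage sketch of the present-section bookkeeping touched above:
a self-contained toy model (an assumed boolean array stands in for the real
mem_section state) that loosely mirrors the shape of section_mark_present()
and next_present_section_nr() once everything is consistently
"unsigned long".

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; the array size and which sections are present are assumptions. */
#define NR_SECTIONS 64UL

static bool section_present[NR_SECTIONS];
static unsigned long __highest_present_section_nr;

/* Toy section_mark_present(): record presence and track the highest number. */
static void section_mark_present(unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;
	section_present[section_nr] = true;
}

/* Loosely mirrors the shape of next_present_section_nr() after the patch. */
static unsigned long next_present_section_nr(unsigned long section_nr)
{
	do {
		section_nr++;
		if (section_nr > __highest_present_section_nr)
			return -1UL;	/* no further present sections */
	} while (!section_present[section_nr]);

	return section_nr;
}

int main(void)
{
	unsigned long nr;

	section_mark_present(3);
	section_mark_present(4);
	section_mark_present(40);

	/* Walk all present sections, starting just before section 0. */
	for (nr = next_present_section_nr(-1UL); nr != -1UL;
	     nr = next_present_section_nr(nr))
		printf("present section %lu\n", nr);
	return 0;
}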