mirror of https://gitee.com/openkylin/linux.git
Kmemleak patches

Main features:
- Handle percpu memory allocations (only scanning them, not actually reporting).
- Memory hotplug support.

Usability improvements:
- Show the origin of early allocations.
- Report previously found leaks even if kmemleak has been disabled by some error.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.9 (GNU/Linux)

iQIcBAABAgAGBQJPDCI0AAoJEGvWsS0AyF7x+MUQALEQTnREqBgpqa+95Wk8WaEB
F/00mbwRpLVKl1jsfCn4wxFPUGuXS/oaxYztDSTP8BrEzZ5E0Kq+Ejsby9yPLs5r
9nwsoRrxBerUjFHqXjx2xrTkAZQomLesNw5ZkaKFVgBzNo7O63Co4TGuP5J8s03G
7hyewcZvbmzkX1SpqMvPItdUTpK+vwABBHGvYta6NS89Bt9GuexC/NS3o2qy2q6c
2BXhUXSJyYsalxvsYYw+hNOyVWrFJ/TWJKsksg9ANxzcbkLKUat9IpvcR3CTRUpu
L/72GXGCDyMw3YgXs8MBlOk3KXRcobISYCVMsDuVz6tITP7RHCB6rG/Hg55YWxeS
1N2P0kMFkDGVui4pzPZZENUH1QfuwoZ5RpgJ2OCaVnfguLOgGM9k665KT9OScWeC
tpxoS82jGd5RezrgF30yvpLz2CivvjRiEpIXL8o47pg/kESgY1PFnDwTW8imoikt
dTQFZXYeFzjcHkN1YNUXgjNfh+CqCkUXLQ5k+8vQ+9TFWh21thwuzg5AGcK28xTc
6mGzSsJzx2w7IKTCjZ3BGN+IXt/KpC4iKyIEFeNsgy9Z8gU0I0GaMVixQtZFxeEt
asqNBaQGngJ86BeO1bjRB/YKO+F+ZIchJiGN4PNgtc4BGz45LGfKOfRjlku4rmsZ
8OJRqGx5qZykxYhNSHXq
=5lb1
-----END PGP SIGNATURE-----

Merge tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux

* tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux:
  kmemleak: Add support for memory hotplug
  kmemleak: Handle percpu memory allocation
  kmemleak: Report previously found leaks even after an error
  kmemleak: When the early log buffer is exceeded, report the actual number
  kmemleak: Show where early_log issues come from
commit 892d208bcf
Documentation/kmemleak.txt

@@ -127,7 +127,10 @@ See the include/linux/kmemleak.h header for the functions prototype.
 kmemleak_init		- initialize kmemleak
 kmemleak_alloc		- notify of a memory block allocation
+kmemleak_alloc_percpu	- notify of a percpu memory block allocation
 kmemleak_free		- notify of a memory block freeing
 kmemleak_free_part	- notify of a partial memory block freeing
+kmemleak_free_percpu	- notify of a percpu memory block freeing
 kmemleak_not_leak	- mark an object as not a leak
 kmemleak_ignore	- do not scan or report an object as leak
 kmemleak_scan_area	- add scan areas inside a memory block
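For orientation, a typical caller of this annotation API is code that hands out memory the standard allocators cannot see. A minimal sketch, assuming a hypothetical pool allocator (struct my_pool, my_pool_carve() and my_pool_release() are illustrative names, not kernel APIs):

  #include <linux/kmemleak.h>
  #include <linux/slab.h>

  /*
   * Carve a sub-block out of a larger area and register it with
   * kmemleak so it is scanned and leak-checked like a kmalloc'ed
   * object (min_count == 1: one live reference is expected).
   */
  static void *pool_alloc(struct my_pool *pool, size_t size)
  {
  	void *obj = my_pool_carve(pool, size);	/* hypothetical helper */

  	if (obj)
  		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
  	return obj;
  }

  static void pool_free(struct my_pool *pool, void *obj)
  {
  	kmemleak_free(obj);
  	my_pool_release(pool, obj);		/* hypothetical helper */
  }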
include/linux/kmemleak.h

@@ -26,8 +26,10 @@
 extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
 extern void kmemleak_free(const void *ptr) __ref;
 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
			     size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
@@ -68,6 +70,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    gfp_t gfp)
 {
 }
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+}
 static inline void kmemleak_free(const void *ptr)
 {
 }
@@ -77,6 +82,9 @@ static inline void kmemleak_free_part(const void *ptr, size_t size)
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
+static inline void kmemleak_free_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_not_leak(const void *ptr)
 {
 }
lib/Kconfig.debug

@@ -414,7 +414,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
+	depends on DEBUG_KERNEL && EXPERIMENTAL && \
 		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
 
 	select DEBUG_FS
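With the !MEMORY_HOTPLUG restriction gone, the detector can now be enabled on hotplug-capable systems. As a reminder of the standard workflow from Documentation/kmemleak.txt (the early-log size of 400 shown here is the Kconfig default; adjust as needed):

  CONFIG_DEBUG_KMEMLEAK=y
  CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400

  # mount -t debugfs nodev /sys/kernel/debug/
  # echo scan > /sys/kernel/debug/kmemleak
  # cat /sys/kernel/debug/kmemleak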
mm/kmemleak.c (158 changed lines)
@@ -100,6 +100,7 @@
 #include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
+#include <linux/memory_hotplug.h>
 
 /*
  * Kmemleak configuration and common defines.
@@ -196,7 +197,9 @@ static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 /* enables or disables early logging of the memory operations */
 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
-/* set if a fata kmemleak error has occurred */
+/* set if a kmemleak warning was issued */
+static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+/* set if a fatal kmemleak error has occurred */
 static atomic_t kmemleak_error = ATOMIC_INIT(0);
 
 /* minimum and maximum address that may be valid pointers */
@@ -228,8 +231,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
 	KMEMLEAK_ALLOC,
+	KMEMLEAK_ALLOC_PERCPU,
 	KMEMLEAK_FREE,
 	KMEMLEAK_FREE_PART,
+	KMEMLEAK_FREE_PERCPU,
 	KMEMLEAK_NOT_LEAK,
 	KMEMLEAK_IGNORE,
 	KMEMLEAK_SCAN_AREA,
@@ -259,9 +264,10 @@ static void kmemleak_disable(void);
 /*
  * Print a warning and dump the stack trace.
  */
-#define kmemleak_warn(x...)	do {	\
-	pr_warning(x);			\
-	dump_stack();			\
+#define kmemleak_warn(x...)	do {		\
+	pr_warning(x);				\
+	dump_stack();				\
+	atomic_set(&kmemleak_warning, 1);	\
 } while (0)
 
 /*
@@ -403,8 +409,8 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
 		if (!alias && object->pointer != ptr) {
-			pr_warning("Found object by alias at 0x%08lx\n", ptr);
-			dump_stack();
+			kmemleak_warn("Found object by alias at 0x%08lx\n",
+				      ptr);
 			dump_object_info(object);
 			object = NULL;
 		}
@@ -794,9 +800,13 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	unsigned long flags;
 	struct early_log *log;
 
+	if (atomic_read(&kmemleak_error)) {
+		/* kmemleak stopped recording, just count the requests */
+		crt_early_log++;
+		return;
+	}
+
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		pr_warning("Early log buffer exceeded, "
-			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
 		kmemleak_disable();
 		return;
 	}
@@ -811,8 +821,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->ptr = ptr;
 	log->size = size;
 	log->min_count = min_count;
-	if (op_type == KMEMLEAK_ALLOC)
-		log->trace_len = __save_stack_trace(log->trace);
+	log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
 	local_irq_restore(flags);
 }
@@ -846,6 +855,20 @@ static void early_alloc(struct early_log *log)
 	rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+	unsigned int cpu;
+	const void __percpu *ptr = log->ptr;
+
+	for_each_possible_cpu(cpu) {
+		log->ptr = per_cpu_ptr(ptr, cpu);
+		early_alloc(log);
+	}
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr:	pointer to beginning of the object
@@ -872,6 +895,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
+/**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ * @size:	size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+	/*
+	 * Percpu allocations are only scanned and not reported as leaks
+	 * (min_count is set to 0).
+	 */
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+				      size, 0, GFP_KERNEL);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
 /**
  * kmemleak_free - unregister a previously registered object
  * @ptr:	pointer to beginning of the object
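Registering one object per possible CPU with min_count == 0 means the percpu areas are scanned for references but never reported as leaks themselves. That matters because a percpu variable can hold the only live pointer to an ordinarily tracked object, as in this hedged sketch (struct cache_slot and slot_fill() are illustrative only; 'slots' is assumed to come from alloc_percpu()):

  struct cache_slot {
  	void *buf;	/* only reference to a kmalloc'ed buffer */
  };

  static struct cache_slot __percpu *slots;	/* from alloc_percpu() */

  static int slot_fill(unsigned int cpu)
  {
  	struct cache_slot *slot = per_cpu_ptr(slots, cpu);

  	/*
  	 * Unless kmemleak scans the percpu area, 'buf' appears
  	 * unreferenced and would be misreported as a leak.
  	 */
  	slot->buf = kmalloc(64, GFP_KERNEL);
  	return slot->buf ? 0 : -ENOMEM;
  }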
@@ -910,6 +961,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
+/**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			delete_object_full((unsigned long)per_cpu_ptr(ptr,
+								      cpu));
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
 /**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:	pointer to beginning of the object
@@ -1220,9 +1293,9 @@ static void kmemleak_scan(void)
 #endif
 
 	/*
-	 * Struct page scanning for each node. The code below is not yet safe
-	 * with MEMORY_HOTPLUG.
+	 * Struct page scanning for each node.
 	 */
+	lock_memory_hotplug();
 	for_each_online_node(i) {
 		pg_data_t *pgdat = NODE_DATA(i);
 		unsigned long start_pfn = pgdat->node_start_pfn;
@@ -1241,6 +1314,7 @@ static void kmemleak_scan(void)
 			scan_block(page, page + 1, NULL, 1);
 		}
 	}
+	unlock_memory_hotplug();
 
 	/*
 	 * Scanning the task stacks (may introduce false negatives).
@@ -1467,9 +1541,6 @@ static const struct seq_operations kmemleak_seq_ops = {
 
 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-	if (!atomic_read(&kmemleak_enabled))
-		return -EBUSY;
-
 	return seq_open(file, &kmemleak_seq_ops);
 }
 
@@ -1543,6 +1614,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 	int buf_size;
 	int ret;
 
+	if (!atomic_read(&kmemleak_enabled))
+		return -EBUSY;
+
 	buf_size = min(size, (sizeof(buf) - 1));
 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
 		return -EFAULT;
@@ -1602,20 +1676,24 @@ static const struct file_operations kmemleak_fops = {
 };
 
 /*
- * Perform the freeing of the kmemleak internal objects after waiting for any
- * current memory scan to complete.
+ * Stop the memory scanning thread and free the kmemleak internal objects if
+ * no previous scan thread (otherwise, kmemleak may still have some useful
+ * information on memory leaks).
  */
 static void kmemleak_do_cleanup(struct work_struct *work)
 {
 	struct kmemleak_object *object;
+	bool cleanup = scan_thread == NULL;
 
 	mutex_lock(&scan_mutex);
 	stop_scan_thread();
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(object, &object_list, object_list)
-		delete_object_full(object->pointer);
-	rcu_read_unlock();
+	if (cleanup) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(object, &object_list, object_list)
+			delete_object_full(object->pointer);
+		rcu_read_unlock();
+	}
 	mutex_unlock(&scan_mutex);
 }
@@ -1632,7 +1710,6 @@ static void kmemleak_disable(void)
 		return;
 
 	/* stop any memory operation tracing */
-	atomic_set(&kmemleak_early_log, 0);
 	atomic_set(&kmemleak_enabled, 0);
 
 	/* check whether it is too early for a kernel thread */
@@ -1659,6 +1736,17 @@ static int kmemleak_boot_config(char *str)
 }
 early_param("kmemleak", kmemleak_boot_config);
 
+static void __init print_log_trace(struct early_log *log)
+{
+	struct stack_trace trace;
+
+	trace.nr_entries = log->trace_len;
+	trace.entries = log->trace;
+
+	pr_notice("Early log backtrace:\n");
+	print_stack_trace(&trace, 2);
+}
+
 /*
  * Kmemleak initialization.
  */
@@ -1681,12 +1769,18 @@ void __init kmemleak_init(void)
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 	INIT_PRIO_TREE_ROOT(&object_tree_root);
 
+	if (crt_early_log >= ARRAY_SIZE(early_log))
+		pr_warning("Early log buffer exceeded (%d), please increase "
+			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
+
 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
 	local_irq_save(flags);
-	if (!atomic_read(&kmemleak_error)) {
+	atomic_set(&kmemleak_early_log, 0);
+	if (atomic_read(&kmemleak_error)) {
+		local_irq_restore(flags);
+		return;
+	} else
 		atomic_set(&kmemleak_enabled, 1);
-		atomic_set(&kmemleak_early_log, 0);
-	}
 	local_irq_restore(flags);
 
 	/*
@@ -1701,12 +1795,18 @@ void __init kmemleak_init(void)
 		case KMEMLEAK_ALLOC:
 			early_alloc(log);
 			break;
+		case KMEMLEAK_ALLOC_PERCPU:
+			early_alloc_percpu(log);
+			break;
 		case KMEMLEAK_FREE:
 			kmemleak_free(log->ptr);
 			break;
 		case KMEMLEAK_FREE_PART:
 			kmemleak_free_part(log->ptr, log->size);
 			break;
+		case KMEMLEAK_FREE_PERCPU:
+			kmemleak_free_percpu(log->ptr);
+			break;
 		case KMEMLEAK_NOT_LEAK:
 			kmemleak_not_leak(log->ptr);
 			break;
@@ -1720,7 +1820,13 @@ void __init kmemleak_init(void)
 			kmemleak_no_scan(log->ptr);
 			break;
 		default:
-			WARN_ON(1);
+			kmemleak_warn("Unknown early log operation: %d\n",
+				      log->op_type);
 		}
+
+		if (atomic_read(&kmemleak_warning)) {
+			print_log_trace(log);
+			atomic_set(&kmemleak_warning, 0);
+		}
 	}
 }
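Taken together, these hunks complete kmemleak's early-boot story: operations arriving before kmemleak_init() are recorded in a static buffer (now with a stack trace for every operation, not just allocations), counted even after an error, and replayed once initialization runs. A simplified, user-space model of that record-and-replay pattern follows; it is a sketch of the idea only (names like OP_ALLOC and replay_early_log() are illustrative, not kernel code):

  #include <stddef.h>
  #include <stdio.h>

  enum op_type { OP_ALLOC, OP_FREE };

  struct early_op {
  	enum op_type type;
  	const void *ptr;
  	size_t size;
  };

  #define EARLY_LOG_SIZE 400		/* mirrors the Kconfig default */
  static struct early_op early_log[EARLY_LOG_SIZE];
  static int crt_early_log;		/* counts every request */

  /* Record an operation; keep counting even once the buffer is full so
   * the replay step can report how large the buffer should have been. */
  static void log_early(enum op_type type, const void *ptr, size_t size)
  {
  	if (crt_early_log < EARLY_LOG_SIZE) {
  		early_log[crt_early_log].type = type;
  		early_log[crt_early_log].ptr = ptr;
  		early_log[crt_early_log].size = size;
  	}
  	crt_early_log++;
  }

  /* Replay the buffered operations at "init" time, reporting the
   * actual number of requests if the buffer overflowed (the kernel
   * uses the same >= check). */
  static void replay_early_log(void)
  {
  	int i, n = crt_early_log < EARLY_LOG_SIZE ? crt_early_log
  						  : EARLY_LOG_SIZE;

  	if (crt_early_log >= EARLY_LOG_SIZE)
  		printf("early log exceeded (%d), increase the buffer\n",
  		       crt_early_log);
  	for (i = 0; i < n; i++)
  		printf("replaying op %d on %p (%zu bytes)\n",
  		       early_log[i].type, early_log[i].ptr,
  		       early_log[i].size);
  }

  int main(void)
  {
  	int obj;

  	log_early(OP_ALLOC, &obj, sizeof(obj));
  	log_early(OP_FREE, &obj, 0);
  	replay_early_log();
  	return 0;
  }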
mm/percpu.c (12 changed lines)
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	const char *err;
 	int slot, off, new_alloc;
 	unsigned long flags;
+	void __percpu *ptr;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	mutex_unlock(&pcpu_alloc_mutex);
 
 	/* return address relative to base address */
-	return __addr_to_pcpu_ptr(chunk->base_addr + off);
+	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+	kmemleak_alloc_percpu(ptr, size);
+	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
 	if (!ptr)
 		return;
 
+	kmemleak_free_percpu(ptr);
+
 	addr = __pcpu_ptr_to_addr(ptr);
 
 	spin_lock_irqsave(&pcpu_lock, flags);
@@ -1639,6 +1645,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 			rc = -ENOMEM;
 			goto out_free_areas;
 		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
 		areas[group] = ptr;
 
 		base = min(ptr, base);
@@ -1753,6 +1761,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 				       "for cpu%u\n", psize_str, cpu);
 				goto enomem;
 			}
+			/* kmemleak tracks the percpu allocations separately */
+			kmemleak_free(ptr);
 			pages[j++] = virt_to_page(ptr);
 		}
 
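With these hooks in place, the percpu allocator reports its allocations to kmemleak transparently; callers need no annotations of their own. A hedged end-to-end sketch of a caller (a hypothetical module, not part of this patch set):

  #include <linux/module.h>
  #include <linux/percpu.h>

  static long __percpu *demo_stats;

  static int __init demo_init(void)
  {
  	/* pcpu_alloc() now calls kmemleak_alloc_percpu() internally */
  	demo_stats = alloc_percpu(long);
  	return demo_stats ? 0 : -ENOMEM;
  }

  static void __exit demo_exit(void)
  {
  	/* free_percpu() calls kmemleak_free_percpu() before freeing */
  	free_percpu(demo_stats);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");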