// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "kcsan.h"

/*
 * Statistics counters.
 */
static atomic_long_t counters[KCSAN_COUNTER_COUNT];

/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
        unsigned long *addrs;   /* array of addresses */
        size_t size;            /* current size */
        int used;               /* number of elements used */
        bool sorted;            /* if elements are sorted */
        bool whitelist;         /* if list is a blacklist or whitelist */
} report_filterlist = {
        .addrs = NULL,
        .size = 8,              /* small initial size */
        .used = 0,
        .sorted = false,
        .whitelist = false,     /* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);
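
/*
 * The filter list above is populated at runtime via the "kcsan" debugfs file
 * (see debugfs_write() below). As a rough usage sketch, assuming debugfs is
 * mounted at the conventional /sys/kernel/debug and using a placeholder
 * function name:
 *
 *      echo whitelist > /sys/kernel/debug/kcsan           # treat list as whitelist
 *      echo blacklist > /sys/kernel/debug/kcsan           # treat list as blacklist (default)
 *      echo '!<function-name>' > /sys/kernel/debug/kcsan  # add a function to the list
 */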

static const char *counter_to_name(enum kcsan_counter_id id)
{
        switch (id) {
        case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
        case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
        case KCSAN_COUNTER_DATA_RACES: return "data_races";
        case KCSAN_COUNTER_ASSERT_FAILURES: return "assert_failures";
        case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
        case KCSAN_COUNTER_REPORT_RACES: return "report_races";
        case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
        case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
        case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
        case KCSAN_COUNTER_COUNT:
                BUG();
        }
        return NULL;
}

void kcsan_counter_inc(enum kcsan_counter_id id)
{
        atomic_long_inc(&counters[id]);
}

void kcsan_counter_dec(enum kcsan_counter_id id)
{
        atomic_long_dec(&counters[id]);
}

/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 */
static noinline void microbenchmark(unsigned long iters)
{
        cycles_t cycles;

        pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);

        cycles = get_cycles();
        while (iters--) {
                /*
                 * We can run this benchmark from multiple tasks; this address
                 * calculation increases likelihood of some accesses
                 * overlapping. Make the access type an atomic read, to never
                 * set up watchpoints and test the fast-path only.
                 */
                unsigned long addr =
                        iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
                __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
        }
        cycles = get_cycles() - cycles;

        pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
}
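
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug: writing
 * 'microbench=<iters>' from several tasks runs the benchmark concurrently,
 * e.g.:
 *
 *      for i in $(seq 4); do
 *              echo microbench=1000000 > /sys/kernel/debug/kcsan &
 *      done
 *
 * Timing results are printed to the kernel log by the pr_info() calls above.
 */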

/*
 * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
 * debugfs file from multiple tasks to generate real conflicts and show reports.
 */
static long test_dummy;
static long test_flags;
static noinline void test_thread(unsigned long iters)
{
        const long CHANGE_BITS = 0xff00ff00ff00ff00L;
        const struct kcsan_ctx ctx_save = current->kcsan_ctx;
        cycles_t cycles;

        /* We may have been called from an atomic region; reset context. */
        memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));

        pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
        pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);

        cycles = get_cycles();
        while (iters--) {
                /* These all should generate reports. */
                __kcsan_check_read(&test_dummy, sizeof(test_dummy));
                ASSERT_EXCLUSIVE_WRITER(test_dummy);
                ASSERT_EXCLUSIVE_ACCESS(test_dummy);

                ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
                __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

                ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
                __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

                /* not actually instrumented */
                WRITE_ONCE(test_dummy, iters); /* to observe value-change */
                __kcsan_check_write(&test_dummy, sizeof(test_dummy));

                test_flags ^= CHANGE_BITS; /* generate value-change */
                __kcsan_check_write(&test_flags, sizeof(test_flags));
        }
        cycles = get_cycles() - cycles;

        pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);

        /* restore context */
        current->kcsan_ctx = ctx_save;
}
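
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug: writing
 * 'test=<iters>' from at least two tasks at the same time creates real
 * conflicting accesses to test_dummy/test_flags, so KCSAN reports should
 * appear in the kernel log:
 *
 *      echo test=100000 > /sys/kernel/debug/kcsan &
 *      echo test=100000 > /sys/kernel/debug/kcsan &
 */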

static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
        const unsigned long a = *(const unsigned long *)rhs;
        const unsigned long b = *(const unsigned long *)lhs;

        return a < b ? -1 : a == b ? 0 : 1;
}

bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
        unsigned long symbolsize, offset;
        unsigned long flags;
        bool ret = false;

        if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
                return false;
        func_addr -= offset; /* Get function start */

        spin_lock_irqsave(&report_filterlist_lock, flags);
        if (report_filterlist.used == 0)
                goto out;

        /* Sort array if it is unsorted, and then do a binary search. */
        if (!report_filterlist.sorted) {
                sort(report_filterlist.addrs, report_filterlist.used,
                     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
                report_filterlist.sorted = true;
        }
        ret = !!bsearch(&func_addr, report_filterlist.addrs,
                        report_filterlist.used, sizeof(unsigned long),
                        cmp_filterlist_addrs);
        if (report_filterlist.whitelist)
                ret = !ret;

out:
        spin_unlock_irqrestore(&report_filterlist_lock, flags);
        return ret;
}

static void set_report_filterlist_whitelist(bool whitelist)
{
        unsigned long flags;

        spin_lock_irqsave(&report_filterlist_lock, flags);
        report_filterlist.whitelist = whitelist;
        spin_unlock_irqrestore(&report_filterlist_lock, flags);
}

/* Returns 0 on success, error-code otherwise. */
static ssize_t insert_report_filterlist(const char *func)
{
        unsigned long flags;
        unsigned long addr = kallsyms_lookup_name(func);
        ssize_t ret = 0;

        if (!addr) {
                pr_err("KCSAN: could not find function: '%s'\n", func);
                return -ENOENT;
        }

        spin_lock_irqsave(&report_filterlist_lock, flags);

        if (report_filterlist.addrs == NULL) {
                /* initial allocation */
                report_filterlist.addrs =
                        kmalloc_array(report_filterlist.size,
                                      sizeof(unsigned long), GFP_KERNEL);
                if (report_filterlist.addrs == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
        } else if (report_filterlist.used == report_filterlist.size) {
                /* resize filterlist */
                size_t new_size = report_filterlist.size * 2;
                unsigned long *new_addrs =
                        krealloc(report_filterlist.addrs,
                                 new_size * sizeof(unsigned long), GFP_KERNEL);

                if (new_addrs == NULL) {
                        /* leave filterlist itself untouched */
                        ret = -ENOMEM;
                        goto out;
                }

                report_filterlist.size = new_size;
                report_filterlist.addrs = new_addrs;
        }

        /* Note: deduplicating should be done in userspace. */
        report_filterlist.addrs[report_filterlist.used++] =
                kallsyms_lookup_name(func);
        report_filterlist.sorted = false;

out:
        spin_unlock_irqrestore(&report_filterlist_lock, flags);

        return ret;
}

static int show_info(struct seq_file *file, void *v)
{
        int i;
        unsigned long flags;

        /* show stats */
        seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
        for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
                seq_printf(file, "%s: %ld\n", counter_to_name(i),
                           atomic_long_read(&counters[i]));

        /* show filter functions, and filter type */
        spin_lock_irqsave(&report_filterlist_lock, flags);
        seq_printf(file, "\n%s functions: %s\n",
                   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
                   report_filterlist.used == 0 ? "none" : "");
        for (i = 0; i < report_filterlist.used; ++i)
                seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
        spin_unlock_irqrestore(&report_filterlist_lock, flags);

        return 0;
}

static int debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_info, NULL);
}

static ssize_t
debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
        char kbuf[KSYM_NAME_LEN];
        char *arg;
        int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);

        if (copy_from_user(kbuf, buf, read_len))
                return -EFAULT;
        kbuf[read_len] = '\0';
        arg = strstrip(kbuf);

        if (!strcmp(arg, "on")) {
                WRITE_ONCE(kcsan_enabled, true);
        } else if (!strcmp(arg, "off")) {
                WRITE_ONCE(kcsan_enabled, false);
        } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
                unsigned long iters;

                if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
                        return -EINVAL;
                microbenchmark(iters);
        } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
                unsigned long iters;

                if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
                        return -EINVAL;
                test_thread(iters);
        } else if (!strcmp(arg, "whitelist")) {
                set_report_filterlist_whitelist(true);
        } else if (!strcmp(arg, "blacklist")) {
                set_report_filterlist_whitelist(false);
        } else if (arg[0] == '!') {
                ssize_t ret = insert_report_filterlist(&arg[1]);

                if (ret < 0)
                        return ret;
        } else {
                return -EINVAL;
        }

        return count;
}
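
/*
 * Summary of writes accepted above: "on"/"off" toggle kcsan_enabled,
 * "microbench=<iters>" and "test=<iters>" run the helpers defined earlier,
 * "whitelist"/"blacklist" select the filter type, and "!<function>" adds a
 * function to the filter list. For example (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      echo off > /sys/kernel/debug/kcsan      # disable KCSAN
 *      echo on  > /sys/kernel/debug/kcsan      # re-enable KCSAN
 */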

static const struct file_operations debugfs_ops =
{
        .read    = seq_read,
        .open    = debugfs_open,
        .write   = debugfs_write,
        .release = single_release
};

void __init kcsan_debugfs_init(void)
{
        debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
}
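
/*
 * Once kcsan_debugfs_init() has run, reading the file dumps the current state
 * via show_info() (again assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      cat /sys/kernel/debug/kcsan
 *
 * This prints the 'enabled' flag, the statistics counters named in
 * counter_to_name(), and the current whitelist/blacklist of functions.
 */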