mirror of https://gitee.com/openkylin/linux.git
Merge branch 'timers/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/urgent
Pull nohz updates/fixes from Frederic Weisbecker:

 'Note that "watchdog: Boot-disable by default on full dynticks" is a
  temporary solution to work around the watchdog issue that prevents the
  tick from stopping. This is to make sure that 3.11 doesn't have that
  problem, as several people have complained about it.

  A proper, longer-term solution has been proposed by Peterz:

    http://lkml.kernel.org/r/20130618103632.GO3204@twins.programming.kicks-ass.net'

Signed-off-by: Ingo Molnar <mingo@kernel.org>
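For context, the boot-disable only changes the default: on a CONFIG_NO_HZ_FULL kernel the lockup detectors can still be re-enabled at runtime through the kernel.watchdog sysctl, as the new pr_warning() in lockup_detector_init() below spells out. A minimal userspace sketch of that reactivation (a hypothetical helper, not part of this commit; equivalent to 'sysctl -w kernel.watchdog=1' and requiring root) might look like:

/*
 * Hypothetical userspace helper, not part of this commit: re-enable the
 * lockup detectors at runtime. The sysctl is backed by proc_dowatchdog()
 * shown in the diff below.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/watchdog", "w");

        if (!f) {
                perror("fopen /proc/sys/kernel/watchdog");
                return 1;
        }
        fputs("1\n", f);        /* 1 = enable, 0 = disable */
        fclose(f);
        return 0;
}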
This commit is contained in:
commit e399eb56a6
include/linux/nmi.h
@@ -46,7 +46,7 @@ static inline bool trigger_all_cpu_backtrace(void)
 #ifdef CONFIG_LOCKUP_DETECTOR
 int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
-extern int watchdog_enabled;
+extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
kernel/sysctl.c
@@ -800,7 +800,7 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
        {
                .procname       = "watchdog",
-               .data           = &watchdog_enabled,
+               .data           = &watchdog_user_enabled,
                .maxlen         = sizeof (int),
                .mode           = 0644,
                .proc_handler   = proc_dowatchdog,
@@ -827,7 +827,7 @@ static struct ctl_table kern_table[] = {
        },
        {
                .procname       = "nmi_watchdog",
-               .data           = &watchdog_enabled,
+               .data           = &watchdog_user_enabled,
                .maxlen         = sizeof (int),
                .mode           = 0644,
                .proc_handler   = proc_dowatchdog,
kernel/time/tick-sched.c
@@ -178,6 +178,11 @@ static bool can_stop_full_tick(void)
         */
        if (!sched_clock_stable) {
                trace_tick_stop(0, "unstable sched clock\n");
+               /*
+                * Don't allow the user to think they can get
+                * full NO_HZ with this machine.
+                */
+               WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
                return false;
        }
 #endif
@@ -346,16 +351,6 @@ void __init tick_nohz_init(void)
        }

        cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-       /* Make sure full dynticks CPU are also RCU nocbs */
-       for_each_cpu(cpu, nohz_full_mask) {
-               if (!rcu_is_nocb_cpu(cpu)) {
-                       pr_warning("NO_HZ: CPU %d is not RCU nocb: "
-                                  "cleared from nohz_full range", cpu);
-                       cpumask_clear_cpu(cpu, nohz_full_mask);
-               }
-       }
-
        cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
        pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
kernel/watchdog.c
@@ -29,9 +29,9 @@
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>

-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;

 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -63,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
-               watchdog_enabled = 0;
+               watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -82,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);

 static int __init nowatchdog_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -90,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -158,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       if (watchdog_enabled) {
+       if (watchdog_user_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

-       if (!watchdog_enabled) {
-               kthread_park(current);
-               return;
-       }
-
        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
        watchdog_nmi_disable(cpu);
 }

+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+       watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
        return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */

+static struct smp_hotplug_thread watchdog_threads = {
+       .store                  = &softlockup_watchdog,
+       .thread_should_run      = watchdog_should_run,
+       .thread_fn              = watchdog,
+       .thread_comm            = "watchdog/%u",
+       .setup                  = watchdog_enable,
+       .cleanup                = watchdog_cleanup,
+       .park                   = watchdog_disable,
+       .unpark                 = watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
+{
+       int err = 0;
+
+       if (!watchdog_running) {
+               err = smpboot_register_percpu_thread(&watchdog_threads);
+               if (err)
+                       pr_err("Failed to create watchdog threads, disabled\n");
+               else
+                       watchdog_running = 1;
+       }
+
+       return err;
+}
+
-/* prepare/enable/disable routines */
+/* sysctl functions */
 #ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
-{
-       unsigned int cpu;
-
-       if (watchdog_disabled) {
-               watchdog_disabled = 0;
-               for_each_online_cpu(cpu)
-                       kthread_unpark(per_cpu(softlockup_watchdog, cpu));
-       }
-}
-
 static void watchdog_disable_all_cpus(void)
 {
-       unsigned int cpu;
-
-       if (!watchdog_disabled) {
-               watchdog_disabled = 1;
-               for_each_online_cpu(cpu)
-                       kthread_park(per_cpu(softlockup_watchdog, cpu));
+       if (watchdog_running) {
+               watchdog_running = 0;
+               smpboot_unregister_percpu_thread(&watchdog_threads);
        }
 }

@@ -507,45 +519,48 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int ret;
+       int err, old_thresh, old_enabled;

-       if (watchdog_disabled < 0)
-               return -ENODEV;
+       old_thresh = ACCESS_ONCE(watchdog_thresh);
+       old_enabled = ACCESS_ONCE(watchdog_user_enabled);

-       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-       if (ret || !write)
-               return ret;
+       err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (err || !write)
+               return err;

        set_sample_period();
        /*
         * Watchdog threads shouldn't be enabled if they are
-        * disabled. The 'watchdog_disabled' variable check in
+        * disabled. The 'watchdog_running' variable check in
         * watchdog_*_all_cpus() function takes care of this.
         */
-       if (watchdog_enabled && watchdog_thresh)
-               watchdog_enable_all_cpus();
+       if (watchdog_user_enabled && watchdog_thresh)
+               err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

-       return ret;
+       /* Restore old values on failure */
+       if (err) {
+               watchdog_thresh = old_thresh;
+               watchdog_user_enabled = old_enabled;
+       }
+
+       return err;
 }
 #endif /* CONFIG_SYSCTL */

-static struct smp_hotplug_thread watchdog_threads = {
-       .store                  = &softlockup_watchdog,
-       .thread_should_run      = watchdog_should_run,
-       .thread_fn              = watchdog,
-       .thread_comm            = "watchdog/%u",
-       .setup                  = watchdog_enable,
-       .park                   = watchdog_disable,
-       .unpark                 = watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
        set_sample_period();
-       if (smpboot_register_percpu_thread(&watchdog_threads)) {
-               pr_err("Failed to create watchdog threads, disabled\n");
-               watchdog_disabled = -ENODEV;
-       }
+
+#ifdef CONFIG_NO_HZ_FULL
+       if (watchdog_user_enabled) {
+               watchdog_user_enabled = 0;
+               pr_warning("Disabled lockup detectors by default for full dynticks\n");
+               pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
+       }
+#endif
+
+       if (watchdog_user_enabled)
+               watchdog_enable_all_cpus();
 }
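The kernel/watchdog.c hunks above rebuild watchdog_enable_all_cpus()/watchdog_disable_all_cpus() on top of the kernel's smpboot per-CPU thread API (smpboot_register_percpu_thread() / smpboot_unregister_percpu_thread()). As a rough illustration of that pattern, not part of this commit and with every demo_* name hypothetical, a minimal in-kernel user might look like:

/*
 * Minimal sketch of the smpboot per-CPU thread pattern used by the
 * watchdog above. All demo_* names are invented; only the
 * smp_hotplug_thread fields and the register/unregister calls are
 * the real kernel API.
 */
#include <linux/module.h>
#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(unsigned long, demo_work_pending);

/* Called by the smpboot core; run demo_fn() only when work is queued. */
static int demo_should_run(unsigned int cpu)
{
        return __this_cpu_read(demo_work_pending) != 0;
}

static void demo_fn(unsigned int cpu)
{
        __this_cpu_write(demo_work_pending, 0);
        pr_info("demo/%u: handled pending work\n", cpu);
}

static struct smp_hotplug_thread demo_threads = {
        .store                  = &demo_task,
        .thread_should_run      = demo_should_run,
        .thread_fn              = demo_fn,
        .thread_comm            = "demo/%u",
};

static int __init demo_init(void)
{
        /* Creates one "demo/N" kthread per CPU; hotplug is handled for us. */
        return smpboot_register_percpu_thread(&demo_threads);
}

static void __exit demo_exit(void)
{
        smpboot_unregister_percpu_thread(&demo_threads);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");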