Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core fixes from Ingo Molnar:
- workaround for gcc asm handling
- futex race fixes
- objtool build warning fix
- two watchdog fixes: a crash fix (revert) and a bug fix for
/proc/sys/kernel/watchdog_thresh handling.
* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
objtool: Prevent GCC from merging annotate_unreachable(), take 2
objtool: Resync objtool's instruction decoder source code copy with the kernel's latest version
watchdog/hardlockup/perf: Use atomics to track in-use cpu counter
watchdog/hardlockup/perf: Revert a33d44843d ("watchdog/hardlockup/perf: Simplify deferred event destroy")
futex: Fix more put_pi_state() vs. exit_pi_state_list() races
commit b772b8e3ab
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -191,13 +191,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	asm("%c0:\n\t"							\
 	    ".pushsection .discard.reachable\n\t"			\
 	    ".long %c0b - .\n\t"					\
-	    ".popsection\n\t" : : "i" (__LINE__));			\
+	    ".popsection\n\t" : : "i" (__COUNTER__));			\
 })
 #define annotate_unreachable() ({					\
 	asm("%c0:\n\t"							\
 	    ".pushsection .discard.unreachable\n\t"			\
 	    ".long %c0b - .\n\t"					\
-	    ".popsection\n\t" : : "i" (__LINE__));			\
+	    ".popsection\n\t" : : "i" (__COUNTER__));			\
 })
 #define ASM_UNREACHABLE							\
 	"999:\n\t"							\
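The __LINE__ to __COUNTER__ switch is the "gcc asm handling" workaround mentioned above: GCC may merge identical inline asm statements, and two annotations expanded on the same source line get the same "i" (__LINE__) operand, making their asm bodies byte-for-byte identical and therefore mergeable, which loses one of the markers objtool expects to find. __COUNTER__ is bumped on every macro expansion, so each asm statement stays unique. A minimal standalone sketch of the idea (not the kernel header; the macro name and section name are illustrative, and it assumes GCC on an x86/ELF target):

	#include <stdio.h>

	/*
	 * Simplified annotation macro: records its constant operand in a
	 * discardable ELF section. With "i" (__LINE__), two expansions on
	 * the same source line produce identical asm statements that GCC
	 * may merge into one; __COUNTER__ is bumped per expansion, so
	 * every annotation stays distinct.
	 */
	#define annotate_demo()						\
		asm volatile(".pushsection .discard.demo,\"\",@progbits\n\t" \
			     ".long %c0\n\t"				\
			     ".popsection" : : "i" (__COUNTER__))

	static int check(int x)
	{
		if (x < 0) { annotate_demo(); return -1; } else { annotate_demo(); return 1; }
	}

	int main(void)
	{
		/* both branch annotations survive even though they share a line */
		printf("check(-5) = %d, check(5) = %d\n", check(-5), check(5));
		return 0;
	}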
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -903,11 +903,27 @@ void exit_pi_state_list(struct task_struct *curr)
 	 */
 	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
-
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
 
+		/*
+		 * We can race against put_pi_state() removing itself from the
+		 * list (a waiter going away). put_pi_state() will first
+		 * decrement the reference count and then modify the list, so
+		 * it's possible to see the list entry but fail this reference
+		 * acquire.
+		 *
+		 * In that case, drop the locks to let put_pi_state() make
+		 * progress and retry the loop.
+		 */
+		if (!atomic_inc_not_zero(&pi_state->refcount)) {
+			raw_spin_unlock_irq(&curr->pi_lock);
+			cpu_relax();
+			raw_spin_lock_irq(&curr->pi_lock);
+			continue;
+		}
 		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
@@ -918,8 +934,10 @@ void exit_pi_state_list(struct task_struct *curr)
 		 * task still owns the PI-state:
 		 */
 		if (head->next != next) {
+			/* retain curr->pi_lock for the loop invariant */
 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 			spin_unlock(&hb->lock);
+			put_pi_state(pi_state);
 			continue;
 		}
 
@@ -927,9 +945,8 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		raw_spin_unlock(&curr->pi_lock);
 
-		get_pi_state(pi_state);
+		raw_spin_unlock(&curr->pi_lock);
 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		spin_unlock(&hb->lock);
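The heart of the futex fix is the increment-if-not-zero idiom: exit_pi_state_list() can observe a pi_state whose last reference put_pi_state() is concurrently dropping, so blindly taking a new reference could resurrect a dying object. The third hunk follows from the same change: the loop now already owns a reference from atomic_inc_not_zero(), so the old get_pi_state() would double-count, and the early-retry path needs the balancing put_pi_state(). A userspace sketch of the idiom using C11 atomics (inc_not_zero and struct obj are illustrative stand-ins, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		atomic_int refcount;
	};

	/*
	 * Take a reference only if the object is still live (count > 0).
	 * Mirrors the kernel's atomic_inc_not_zero(): a CAS loop that
	 * refuses to resurrect an object whose count already hit zero.
	 */
	static bool inc_not_zero(atomic_int *ref)
	{
		int old = atomic_load(ref);

		while (old != 0) {
			if (atomic_compare_exchange_weak(ref, &old, old + 1))
				return true;	/* got a reference */
			/* CAS failure reloaded 'old'; loop re-checks for zero */
		}
		return false;	/* object is being destroyed; caller must back off and retry */
	}

	int main(void)
	{
		struct obj live = { .refcount = 1 };
		struct obj dying = { .refcount = 0 };

		printf("live:  %s\n", inc_not_zero(&live.refcount) ? "ref taken" : "lost race");
		printf("dying: %s\n", inc_not_zero(&dying.refcount) ? "ref taken" : "lost race");
		return 0;
	}

On the failure path the kernel version drops curr->pi_lock and calls cpu_relax() before retrying, exactly so the concurrent put_pi_state() can finish unlinking the entry.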
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "NMI watchdog: " fmt
 
 #include <linux/nmi.h>
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/sched/debug.h>
 
@@ -22,10 +23,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
 static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
-static unsigned int watchdog_cpus;
+static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
 void arch_touch_nmi_watchdog(void)
 {
@@ -189,7 +191,8 @@ void hardlockup_detector_perf_enable(void)
 	if (hardlockup_detector_event_create())
 		return;
 
-	if (!watchdog_cpus++)
+	/* use original value for check */
+	if (!atomic_fetch_inc(&watchdog_cpus))
 		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
 	perf_event_enable(this_cpu_read(watchdog_ev));
@@ -204,8 +207,10 @@ void hardlockup_detector_perf_disable(void)
 
 	if (event) {
 		perf_event_disable(event);
+		this_cpu_write(watchdog_ev, NULL);
+		this_cpu_write(dead_event, event);
 		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
-		watchdog_cpus--;
+		atomic_dec(&watchdog_cpus);
 	}
 }
 
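The counter conversion closes a small race: if (!watchdog_cpus++) is a non-atomic read-modify-write, so two CPUs enabling the detector concurrently could both observe zero (printing the banner twice) or lose an update against the decrement in the disable path. atomic_fetch_inc() returns the pre-increment value atomically, so exactly one caller sees zero. A small stand-in for the pattern using C11 atomics, where atomic_fetch_add(&v, 1) plays the role of the kernel's atomic_fetch_inc() (userspace sketch, not the kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int users;	/* counts CPUs with the detector enabled */

	static void detector_enable(void)
	{
		/* fetch-and-increment returns the original value, so only
		 * the first enabler (old value 0) prints the banner */
		if (atomic_fetch_add(&users, 1) == 0)
			puts("Enabled. Permanently consumes one hw-PMU counter.");
	}

	static void detector_disable(void)
	{
		atomic_fetch_sub(&users, 1);	/* pairs with the increment above */
	}

	int main(void)
	{
		detector_enable();	/* prints the banner */
		detector_enable();	/* silent: counter was already nonzero */
		detector_disable();
		detector_disable();
		printf("in-use count back to %d\n", atomic_load(&users));
		return 0;
	}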
@@ -219,7 +224,7 @@ void hardlockup_detector_perf_cleanup(void)
 	int cpu;
 
 	for_each_cpu(cpu, &dead_events_mask) {
-		struct perf_event *event = per_cpu(watchdog_ev, cpu);
+		struct perf_event *event = per_cpu(dead_event, cpu);
 
 		/*
 		 * Required because for_each_cpu() reports unconditionally
@@ -227,7 +232,7 @@ void hardlockup_detector_perf_cleanup(void)
 		 */
 		if (event)
 			perf_event_release_kernel(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
+		per_cpu(dead_event, cpu) = NULL;
 	}
 	cpumask_clear(&dead_events_mask);
 }
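The dead_event hunks restore the deferred-destroy scheme that the reverted commit had simplified away: the disable path parks the event in a dedicated per-CPU dead_event slot and flags the CPU in dead_events_mask, leaving watchdog_ev free for a later re-enable, and hardlockup_detector_perf_cleanup() walks the mask afterwards to do the actual perf_event_release_kernel(). A hedged sketch of this park-then-reap pattern (plain C arrays stand in for the per-CPU variables and malloc/free for event create/release; not kernel code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	#define NR_CPUS 4

	/* stand-ins for DEFINE_PER_CPU(struct perf_event *, ...) */
	static void *active_event[NR_CPUS];	/* watchdog_ev analogue      */
	static void *dead_event[NR_CPUS];	/* parked, awaiting release  */
	static bool  dead_mask[NR_CPUS];	/* dead_events_mask analogue */

	static void detector_disable(int cpu)
	{
		void *ev = active_event[cpu];

		if (ev) {
			/* Park the event instead of freeing it here:
			 * clearing the active slot lets a later enable
			 * install a fresh event without clobbering the
			 * one still awaiting release. */
			active_event[cpu] = NULL;
			dead_event[cpu] = ev;
			dead_mask[cpu] = true;
		}
	}

	static void detector_cleanup(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!dead_mask[cpu])
				continue;
			free(dead_event[cpu]);	/* release analogue */
			dead_event[cpu] = NULL;
			dead_mask[cpu] = false;
		}
	}

	int main(void)
	{
		active_event[1] = malloc(16);	/* pretend CPU1 created an event */
		detector_disable(1);
		active_event[1] = malloc(16);	/* re-enable: fresh event, no clobber */
		detector_cleanup();		/* reaps only the parked event */
		printf("cpu1 active=%p parked=%p\n", active_event[1], dead_event[1]);
		free(active_event[1]);
		return 0;
	}

Reaping from dead_event rather than watchdog_ev (the last two hunks) is what makes the re-enable-before-cleanup window safe: cleanup can no longer release an event that has since been replaced and is live again.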
@@ -1,4 +1,5 @@
 #!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
 # gen-insn-attr-x86.awk: Instruction attribute table generator
 # Written by Masami Hiramatsu <mhiramat@redhat.com>
 #