timers: Move clearing of base::timer_running under base::lock
syzbot reported KCSAN data races vs. timer_base::timer_running being set to
NULL without holding base::lock in expire_timers().
This looks innocent and most reads are clearly not problematic, but
Frederic identified an issue which is:
 int data = 0;

 void timer_func(struct timer_list *t)
 {
         data = 1;
 }
 CPU 0                                              CPU 1
 ------------------------------                     --------------------------
 base = lock_timer_base(timer, &flags);             raw_spin_unlock(&base->lock);
 if (base->running_timer != timer)                  call_timer_fn(timer, fn, baseclk);
   ret = detach_if_pending(timer, base, true);      base->running_timer = NULL;
 raw_spin_unlock_irqrestore(&base->lock, flags);    raw_spin_lock(&base->lock);

 x = data;
If the timer has previously executed on CPU 1, then CPU 0 can observe
base->running_timer == NULL and return, assuming the timer has completed.
But because the store clearing base->running_timer is not serialized
against base->lock, CPU 0 is not guaranteed on all architectures to also
observe the timer function's store to 'data': x can still read 0. The
comment for del_timer_sync() makes exactly that visibility guarantee.
Moving the assignment under base->lock prevents this.
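
To make the broken guarantee concrete, here is a minimal sketch of the
pattern del_timer_sync() callers rely on. The names my_dev, my_timer_fn
and my_dev_teardown are made up for illustration and are not part of this
patch:

 #include <linux/timer.h>
 #include <linux/printk.h>

 struct my_dev {
         struct timer_list timer;
         int stats;              /* written only by the timer callback */
 };

 static void my_timer_fn(struct timer_list *t)
 {
         struct my_dev *dev = from_timer(dev, t, timer);

         dev->stats++;           /* plain store, like 'data = 1' above */
 }

 static void my_dev_teardown(struct my_dev *dev)
 {
         del_timer_sync(&dev->timer);
         /*
          * Documented guarantee: the callback has finished and all of
          * its memory accesses are visible here, so this read must see
          * the final value of dev->stats. The unlocked clearing of
          * base->running_timer broke exactly this.
          */
         pr_info("final stats: %d\n", dev->stats);
 }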
For a non-RT kernel it is, performance wise, completely irrelevant whether
the store happens before or after taking the lock. For an RT kernel moving
the store under the lock requires an extra unlock/lock pair in the case
that there is a waiter for the timer, but that's not the end of the world.
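
For an RT kernel, timer_sync_wait_running() then ends up looking like this
(an annotated copy of the new code in the diff below; it is now called with
base->lock held):

 static void timer_sync_wait_running(struct timer_base *base)
 {
         if (atomic_read(&base->timer_waiters)) {
                 /* The extra pair: drop base->lock, which is now held ... */
                 raw_spin_unlock_irq(&base->lock);
                 /* ... cycle expiry_lock so the waiter can get through ... */
                 spin_unlock(&base->expiry_lock);
                 spin_lock(&base->expiry_lock);
                 /* ... and reacquire base->lock before returning. */
                 raw_spin_lock_irq(&base->lock);
         }
 }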
Reported-by: syzbot+aa7c2385d46c5eba0b89@syzkaller.appspotmail.com
Reported-by: syzbot+abea4558531bae1ba9fe@syzkaller.appspotmail.com
Fixes: 030dcdd197 ("timers: Prepare support for PREEMPT_RT")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/87lfea7gw8.fsf@nanos.tec.linutronix.de
Cc: stable@vger.kernel.org
commit bb7262b295
parent ff1176468d
@@ -1265,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 static void timer_sync_wait_running(struct timer_base *base)
 {
         if (atomic_read(&base->timer_waiters)) {
+                raw_spin_unlock_irq(&base->lock);
                 spin_unlock(&base->expiry_lock);
                 spin_lock(&base->expiry_lock);
+                raw_spin_lock_irq(&base->lock);
         }
 }

@@ -1457,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                 if (timer->flags & TIMER_IRQSAFE) {
                         raw_spin_unlock(&base->lock);
                         call_timer_fn(timer, fn, baseclk);
-                        base->running_timer = NULL;
                         raw_spin_lock(&base->lock);
+                        base->running_timer = NULL;
                 } else {
                         raw_spin_unlock_irq(&base->lock);
                         call_timer_fn(timer, fn, baseclk);
+                        raw_spin_lock_irq(&base->lock);
                         base->running_timer = NULL;
                         timer_sync_wait_running(base);
-                        raw_spin_lock_irq(&base->lock);
                 }
         }
 }