ucount: Remove the atomicity from ucount->count

Always increment/decrement ucount->count under the ucounts_lock. The
increments are there already, and moving the decrements there makes the
locking logic of the code simpler. This simplification of the locking
logic fixes a race between put_ucounts and get_ucounts that could
result in a use-after-free: the count could drop to zero, the ucounts
could then be found by get_ucounts, and then be freed by put_ucounts.
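
To make the window concrete, here is a minimal userspace sketch of the
racy pattern, not the kernel code itself: the names (obj, get_obj,
put_obj, table_slot) are hypothetical, a pthread mutex stands in for
ucounts_lock, a C11 _Atomic int for atomic_t, and a single pointer
slot for the ucounts hash table.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        _Atomic int count;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table_slot;          /* hash table analogue */

static struct obj *get_obj(void)
{
        struct obj *o;

        pthread_mutex_lock(&lock);
        o = table_slot;                 /* find_ucounts() analogue */
        if (o)
                atomic_fetch_add(&o->count, 1); /* can revive count == 0 */
        pthread_mutex_unlock(&lock);
        return o;
}

static void put_obj(struct obj *o)
{
        /* The decrement happens OUTSIDE the lock; if it hits zero,
         * the object is still findable until the unhash below. */
        if (atomic_fetch_sub(&o->count, 1) == 1) {
                pthread_mutex_lock(&lock);
                table_slot = NULL;      /* hlist_del_init() analogue */
                pthread_mutex_unlock(&lock);
                free(o);        /* get_obj() may already hold this pointer */
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->count = 1;
        table_slot = o;
        /* Single-threaded here; the use-after-free needs put_obj() in
         * one thread racing get_obj() in another. */
        put_obj(o);
        return 0;
}

The race is the gap in put_obj() between the atomic decrement hitting
zero and the unhash under the lock: in that gap get_obj() can still
find the object and bump its count back up from zero, after which the
free() leaves the other thread holding freed memory.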

A bug, presumably this one, was found by a combination of syzkaller
and KASAN. JongHwan Kim reported the syzkaller failure and Dmitry
Vyukov spotted the race in the code.

Cc: stable@vger.kernel.org
Fixes: f6b2db1a3e ("userns: Make the count of user namespaces per user")
Reported-by: JongHwan Kim <zzoru007@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrei Vagin <avagin@gmail.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>

diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,7 +72,7 @@ struct ucounts {
         struct hlist_node node;
         struct user_namespace *ns;
         kuid_t uid;
-        atomic_t count;
+        int count;
         atomic_t ucount[UCOUNT_COUNTS];
 };
 
diff --git a/kernel/ucount.c b/kernel/ucount.c
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 
                 new->ns = ns;
                 new->uid = uid;
-                atomic_set(&new->count, 0);
+                new->count = 0;
 
                 spin_lock_irq(&ucounts_lock);
                 ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                         ucounts = new;
                 }
         }
-        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+        if (ucounts->count == INT_MAX)
                 ucounts = NULL;
+        else
+                ucounts->count += 1;
         spin_unlock_irq(&ucounts_lock);
         return ucounts;
 }
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
 {
         unsigned long flags;
 
-        if (atomic_dec_and_test(&ucounts->count)) {
-                spin_lock_irqsave(&ucounts_lock, flags);
+        spin_lock_irqsave(&ucounts_lock, flags);
+        ucounts->count -= 1;
+        if (!ucounts->count)
                 hlist_del_init(&ucounts->node);
-                spin_unlock_irqrestore(&ucounts_lock, flags);
+        else
+                ucounts = NULL;
+        spin_unlock_irqrestore(&ucounts_lock, flags);
 
-                kfree(ucounts);
-        }
+        kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
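
In terms of the userspace sketch above, the fixed put looks like the
following (again hypothetical code, mirroring the new put_ucounts()).
Once every access happens under the lock, count no longer needs to be
atomic at all, which is what the patch title refers to.

static void put_obj_fixed(struct obj *o)
{
        pthread_mutex_lock(&lock);
        o->count -= 1;                  /* a plain int suffices under the lock */
        if (!o->count)
                table_slot = NULL;      /* last reference gone: unhash */
        else
                o = NULL;               /* others remain: free nothing */
        pthread_mutex_unlock(&lock);
        free(o);                        /* free(NULL) is a no-op */
}

Because the decrement and the zero check both happen under the lock, a
concurrent get_obj() can never observe a still-hashed object whose
count has already reached zero, which closes the use-after-free window.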