net: convert inet_frag_queue.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be used instead of atomic_t
when the variable is used as a reference counter. This allows avoiding
accidental refcounter overflows that might lead to use-after-free
situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent: 717d1e993a
commit: edcb691871
|
@ -50,7 +50,7 @@ struct inet_frag_queue {
|
|||
spinlock_t lock;
|
||||
struct timer_list timer;
|
||||
struct hlist_node list;
|
||||
atomic_t refcnt;
|
||||
refcount_t refcnt;
|
||||
struct sk_buff *fragments;
|
||||
struct sk_buff *fragments_tail;
|
||||
ktime_t stamp;
|
||||
|
@ -129,7 +129,7 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
|
|||
|
||||
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
|
||||
{
|
||||
if (atomic_dec_and_test(&q->refcnt))
|
||||
if (refcount_dec_and_test(&q->refcnt))
|
||||
inet_frag_destroy(q, f);
|
||||
}
|
||||
|
||||
|
|
|
@ -276,11 +276,11 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
|
|||
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
|
||||
{
|
||||
if (del_timer(&fq->timer))
|
||||
atomic_dec(&fq->refcnt);
|
||||
refcount_dec(&fq->refcnt);
|
||||
|
||||
if (!(fq->flags & INET_FRAG_COMPLETE)) {
|
||||
fq_unlink(fq, f);
|
||||
atomic_dec(&fq->refcnt);
|
||||
refcount_dec(&fq->refcnt);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(inet_frag_kill);
|
||||
|
@ -329,7 +329,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
|
|||
*/
|
||||
hlist_for_each_entry(qp, &hb->chain, list) {
|
||||
if (qp->net == nf && f->match(qp, arg)) {
|
||||
atomic_inc(&qp->refcnt);
|
||||
refcount_inc(&qp->refcnt);
|
||||
spin_unlock(&hb->chain_lock);
|
||||
qp_in->flags |= INET_FRAG_COMPLETE;
|
||||
inet_frag_put(qp_in, f);
|
||||
|
@ -339,9 +339,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
|
|||
#endif
|
||||
qp = qp_in;
|
||||
if (!mod_timer(&qp->timer, jiffies + nf->timeout))
|
||||
atomic_inc(&qp->refcnt);
|
||||
refcount_inc(&qp->refcnt);
|
||||
|
||||
atomic_inc(&qp->refcnt);
|
||||
refcount_inc(&qp->refcnt);
|
||||
hlist_add_head(&qp->list, &hb->chain);
|
||||
|
||||
spin_unlock(&hb->chain_lock);
|
||||
|
@ -370,7 +370,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
|
|||
|
||||
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
|
||||
spin_lock_init(&q->lock);
|
||||
atomic_set(&q->refcnt, 1);
|
||||
refcount_set(&q->refcnt, 1);
|
||||
|
||||
return q;
|
||||
}
|
||||
|
@ -405,7 +405,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
|
|||
spin_lock(&hb->chain_lock);
|
||||
hlist_for_each_entry(q, &hb->chain, list) {
|
||||
if (q->net == nf && f->match(q, key)) {
|
||||
atomic_inc(&q->refcnt);
|
||||
refcount_inc(&q->refcnt);
|
||||
spin_unlock(&hb->chain_lock);
|
||||
return q;
|
||||
}
|
||||
|
|
|
@ -312,7 +312,7 @@ static int ip_frag_reinit(struct ipq *qp)
|
|||
unsigned int sum_truesize = 0;
|
||||
|
||||
if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
|
||||
atomic_inc(&qp->q.refcnt);
|
||||
refcount_inc(&qp->q.refcnt);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue