netfilter: conntrack: free extension area immediately
Instead of waiting for an RCU grace period, free the extension area
directly. This is safe because conntrack lookup does not consider
extensions. All other accesses happen while ct->ext cannot be freed:
either a reference on the conntrack has been taken, or the conntrack
hash bucket lock or the dying list spinlock is held.

This allows __krealloc to be removed in a follow-up patch; netfilter
was its only user.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit 2ad9d7747c (parent 49ca022bcc)
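The locking argument above can be pictured with a small reference-counting
model. The sketch below is a userspace illustration only, not kernel code;
struct conn, conn_get() and conn_put() are invented stand-ins for struct
nf_conn and its use counter. It shows the invariant the patch relies on:
ct->ext is only dereferenced while a reference (or the relevant lock) is
held, so the final put can free it immediately instead of deferring to
kfree_rcu().

/*
 * Simplified userspace model (illustration only, not kernel code):
 * the extension area is dereferenced only while a reference on the
 * owning object is held, so it can be freed together with the object,
 * without waiting for an RCU grace period.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int use;		/* reference count, like ct->ct_general.use */
	void *ext;		/* extension area, like ct->ext */
};

static struct conn *conn_alloc(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->use, 1);
	c->ext = calloc(1, 64);	/* pretend this holds extensions */
	return c;
}

/* Readers take a reference before touching c->ext ... */
static void conn_get(struct conn *c)
{
	atomic_fetch_add(&c->use, 1);
}

/* ... so the last put may free c->ext directly: no reader can still hold it. */
static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->use, 1) == 1) {
		free(c->ext);	/* immediate free, the point of the patch */
		free(c);
	}
}

int main(void)
{
	struct conn *c = conn_alloc();

	if (!c)
		return 1;
	conn_get(c);
	printf("ext=%p while a reference is held\n", c->ext);
	conn_put(c);	/* drop the extra reference */
	conn_put(c);	/* last reference: conn and ext freed together */
	return 0;
}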
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -43,7 +43,6 @@ enum nf_ct_ext_id {
 
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
-	struct rcu_head rcu;
 	u8 offset[NF_CT_EXT_NUM];
 	u8 len;
 	char data[0];
@@ -72,15 +71,6 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
 
 /* Destroy all relationships */
 void nf_ct_ext_destroy(struct nf_conn *ct);
-
-/* Free operation. If you want to free a object referred from private area,
- * please implement __nf_ct_ext_free() and call it.
- */
-static inline void nf_ct_ext_free(struct nf_conn *ct)
-{
-	if (ct->ext)
-		kfree_rcu(ct->ext, rcu);
-}
 
 /* Add this type, returns pointer to data or NULL. */
 void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -573,7 +573,6 @@ EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 void nf_ct_tmpl_free(struct nf_conn *tmpl)
 {
 	nf_ct_ext_destroy(tmpl);
-	nf_ct_ext_free(tmpl);
 
 	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
 		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
@@ -1417,7 +1416,6 @@ void nf_conntrack_free(struct nf_conn *ct)
 	WARN_ON(atomic_read(&ct->ct_general.use) != 0);
 
 	nf_ct_ext_destroy(ct);
-	nf_ct_ext_free(ct);
 	kmem_cache_free(nf_conntrack_cachep, ct);
 	smp_mb__before_atomic();
 	atomic_dec(&net->ct.count);
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -34,21 +34,24 @@ void nf_ct_ext_destroy(struct nf_conn *ct)
 			t->destroy(ct);
 		rcu_read_unlock();
 	}
+
+	kfree(ct->ext);
 }
 EXPORT_SYMBOL(nf_ct_ext_destroy);
 
 void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 {
 	unsigned int newlen, newoff, oldlen, alloc;
-	struct nf_ct_ext *old, *new;
 	struct nf_ct_ext_type *t;
+	struct nf_ct_ext *new;
 
 	/* Conntrack must not be confirmed to avoid races on reallocation. */
 	WARN_ON(nf_ct_is_confirmed(ct));
 
-	old = ct->ext;
 
-	if (old) {
+	if (ct->ext) {
+		const struct nf_ct_ext *old = ct->ext;
+
 		if (__nf_ct_ext_exist(old, id))
 			return NULL;
 		oldlen = old->len;
@@ -68,22 +71,18 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	rcu_read_unlock();
 
 	alloc = max(newlen, NF_CT_EXT_PREALLOC);
-	kmemleak_not_leak(old);
-	new = __krealloc(old, alloc, gfp);
+	new = krealloc(ct->ext, alloc, gfp);
 	if (!new)
 		return NULL;
 
-	if (!old) {
+	if (!ct->ext)
 		memset(new->offset, 0, sizeof(new->offset));
-		ct->ext = new;
-	} else if (new != old) {
-		kfree_rcu(old, rcu);
-		rcu_assign_pointer(ct->ext, new);
-	}
+
 	new->offset[id] = newoff;
 	new->len = newlen;
 	memset((void *)new + newoff, 0, newlen - newoff);
 
+	ct->ext = new;
 	return (void *)new + newoff;
 }
 EXPORT_SYMBOL(nf_ct_ext_add);
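A note on the __krealloc() removal mentioned in the changelog: unlike
krealloc(), __krealloc() never freed the originally passed buffer, which
is what made the old kfree_rcu(old, rcu) handling possible. Plain
krealloc() may free or reuse the old allocation itself, and on failure it
returns NULL and leaves the old buffer untouched. The sketch below is a
userspace analogy only (realloc() standing in for krealloc(); ext_grow()
is an invented helper), showing the same pattern nf_ct_ext_add() now
follows: keep the result in a separate pointer, bail out on NULL, and
publish only on success.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Userspace stand-in (illustration only) for the krealloc() pattern used
 * in nf_ct_ext_add(): a failed reallocation leaves the original buffer
 * intact and still owned by the caller; on success the old buffer may
 * already have been freed by realloc() itself, so no separate free of
 * the old pointer is needed.
 */
static void *ext_grow(void **ext, size_t oldlen, size_t newlen)
{
	void *new = realloc(*ext, newlen);

	if (!new)
		return NULL;		/* *ext is untouched and still valid */

	memset((char *)new + oldlen, 0, newlen - oldlen);	/* zero the tail */
	*ext = new;			/* publish only after success */
	return (char *)new + oldlen;
}

int main(void)
{
	void *ext = calloc(1, 16);
	void *area;

	if (!ext)
		return 1;
	area = ext_grow(&ext, 16, 48);	/* grow and get the new tail */
	printf("new area at %p\n", area);
	free(ext);
	return 0;
}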