netfilter: nfnetlink_queue: return error number to caller
Instead of returning -1 on error, return an error number so that the
caller can handle some errors differently. ECANCELED indicates that the
hook is going away and should be ignored. A followup patch will
introduce more 'ignore this hook' conditions (depending on queue
settings) and will move the kfree_skb responsibility to the caller.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
commit f158508618
parent 5f2cafe736
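The change is easiest to read as a calling-convention switch. A minimal
before/after sketch (argument lists elided, not literal code from the
patch):

	/* before: boolean-style, every failure looks alike */
	if (!nf_queue(skb, ...))
		goto next_hook;

	/* after: errno-style; only -ECANCELED means 'ignore this hook',
	 * while other codes (-ENOENT, -ENOMEM, -ESRCH, ...) remain
	 * distinguishable for callers that want to react differently */
	ret = nf_queue(skb, ...);
	if (ret == -ECANCELED)
		goto next_hook;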
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -179,9 +179,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
 		if (ret == 0)
 			ret = -EPERM;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-			      verdict >> NF_VERDICT_BITS))
+		ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+			       verdict >> NF_VERDICT_BITS);
+		if (ret == -ECANCELED)
 			goto next_hook;
+		ret = 0;
 	}
 	rcu_read_unlock();
 	return ret;
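The unconditional ret = 0 after the queueing attempt matters because of
how the NF_HOOK wrappers interpret nf_hook_slow()'s return value. A
simplified sketch of that contract (condensed from the wrapper of this
era, not part of the patch): a return of 1 means "verdict was ACCEPT,
the caller invokes okfn() itself", while 0 means "the skb was consumed
here":

	ret = nf_hook_slow(pf, hook, skb, indev, outdev, okfn, INT_MIN);
	if (ret == 1)
		ret = okfn(skb);

Since __nf_queue() still frees the skb on failure (the followup
mentioned above moves that kfree_skb to the caller), returning 0 even on
error keeps the wrapper from touching a freed skb.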
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -125,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
 		    int (*okfn)(struct sk_buff *),
 		    unsigned int queuenum)
 {
-	int status;
+	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct net_device *physindev;
@@ -146,8 +146,10 @@ static int __nf_queue(struct sk_buff *skb,
 		goto err_unlock;
 
 	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-	if (!entry)
+	if (!entry) {
+		status = -ENOMEM;
 		goto err_unlock;
+	}
 
 	*entry = (struct nf_queue_entry) {
 		.skb	= skb,
@@ -163,9 +165,8 @@ static int __nf_queue(struct sk_buff *skb,
 	if (!try_module_get(entry->elem->owner)) {
 		rcu_read_unlock();
 		kfree(entry);
-		return 0;
+		return -ECANCELED;
 	}
-
 	/* Bump dev refs so they don't vanish while packet is out */
 	if (indev)
 		dev_hold(indev);
@@ -192,14 +193,14 @@ static int __nf_queue(struct sk_buff *skb,
 		goto err;
 	}
 
-	return 1;
+	return 0;
 
 err_unlock:
 	rcu_read_unlock();
 err:
 	kfree_skb(skb);
 	kfree(entry);
-	return 1;
+	return status;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -211,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
+	int err;
+	unsigned int queued;
 
 	if (!skb_is_gso(skb))
 		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -227,19 +230,32 @@ int nf_queue(struct sk_buff *skb,
 
 	segs = skb_gso_segment(skb, 0);
 	kfree_skb(skb);
+	/* Does not use PTR_ERR to limit the number of error codes that can be
+	 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
+	 * 'ignore this hook'.
+	 */
 	if (IS_ERR(segs))
-		return 1;
+		return -EINVAL;
 
+	queued = 0;
+	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-			      queuenum))
+		if (err == 0)
+			err = __nf_queue(segs, elem, pf, hook, indev,
+					 outdev, okfn, queuenum);
+		if (err == 0)
+			queued++;
+		else
 			kfree_skb(segs);
 		segs = nskb;
 	} while (segs);
-	return 1;
+
+	if (unlikely(err && queued))
+		err = 0;
+
+	return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
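The GSO branch above queues each segment separately, so nf_queue() has
to aggregate per-segment results into one return value. A standalone
sketch of the rule (queue_segments() and submit_one() are hypothetical
stand-ins, the latter for __nf_queue()):

	static int queue_segments(struct sk_buff *segs)
	{
		unsigned int queued = 0;
		int err = 0;

		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (err == 0)	/* stop submitting after the first error */
				err = submit_one(segs);
			if (err == 0)
				queued++;
			else
				kfree_skb(segs);	/* never queued, free it here */
			segs = nskb;
		}
		/* partial success counts as success: queued segments are now
		 * owned by the queue handler and will be reinjected later */
		if (err && queued)
			err = 0;
		return err;
	}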
@@ -247,6 +263,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	struct sk_buff *skb = entry->skb;
 	struct list_head *elem = &entry->elem->list;
 	const struct nf_afinfo *afinfo;
+	int err;
 
 	rcu_read_lock();
 
@@ -280,9 +297,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		local_bh_enable();
 		break;
 	case NF_QUEUE:
-		if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-				entry->indev, entry->outdev, entry->okfn,
-				verdict >> NF_VERDICT_BITS))
+		err = __nf_queue(skb, elem, entry->pf, entry->hook,
+				 entry->indev, entry->outdev, entry->okfn,
+				 verdict >> NF_VERDICT_BITS);
+		if (err == -ECANCELED)
 			goto next_hook;
 		break;
 	case NF_STOLEN:
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 {
 	struct sk_buff *nskb;
 	struct nfqnl_instance *queue;
-	int err;
+	int err = -ENOBUFS;
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
 	queue = instance_lookup(queuenum);
-	if (!queue)
+	if (!queue) {
+		err = -ESRCH;
 		goto err_out;
+	}
 
-	if (queue->copy_mode == NFQNL_COPY_NONE)
+	if (queue->copy_mode == NFQNL_COPY_NONE) {
+		err = -EINVAL;
 		goto err_out;
+	}
 
 	nskb = nfqnl_build_packet_message(queue, entry);
-	if (nskb == NULL)
+	if (nskb == NULL) {
+		err = -ENOMEM;
 		goto err_out;
+	}
 	spin_lock_bh(&queue->lock);
 
-	if (!queue->peer_pid)
+	if (!queue->peer_pid) {
+		err = -EINVAL;
 		goto err_out_free_nskb;
+	}
 	if (queue->queue_total >= queue->queue_maxlen) {
 		queue->queue_dropped++;
 		if (net_ratelimit())
@@ -432,7 +438,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 err_out_unlock:
 	spin_unlock_bh(&queue->lock);
 err_out:
-	return -1;
+	return err;
 }
 
 static int
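With this, nfqnl_enqueue_packet() reports why enqueueing failed instead
of a bare -1: -ESRCH when no instance is bound to the queue number,
-EINVAL for copy mode NFQNL_COPY_NONE or a missing userspace peer,
-ENOMEM when building the netlink message fails, and the initialized
default -ENOBUFS on the remaining paths such as a full queue. A hedged
sketch of the kind of caller policy the commit message anticipates;
queue_bypass is a hypothetical setting, nothing in this patch defines
it:

	err = nfqnl_enqueue_packet(entry, queuenum);
	if (err == -ESRCH && queue_bypass)	/* hypothetical queue setting */
		err = -ECANCELED;		/* treat as 'ignore this hook' */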