Merge branch 'handle_ing_lightweight'

Daniel Borkmann says:

====================
handle_ing update

These are a couple of cleanups to make ingress a bit more lightweight.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2015-05-11 11:10:35 -04:00
commit 3bb45001ac
3 changed files with 33 additions and 77 deletions

include/linux/netdevice.h

@@ -1655,7 +1655,11 @@ struct net_device {
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto __rcu *ingress_cl_list;
#endif
struct netdev_queue __rcu *ingress_queue;
unsigned char broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rx_cpu_rmap;
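
The new __rcu-annotated field is the chain head for the device's ingress classifiers: the tc control path publishes it under RTNL, while the receive path dereferences it locklessly from softirq context. A minimal sketch of that access pattern, reusing the field name from the hunk above (the two helpers below are hypothetical, for illustration only):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>

/* Reader side: runs in the softirq receive path under rcu_read_lock_bh(). */
static struct tcf_proto *ingress_chain_get(struct net_device *dev)
{
	return rcu_dereference_bh(dev->ingress_cl_list);
}

/* Writer side: control path under RTNL; new_head must be fully
 * initialised before it becomes visible to readers. */
static void ingress_chain_set(struct net_device *dev,
			      struct tcf_proto *new_head)
{
	ASSERT_RTNL();
	rcu_assign_pointer(dev->ingress_cl_list, new_head);
}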

net/core/dev.c

@@ -3521,49 +3521,41 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for a few
 * useless instructions (a compare and two extra stores) when
 * sch_ingress is not loaded but CONFIG_NET_CLS_ACT is enabled.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
*
*/
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
int result = TC_ACT_OK;
struct Qdisc *q;
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
q = rcu_dereference(rxq->qdisc);
if (q != &noop_qdisc) {
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
result = qdisc_enqueue_root(skb, q);
}
return result;
}
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
struct tcf_result cl_res;
if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
/* If there's at least one ingress present somewhere (so
* we get here via enabled static key), remaining devices
* that are not configured with an ingress qdisc will bail
* out here.
*/
if (!cl)
return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
switch (ing_filter(skb, rxq)) {
qdisc_bstats_update_cpu(cl->q, skb);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
switch (tc_classify(skb, cl, &cl_res)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
break;
case TC_ACT_SHOT:
qdisc_qstats_drop_cpu(cl->q);
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
kfree_skb(skb);
return NULL;
default:
break;
}
return skb;
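
For context on the static-key remark in the comment above: handle_ing() is only entered when at least one ingress qdisc exists somewhere in the system, which ingress_init()/ingress_destroy() signal via net_inc_ingress_queue()/net_dec_ingress_queue() (both visible in the sch_ingress.c hunks below). A rough sketch of that gating, assuming the counter is implemented as a static key in net/core/dev.c; the exact upstream code may differ:

#include <linux/jump_label.h>
#include <linux/export.h>

static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);

/*
 * In __netif_receive_skb_core(), the ingress hook then reduces to
 * roughly the following, so devices on a box with no ingress qdisc
 * configured anywhere never pay for the call:
 *
 *	if (static_key_false(&ingress_needed)) {
 *		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 *		if (!skb)
 *			goto out;
 *	}
 */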

net/sched/sch_ingress.c

@@ -12,16 +12,10 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
struct ingress_qdisc_data {
struct tcf_proto __rcu *filter_list;
};
/* ------------------------- Class/flow operations ------------------------- */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
@@ -49,45 +43,11 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
unsigned long cl)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
return &p->filter_list;
return &dev->ingress_cl_list;
}
/* --------------------------- Qdisc operations ---------------------------- */
static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
struct tcf_result res;
struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
int result;
result = tc_classify(skb, fl, &res);
qdisc_bstats_update_cpu(sch, skb);
switch (result) {
case TC_ACT_SHOT:
result = TC_ACT_SHOT;
qdisc_qstats_drop_cpu(sch);
break;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
result = TC_ACT_STOLEN;
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
skb->tc_index = TC_H_MIN(res.classid);
default:
result = TC_ACT_OK;
break;
}
return result;
}
/* ------------------------------------------------------------- */
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
net_inc_ingress_queue();
@@ -98,9 +58,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
tcf_destroy_chain(&p->filter_list);
tcf_destroy_chain(&dev->ingress_cl_list);
net_dec_ingress_queue();
}
@@ -111,6 +71,7 @@ static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
@@ -131,8 +92,6 @@ static const struct Qdisc_class_ops ingress_class_ops = {
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_qdisc_data),
.enqueue = ingress_enqueue,
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
@@ -149,6 +108,7 @@ static void __exit ingress_module_exit(void)
unregister_qdisc(&ingress_qdisc_ops);
}
module_init(ingress_module_init)
module_exit(ingress_module_exit)
module_init(ingress_module_init);
module_exit(ingress_module_exit);
MODULE_LICENSE("GPL");
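
From the user's point of view nothing changes: ingress policies are still attached the same way, for example (illustrative commands; eth0 and the u32 match are placeholders):

tc qdisc add dev eth0 handle ffff: ingress
tc filter add dev eth0 parent ffff: protocol ip u32 \
	match ip src 10.0.0.0/8 action drop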