net/sched: store the last executed chain also for clsact egress
Currently, only 'ingress' and 'clsact ingress' qdiscs store the tc 'chain id' in the skb extension. However, userspace programs (like ovs) are able to set up egress rules, and the datapath gets confused when it doesn't find the 'chain id' for a packet that is "recirculated" by tc. Change tcf_classify() to have the same semantics as tcf_classify_ingress(), so that a single function can be called on both ingress and egress, using the tc ingress / egress block respectively. Suggested-by: Alaa Hleilel <alaa@nvidia.com> Signed-off-by: Davide Caratti <dcaratti@redhat.com> Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b2492d503b
commit
3aa2605594
|
@ -76,12 +76,10 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
|
|||
return block->q;
|
||||
}
|
||||
|
||||
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode);
|
||||
int tcf_classify_ingress(struct sk_buff *skb,
|
||||
const struct tcf_block *ingress_block,
|
||||
const struct tcf_proto *tp, struct tcf_result *res,
|
||||
bool compat_mode);
|
||||
int tcf_classify(struct sk_buff *skb,
|
||||
const struct tcf_block *block,
|
||||
const struct tcf_proto *tp, struct tcf_result *res,
|
||||
bool compat_mode);
|
||||
|
||||
#else
|
||||
static inline bool tcf_block_shared(struct tcf_block *block)
|
||||
|
@ -138,20 +136,14 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
|
|||
{
|
||||
}
|
||||
|
||||
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
static inline int tcf_classify(struct sk_buff *skb,
|
||||
const struct tcf_block *block,
|
||||
const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
return TC_ACT_UNSPEC;
|
||||
}
|
||||
|
||||
static inline int tcf_classify_ingress(struct sk_buff *skb,
|
||||
const struct tcf_block *ingress_block,
|
||||
const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
return TC_ACT_UNSPEC;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static inline unsigned long
|
||||
|
|
|
@ -4012,7 +4012,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
|
|||
qdisc_skb_cb(skb)->post_ct = false;
|
||||
mini_qdisc_bstats_cpu_update(miniq, skb);
|
||||
|
||||
switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
|
||||
switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
|
||||
case TC_ACT_OK:
|
||||
case TC_ACT_RECLASSIFY:
|
||||
skb->tc_index = TC_H_MIN(cl_res.classid);
|
||||
|
@ -5164,8 +5164,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
|
|||
skb->tc_at_ingress = 1;
|
||||
mini_qdisc_bstats_cpu_update(miniq, skb);
|
||||
|
||||
switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
|
||||
&cl_res, false)) {
|
||||
switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
|
||||
case TC_ACT_OK:
|
||||
case TC_ACT_RECLASSIFY:
|
||||
skb->tc_index = TC_H_MIN(cl_res.classid);
|
||||
|
|
|
@ -1577,20 +1577,10 @@ static inline int __tcf_classify(struct sk_buff *skb,
|
|||
#endif
|
||||
}
|
||||
|
||||
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
int tcf_classify(struct sk_buff *skb,
|
||||
const struct tcf_block *block,
|
||||
const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
u32 last_executed_chain = 0;
|
||||
|
||||
return __tcf_classify(skb, tp, tp, res, compat_mode,
|
||||
&last_executed_chain);
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_classify);
|
||||
|
||||
int tcf_classify_ingress(struct sk_buff *skb,
|
||||
const struct tcf_block *ingress_block,
|
||||
const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
|
||||
u32 last_executed_chain = 0;
|
||||
|
@ -1603,20 +1593,22 @@ int tcf_classify_ingress(struct sk_buff *skb,
|
|||
struct tc_skb_ext *ext;
|
||||
int ret;
|
||||
|
||||
ext = skb_ext_find(skb, TC_SKB_EXT);
|
||||
if (block) {
|
||||
ext = skb_ext_find(skb, TC_SKB_EXT);
|
||||
|
||||
if (ext && ext->chain) {
|
||||
struct tcf_chain *fchain;
|
||||
if (ext && ext->chain) {
|
||||
struct tcf_chain *fchain;
|
||||
|
||||
fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
|
||||
if (!fchain)
|
||||
return TC_ACT_SHOT;
|
||||
fchain = tcf_chain_lookup_rcu(block, ext->chain);
|
||||
if (!fchain)
|
||||
return TC_ACT_SHOT;
|
||||
|
||||
/* Consume, so cloned/redirect skbs won't inherit ext */
|
||||
skb_ext_del(skb, TC_SKB_EXT);
|
||||
/* Consume, so cloned/redirect skbs won't inherit ext */
|
||||
skb_ext_del(skb, TC_SKB_EXT);
|
||||
|
||||
tp = rcu_dereference_bh(fchain->filter_chain);
|
||||
last_executed_chain = fchain->index;
|
||||
tp = rcu_dereference_bh(fchain->filter_chain);
|
||||
last_executed_chain = fchain->index;
|
||||
}
|
||||
}
|
||||
|
||||
ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
|
||||
|
@ -1635,7 +1627,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
|
|||
return ret;
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_classify_ingress);
|
||||
EXPORT_SYMBOL(tcf_classify);
|
||||
|
||||
struct tcf_chain_info {
|
||||
struct tcf_proto __rcu **pprev;
|
||||
|
@ -3825,7 +3817,7 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
|
|||
|
||||
fl = rcu_dereference_bh(qe->filter_chain);
|
||||
|
||||
switch (tcf_classify(skb, fl, &cl_res, false)) {
|
||||
switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
|
||||
case TC_ACT_SHOT:
|
||||
qdisc_qstats_drop(sch);
|
||||
__qdisc_drop(skb, to_free);
|
||||
|
|
|
@ -394,7 +394,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||
list_for_each_entry(flow, &p->flows, list) {
|
||||
fl = rcu_dereference_bh(flow->filter_list);
|
||||
if (fl) {
|
||||
result = tcf_classify(skb, fl, &res, true);
|
||||
result = tcf_classify(skb, NULL, fl, &res, true);
|
||||
if (result < 0)
|
||||
continue;
|
||||
flow = (struct atm_flow_data *)res.class;
|
||||
|
|
|
@ -1665,7 +1665,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
|
|||
goto hash;
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
result = tcf_classify(skb, filter, &res, false);
|
||||
result = tcf_classify(skb, NULL, filter, &res, false);
|
||||
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
|
|
|
@ -228,7 +228,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
|
|||
/*
|
||||
* Step 2+n. Apply classifier.
|
||||
*/
|
||||
result = tcf_classify(skb, fl, &res, true);
|
||||
result = tcf_classify(skb, NULL, fl, &res, true);
|
||||
if (!fl || result < 0)
|
||||
goto fallback;
|
||||
|
||||
|
|
|
@ -317,7 +317,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
fl = rcu_dereference_bh(q->filter_list);
|
||||
result = tcf_classify(skb, fl, &res, false);
|
||||
result = tcf_classify(skb, NULL, fl, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
|
@ -242,7 +242,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||
else {
|
||||
struct tcf_result res;
|
||||
struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
|
||||
int result = tcf_classify(skb, fl, &res, false);
|
||||
int result = tcf_classify(skb, NULL, fl, &res, false);
|
||||
|
||||
pr_debug("result %d class 0x%04x\n", result, res.classid);
|
||||
|
||||
|
|
|
@ -390,7 +390,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
if (TC_H_MAJ(skb->priority) != sch->handle) {
|
||||
fl = rcu_dereference_bh(q->filter_list);
|
||||
err = tcf_classify(skb, fl, &res, false);
|
||||
err = tcf_classify(skb, NULL, fl, &res, false);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (err) {
|
||||
case TC_ACT_STOLEN:
|
||||
|
|
|
@ -91,7 +91,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
return fq_codel_hash(q, skb) + 1;
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
result = tcf_classify(skb, filter, &res, false);
|
||||
result = tcf_classify(skb, NULL, filter, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
|
@ -94,7 +94,7 @@ static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
return fq_pie_hash(q, skb) + 1;
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
result = tcf_classify(skb, filter, &res, false);
|
||||
result = tcf_classify(skb, NULL, filter, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
|
@ -1130,7 +1130,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
|
|||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
head = &q->root;
|
||||
tcf = rcu_dereference_bh(q->root.filter_list);
|
||||
while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
|
||||
while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
case TC_ACT_QUEUED:
|
||||
|
|
|
@ -238,7 +238,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
}
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
|
||||
while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
case TC_ACT_QUEUED:
|
||||
|
|
|
@ -36,7 +36,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
|
|||
int err;
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
err = tcf_classify(skb, fl, &res, false);
|
||||
err = tcf_classify(skb, NULL, fl, &res, false);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (err) {
|
||||
case TC_ACT_STOLEN:
|
||||
|
|
|
@ -39,7 +39,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
|
|||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
if (TC_H_MAJ(skb->priority) != sch->handle) {
|
||||
fl = rcu_dereference_bh(q->filter_list);
|
||||
err = tcf_classify(skb, fl, &res, false);
|
||||
err = tcf_classify(skb, NULL, fl, &res, false);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (err) {
|
||||
case TC_ACT_STOLEN:
|
||||
|
|
|
@ -690,7 +690,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
fl = rcu_dereference_bh(q->filter_list);
|
||||
result = tcf_classify(skb, fl, &res, false);
|
||||
result = tcf_classify(skb, NULL, fl, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
|
@ -257,7 +257,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
|
|||
struct tcf_result res;
|
||||
int result;
|
||||
|
||||
result = tcf_classify(skb, fl, &res, false);
|
||||
result = tcf_classify(skb, NULL, fl, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
|
@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
|
|||
return sfq_hash(q, skb) + 1;
|
||||
|
||||
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
|
||||
result = tcf_classify(skb, fl, &res, false);
|
||||
result = tcf_classify(skb, NULL, fl, &res, false);
|
||||
if (result >= 0) {
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
switch (result) {
|
||||
|
|
Loading…
Reference in New Issue