sched: act: ife: migrate to use per-cpu counters
This patch migrates the current counter handling, which is protected by a
spinlock, to per-cpu counter handling. This reduces the time the spinlock
is held.

Signed-off-by: Alexander Aring <aring@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 734534e9a8
commit ced273eacf
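In short, the patch trades lock-protected shared counters for one counter slot
per CPU. The before/after shape of the datapath accounting, condensed from the
hunks below into a side-by-side sketch (not a literal excerpt):

        /* Before: every packet serializes on the action's spinlock. */
        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);   /* byte/packet counters */
        spin_unlock(&ife->tcf_lock);

        /* After: each CPU bumps its own slot; no lock on the fast path. */
        bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);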
@@ -477,7 +477,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
-				     bind, false);
+				     bind, true);
 		if (ret)
 			return ret;
 
 		ret = ACT_P_CREATED;
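The flipped boolean in this first hunk is tcf_idr_create()'s final cpustats
parameter. Passing true asks the action core to allocate per-CPU statistics
areas instead of relying on the counters embedded in the action, roughly
(condensed from net/sched/act_api.c of the same era; error handling omitted):

        if (cpustats) {
        	/* one gnet_stats_basic_cpu / gnet_stats_queue slot per CPU */
        	p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
        	p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
        }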
@@ -638,19 +638,15 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 	u8 *tlv_data;
 	u16 metalen;
 
-	spin_lock(&ife->tcf_lock);
-	bstats_update(&ife->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
 	tcf_lastuse_update(&ife->tcf_tm);
-	spin_unlock(&ife->tcf_lock);
 
 	if (skb_at_tc_ingress(skb))
 		skb_push(skb, skb->dev->hard_header_len);
 
 	tlv_data = ife_decode(skb, &metalen);
 	if (unlikely(!tlv_data)) {
-		spin_lock(&ife->tcf_lock);
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
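With the decode path converted, each packet now touches only the local CPU's
counters. bstats_cpu_update() is safe without tcf_lock because tc actions run
in softirq context with the CPU pinned for the duration of the update, and the
write is wrapped in a u64_stats_sync sequence so 32-bit readers still see
consistent 64-bit values (condensed from include/net/sch_generic.h):

        static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
        				     const struct sk_buff *skb)
        {
        	u64_stats_update_begin(&bstats->syncp);
        	bstats_update(&bstats->bstats, skb);
        	u64_stats_update_end(&bstats->syncp);
        }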
@@ -668,14 +664,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 			 */
 			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
 					    mtype, dlen);
-			ife->tcf_qstats.overlimits++;
+			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		}
 	}
 
 	if (WARN_ON(tlv_data != ifehdr_end)) {
-		spin_lock(&ife->tcf_lock);
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
@@ -727,23 +721,20 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 		exceed_mtu = true;
 	}
 
-	spin_lock(&ife->tcf_lock);
-	bstats_update(&ife->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
 	tcf_lastuse_update(&ife->tcf_tm);
 
 	if (!metalen) {		/* no metadata to send */
 		/* abuse overlimits to count when we allow packet
 		 * with no metadata
 		 */
-		ife->tcf_qstats.overlimits++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return action;
 	}
 	/* could be stupid policy setup or mtu config
 	 * so lets be conservative.. */
 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
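The encode path gets the same treatment: the drop/overlimit accounting no
longer needs the lock, so the early-return branches lose their unlock calls
entirely. The per-CPU qstats helpers are plain non-atomic increments, which is
sufficient because each CPU only ever writes its own slot (condensed from
include/net/sch_generic.h):

        static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
        {
        	qstats->drops++;
        }

        static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
        {
        	qstats->overlimits++;
        }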
@@ -752,6 +743,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 
 	ife_meta = ife_encode(skb, metalen);
 
+	spin_lock(&ife->tcf_lock);
+
 	/* XXX: we dont have a clever way of telling encode to
 	 * not repeat some of the computations that are done by
 	 * ops->presence_check...
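Note that the encode-side lock does not disappear entirely: this hunk
reintroduces tcf_lock around the metadata encode loop, since the walk over the
action's metadata list still appears to need serializing. Only the statistics
updates move off the lock.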
@@ -763,8 +756,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 		}
 		if (err < 0) {
 			/* too corrupt to keep around if overwritten */
-			ife->tcf_qstats.drops++;
 			spin_unlock(&ife->tcf_lock);
+			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 			return TC_ACT_SHOT;
 		}
 		skboff += err;
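Dump time is where the per-CPU slots are folded back into a single total: the
stats core walks every possible CPU and sums its slot under the u64_stats
sequence. A condensed sketch of that reader side, modelled on
__gnet_stats_copy_basic_cpu() in net/core/gen_stats.c (the wrapper name here
is illustrative):

        static void sum_cpu_bstats(struct gnet_stats_basic_packed *bstats,
        			   struct gnet_stats_basic_cpu __percpu *cpu)
        {
        	int i;

        	for_each_possible_cpu(i) {
        		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
        		unsigned int start;
        		u64 bytes;
        		u32 packets;

        		/* retry if a writer on that CPU raced with us */
        		do {
        			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
        			bytes = bcpu->bstats.bytes;
        			packets = bcpu->bstats.packets;
        		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

        		bstats->bytes += bytes;
        		bstats->packets += packets;
        	}
        }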