net: sched: when clearing NOLOCK, clear TCQ_F_CPUSTATS, too
Since stats updating is always consistent with the TCQ_F_CPUSTATS flag, we can disable it at qdisc creation time by flipping that bit. In my experiments, if the NOLOCK flag is cleared, per-CPU stats accounting does not give any measurable performance gain, but it wastes some memory.

Let's clear TCQ_F_CPUSTATS together with NOLOCK when enslaving a NOLOCK qdisc under a locked one. Use the stats update helpers inside pfifo_fast to cope correctly with the TCQ_F_CPUSTATS flag change.

As a side effect, the q.qlen value of any child qdisc is always consistent for all locked classful qdiscs.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9c01c9f1f2
commit 8a53e616de
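Background for the "always consistent" claim in the commit message: the new helpers introduced below branch on qdisc_is_percpu_stats(), which, to the best of my recollection, is simply a test of the TCQ_F_CPUSTATS bit being cleared here. The sketch below is an approximation of that predicate, not a quote of the tree:

/* Approximate sketch of the predicate the new helpers branch on. */
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

With TCQ_F_CPUSTATS cleared by qdisc_clear_nolock(), every later enqueue/dequeue falls through to the plain bstats/qstats/q.qlen branch, which is what a locked parent expects.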
include/net/sch_generic.h
@@ -1106,6 +1106,32 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
 	return skb;
 }
 
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+						 struct sk_buff *skb)
+{
+	if (qdisc_is_percpu_stats(sch)) {
+		qdisc_qstats_cpu_backlog_dec(sch, skb);
+		qdisc_bstats_cpu_update(sch, skb);
+		qdisc_qstats_atomic_qlen_dec(sch);
+	} else {
+		qdisc_qstats_backlog_dec(sch, skb);
+		qdisc_bstats_update(sch, skb);
+		sch->q.qlen--;
+	}
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+						 unsigned int pkt_len)
+{
+	if (qdisc_is_percpu_stats(sch)) {
+		qdisc_qstats_atomic_qlen_inc(sch);
+		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+	} else {
+		sch->qstats.backlog += pkt_len;
+		sch->q.qlen++;
+	}
+}
+
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
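The two helpers above pick between two sets of counters that coexist in struct Qdisc, and qdisc_clear_nolock() in the next hunk releases the per-CPU set. The excerpt below is abridged from memory for illustration; member order and the elided fields are approximate:

struct Qdisc {
        ...
        unsigned int                    flags;  /* TCQ_F_NOLOCK, TCQ_F_CPUSTATS, ... */
        /* per-CPU counters, used while TCQ_F_CPUSTATS is set */
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        ...
        /* locked counters, used otherwise; q.qlen is what a classful
         * parent reads directly
         */
        struct qdisc_skb_head           q;
        struct gnet_stats_basic_packed  bstats;
        struct gnet_stats_queue         qstats;
        ...
};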
net/sched/sch_api.c
@@ -998,6 +998,19 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 	qdisc_put(old);
 }
 
+static void qdisc_clear_nolock(struct Qdisc *sch)
+{
+	sch->flags &= ~TCQ_F_NOLOCK;
+	if (!(sch->flags & TCQ_F_CPUSTATS))
+		return;
+
+	free_percpu(sch->cpu_bstats);
+	free_percpu(sch->cpu_qstats);
+	sch->cpu_bstats = NULL;
+	sch->cpu_qstats = NULL;
+	sch->flags &= ~TCQ_F_CPUSTATS;
+}
+
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
  * to device "dev".
  *
@@ -1076,7 +1089,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		/* Only support running class lockless if parent is lockless */
 		if (new && (new->flags & TCQ_F_NOLOCK) &&
 		    parent && !(parent->flags & TCQ_F_NOLOCK))
-			new->flags &= ~TCQ_F_NOLOCK;
+			qdisc_clear_nolock(new);
 
 		if (!cops || !cops->graft)
 			return -EOPNOTSUPP;
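The side effect mentioned in the commit message (a consistent q.qlen for enslaved children) matters because locked classful parents read the child's queue length directly. A minimal illustration follows, assuming the stock qdisc_qlen() accessor; class_is_backlogged() is an invented name used only for this example:

/* Illustration only: how a locked parent typically tests a child's
 * backlog. Once TCQ_F_CPUSTATS is cleared on the enslaved child,
 * q.qlen is updated under the qdisc lock and this read is meaningful.
 */
static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static bool class_is_backlogged(const struct Qdisc *child)
{
        return qdisc_qlen(child) > 0;
}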
net/sched/sch_generic.c
@@ -629,11 +629,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 	if (unlikely(err))
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
-	qdisc_qstats_atomic_qlen_inc(qdisc);
-	/* Note: skb can not be used after skb_array_produce(),
-	 * so we better not use qdisc_qstats_cpu_backlog_inc()
-	 */
-	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
+	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -652,9 +648,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 		skb = __skb_array_consume(q);
 	}
 	if (likely(skb)) {
-		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
-		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_atomic_qlen_dec(qdisc);
+		qdisc_update_stats_at_dequeue(qdisc, skb);
 	} else {
 		qdisc->empty = true;
 	}