mirror of https://gitee.com/openkylin/linux.git
net: sched: introduce and use qstats read helpers
Classful qdiscs can't directly access the child qdisc's backlog
length: if such a qdisc is NOLOCK, the per-CPU values must be
accumulated instead.
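
For context: when a child qdisc runs NOLOCK, its qlen and backlog live
in per-CPU counters rather than in q.qlen and qstats, so they have to
be summed across CPUs. The existing qdisc_qlen_sum() helper does
roughly the following; this is a sketch from memory, not verbatim
kernel source:

    /* Rough sketch: for per-CPU stats (NOLOCK) qdiscs the queue length
     * must be accumulated across all possible CPUs; otherwise the
     * plain qdisc fields are authoritative. */
    static inline __u32 qdisc_qlen_sum(const struct Qdisc *sch)
    {
            __u32 qlen = sch->qstats.qlen;
            int i;

            if (qdisc_is_percpu_stats(sch)) {
                    for_each_possible_cpu(i)
                            qlen += per_cpu_ptr(sch->cpu_qstats, i)->qlen;
            } else {
                    qlen += sch->q.qlen;
            }
            return qlen;
    }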

Most qdiscs do not respect the above. As a result, qstats fetching
for most classful qdiscs is currently incorrect: if the child qdisc
is NOLOCK, a zero-length backlog is always reported.

This change introduces a pair of helpers to safely fetch both
backlog and qlen, and uses them in the class stats dumping
functions, fixing the above issue and cleaning up the code a bit.

DRR also needs to access the child qdisc queue length, so it
requires custom handling.
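
The intended calling pattern from a classful qdisc's ->dump_class_stats()
looks roughly like the sketch below; "child" and "d" are placeholders for
the class's child qdisc and the gnet_dump being filled, not names taken
from this patch:

    __u32 qlen, backlog;

    /* Aggregate the child's qlen/backlog in a NOLOCK-safe way when the
     * qdisc keeps its own per-class qstats... */
    qdisc_qstats_qlen_backlog(child, &qlen, &backlog);

    /* ...or copy the child's whole queue stats block directly. */
    if (qdisc_qstats_copy(d, child) < 0)
            return -1;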

Fixes: c5ad119fb6 ("net: sched: pfifo_fast use skb_array")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

parent 0db6f8befc
commit 5dd431b6b9

@@ -923,6 +923,24 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
         sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+        __u32 qlen = qdisc_qlen_sum(sch);
+
+        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+                                             __u32 *backlog)
+{
+        struct gnet_stats_queue qstats = { 0 };
+        __u32 len = qdisc_qlen_sum(sch);
+
+        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+        *qlen = qstats.qlen;
+        *backlog = qstats.backlog;
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
         qh->head = NULL;
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
         struct cbq_sched_data *q = qdisc_priv(sch);
         struct cbq_class *cl = (struct cbq_class *)arg;
+        __u32 qlen;
 
         cl->xstats.avgidle = cl->avgidle;
         cl->xstats.undertime = 0;
+        qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
         if (cl->undertime != PSCHED_PASTPERFECT)
                 cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                   d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+            gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                 return -1;
 
         return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -269,7 +269,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                 struct gnet_dump *d)
 {
         struct drr_class *cl = (struct drr_class *)arg;
-        __u32 qlen = cl->qdisc->q.qlen;
+        __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+        struct Qdisc *cl_q = cl->qdisc;
         struct tc_drr_stats xstats;
 
         memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +280,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                   d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+            gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                 return -1;
 
         return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
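
Why DRR gets custom handling (as far as I recall, consistent with the
hunk above): drr_dump_class_stats() also consumes the queue length
itself, reporting the deficit only for a non-empty class, roughly:

    if (qlen)
            xstats.deficit = cl->deficit;

so it needs qlen as a value (fetched via qdisc_qlen_sum()) and cannot
simply call qdisc_qstats_copy().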
@@ -1328,8 +1328,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
         struct hfsc_class *cl = (struct hfsc_class *)arg;
         struct tc_hfsc_stats xstats;
+        __u32 qlen;
 
-        cl->qstats.backlog = cl->qdisc->qstats.backlog;
+        qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
         xstats.level  = cl->level;
         xstats.period = cl->cl_vtperiod;
         xstats.work   = cl->cl_total;
@@ -1337,7 +1338,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+            gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                 return -1;
 
         return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
         };
         __u32 qlen = 0;
 
-        if (!cl->level && cl->leaf.q) {
-                qlen = cl->leaf.q->q.qlen;
-                qs.backlog = cl->leaf.q->qstats.backlog;
-        }
+        if (!cl->level && cl->leaf.q)
+                qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
         cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                     INT_MIN, INT_MAX);
         cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
         sch = dev_queue->qdisc_sleeping;
         if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+            qdisc_qstats_copy(d, sch) < 0)
                 return -1;
         return 0;
 }
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                 sch = dev_queue->qdisc_sleeping;
                 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                           d, NULL, &sch->bstats) < 0 ||
-                    gnet_stats_copy_queue(d, NULL,
-                                          &sch->qstats, sch->q.qlen) < 0)
+                    qdisc_qstats_copy(d, sch) < 0)
                         return -1;
         }
         return 0;
@@ -344,7 +344,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
         cl_q = q->queues[cl - 1];
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                   d, NULL, &cl_q->bstats) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+            qdisc_qstats_copy(d, cl_q) < 0)
                 return -1;
 
         return 0;
@@ -365,7 +365,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
         cl_q = q->queues[cl - 1];
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                   d, NULL, &cl_q->bstats) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+            qdisc_qstats_copy(d, cl_q) < 0)
                 return -1;
 
         return 0;
@@ -655,8 +655,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
         if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                   d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-            gnet_stats_copy_queue(d, NULL,
-                                  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+            qdisc_qstats_copy(d, cl->qdisc) < 0)
                 return -1;
 
         return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
         sch = dev_queue->qdisc_sleeping;
         if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-            gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+            qdisc_qstats_copy(d, sch) < 0)
                 return -1;
         return 0;
 }