net: sched: avoid costly atomic operation in fq_dequeue()

The standard qdisc API to set up a timer implies an atomic operation on every
packet dequeue: qdisc_unthrottled().
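
For context, qdisc_throttled()/qdisc_unthrottled() were, roughly as they stood
in include/net/sch_generic.h at the time, thin wrappers around atomic bitops on
the qdisc state word; this is the per-packet cost being removed:

/* Approximate form of the helpers: each one is a locked (atomic)
 * read-modify-write on qdisc->state, paid on every dequeue when the
 * watchdog API is used.
 */
static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}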

It turns out this is not really needed for FQ, as FQ has no concept of global
qdisc throttling: it is a qdisc handling many different flows, some of which
may be throttled while others are not.
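
As an illustration, a minimal sketch of the per-flow logic (simplified from the
pacing check in fq_dequeue(); names as in sch_fq.c, not a literal excerpt):

/* A flow whose next transmit time has not arrived is parked on the
 * per-qdisc 'delayed' rbtree and the scheduler moves on to the next
 * active flow, so a single qdisc-wide THROTTLED bit carries no useful
 * information for FQ.
 */
if (f->head && now < f->time_next_packet) {
	head->first = f->next;
	fq_flow_set_throttled(q, f);	/* park this flow only */
	goto begin;			/* keep serving other flows */
}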

The fix is straightforward: add a 'bool throttle' argument to
qdisc_watchdog_schedule_ns(), and remove the calls to qdisc_unthrottled()
in sch_fq.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet authored 2014-10-04 10:11:31 -07:00, committed by David S. Miller
parent 681d2421e1
commit f2600cf02b
4 changed files with 9 additions and 9 deletions

include/net/pkt_sched.h

@@ -65,12 +65,12 @@ struct qdisc_watchdog {
 };
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
 
 static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
 					   psched_time_t expires)
 {
-	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
 }
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

net/sched/sch_api.c

@@ -594,13 +594,14 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 {
 	if (test_bit(__QDISC_STATE_DEACTIVATED,
 		     &qdisc_root_sleeping(wd->qdisc)->state))
 		return;
 
-	qdisc_throttled(wd->qdisc);
+	if (throttle)
+		qdisc_throttled(wd->qdisc);
 	hrtimer_start(&wd->timer,
 		      ns_to_ktime(expires),

net/sched/sch_fq.c

@@ -377,7 +377,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (time_after(jiffies, f->age + q->flow_refill_delay))
 			f->credit = max_t(u32, f->credit, q->quantum);
 		q->inactive_flows--;
-		qdisc_unthrottled(sch);
 	}
 
 	/* Note: this overwrites f->age */
@@ -385,7 +384,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (unlikely(f == &q->internal)) {
 		q->stat_internal_packets++;
-		qdisc_unthrottled(sch);
 	}
 	sch->q.qlen++;
@@ -433,7 +431,8 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 		if (!head->first) {
 			if (q->time_next_delayed_flow != ~0ULL)
 				qdisc_watchdog_schedule_ns(&q->watchdog,
-							   q->time_next_delayed_flow);
+							   q->time_next_delayed_flow,
+							   false);
 			return NULL;
 		}
 	}
@@ -495,7 +494,6 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 	}
 out:
 	qdisc_bstats_update(sch, skb);
-	qdisc_unthrottled(sch);
 	return skb;
 }

net/sched/sch_tbf.c

@@ -268,7 +268,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 		}
 
 		qdisc_watchdog_schedule_ns(&q->watchdog,
-					   now + max_t(long, -toks, -ptoks));
+					   now + max_t(long, -toks, -ptoks),
+					   true);
 
 		/* Maybe we have a shorter packet in the queue,
 		   which can be sent now. It sounds cool,