net: sched: pie: add derandomization mechanism

Random dropping of packets to achieve latency control may
introduce outlier situations where packets are dropped too
close to each other or too far from each other. This can
cause the real drop percentage to temporarily deviate from
the intended drop probability. In certain scenarios, such
as a small number of simultaneous TCP flows, these
outliers can cause significant fluctuations in link
utilization and queuing latency.

RFC 8033 suggests using a derandomization mechanism to avoid
these deviations.

Signed-off-by: Mohit P. Tahiliani <tahiliani@nitk.edu.in>
Signed-off-by: Dhaval Khandla <dhavaljkhandla26@gmail.com>
Signed-off-by: Hrishikesh Hiraskar <hrishihiraskar@gmail.com>
Signed-off-by: Manish Kumar B <bmanish15597@gmail.com>
Signed-off-by: Sachin D. Patil <sdp.sachin@gmail.com>
Signed-off-by: Leslie Monis <lesliemonis@gmail.com>
Acked-by: Dave Taht <dave.taht@gmail.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Mohit P. Tahiliani 2019-02-26 00:40:00 +05:30 committed by David S. Miller
parent 3f7ae5f3dc
commit 95400b975d
1 changed file with 28 additions and 2 deletions

View File

@@ -55,8 +55,10 @@ struct pie_vars {
 	psched_time_t qdelay_old;
 	u64 dq_count;		/* measured in bytes */
 	psched_time_t dq_tstamp;	/* drain rate */
+	u64 accu_prob;		/* accumulated drop probability */
 	u32 avg_dq_rate;	/* bytes per pschedtime tick,scaled */
 	u32 qlen_old;		/* in bytes */
+	u8 accu_prob_overflows;	/* overflows of accu_prob */
 };

/* statistics gathering */
@@ -91,9 +93,11 @@ static void pie_params_init(struct pie_params *params)
 static void pie_vars_init(struct pie_vars *vars)
 {
 	vars->dq_count = DQCOUNT_INVALID;
+	vars->accu_prob = 0;
 	vars->avg_dq_rate = 0;
 	/* default of 150 ms in pschedtime */
 	vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
+	vars->accu_prob_overflows = 0;
 }

 static bool drop_early(struct Qdisc *sch, u32 packet_size)
@@ -128,10 +132,30 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
 	else
 		local_prob = q->vars.prob;

+	if (local_prob == 0) {
+		q->vars.accu_prob = 0;
+		q->vars.accu_prob_overflows = 0;
+	}
+
+	if (local_prob > MAX_PROB - q->vars.accu_prob)
+		q->vars.accu_prob_overflows++;
+	q->vars.accu_prob += local_prob;
+
+	if (q->vars.accu_prob_overflows == 0 &&
+	    q->vars.accu_prob < (MAX_PROB / 100) * 85)
+		return false;
+	if (q->vars.accu_prob_overflows == 8 &&
+	    q->vars.accu_prob >= MAX_PROB / 2)
+		return true;
+
 	prandom_bytes(&rnd, 8);
-	if (rnd < local_prob)
+	if (rnd < local_prob) {
+		q->vars.accu_prob = 0;
+		q->vars.accu_prob_overflows = 0;
 		return true;
+	}

 	return false;
 }
@@ -168,6 +192,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,

 out:
 	q->stats.dropped++;
+	q->vars.accu_prob = 0;
+	q->vars.accu_prob_overflows = 0;
 	return qdisc_drop(skb, sch, to_free);
 }