Re: [Cake] [PATCH net-next v9 2/7] sch_cake: Add ingress mode

2018-05-08 Thread Sebastian Moeller


> On May 8, 2018, at 16:34, Toke Høiland-Jørgensen  wrote:
> [...]
> 
> This commit also adds a separate switch to enable ingress mode rate
> autoscaling. If enabled, the autoscaling code will observe the actual
> traffic rate and adjust the shaper rate to match it. This can help avoid
> latency increases in the case where the actual bottleneck rate decreases
> below the shaped rate. The scaling filters out spikes by an EWMA filter.
[...]

This reminds me of a discussion I had with a user who tried the 
autorate-ingress feature unsuccessfully; it seems he would have needed an 
additional toggle to set a lower bound for the bandwidth, because due to some 
quirkiness his ingress did not so much get autorated as throttled. So 
@Jonathan and @Toke, is that just an unfortunate soul who can't be helped, or 
does such an additional toggle make some sense (if only as a 
belt-and-suspenders kind of thing)? If yes, I might try to actually test it.
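
To make the idea concrete, here is a purely hypothetical sketch of such a 
floor; rate_floor_bps is a made-up option that exists nowhere in the patch:

/* Hypothetical "safety belt": a user-configurable floor (rate_floor_bps,
 * not an existing option) below which the autorate logic would never
 * pull the shaper, no matter what the rate estimator observes.
 */
static u64 clamp_autorate(u64 estimated_bps, u64 rate_floor_bps)
{
	return estimated_bps > rate_floor_bps ? estimated_bps
					      : rate_floor_bps;
}

/* e.g. where the patch currently does
 *	q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
 * one could instead do
 *	q->rate_bps = clamp_autorate((q->avg_peak_bandwidth * 15) >> 4,
 *				     q->rate_floor_bps);
 */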

Best Regards
Sebastian


[Cake] [PATCH net-next v9 2/7] sch_cake: Add ingress mode

2018-05-08 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.
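
In code, this accounting amounts to a guard of the following shape (it 
appears in the cake_drop() hunk below): dropped packets still advance the 
shaper clock when ingress mode is active.

	/* Ingress mode: the packet being dropped has already crossed the
	 * real bottleneck, so its transmission time is still charged to
	 * the shaper before the skb is freed.
	 */
	if (q->rate_flags & CAKE_FLAG_INGRESS)
		cake_advance_shaper(q, b, skb, now, true);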

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows are so large that they saturate the bottleneck even at
their minimum window size.
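
The scaled threshold reduces to a condition of roughly this shape (a 
simplified sketch of the test extended in cobalt_should_drop() below; the 
helper name is not part of the patch):

/* Sketch: COBALT only considers a flow over target when the sojourn time
 * exceeds both the configured target and a per-flow floor.  With
 * bulk_flows active bulk flows in ingress mode this keeps roughly two
 * MTU-times of queue per flow; in egress mode bulk_flows is passed as 0,
 * leaving only the fixed 4 * MTU-time floor.
 */
static inline bool over_target(u64 sojourn, u64 target,
			       u64 mtu_time, u32 bulk_flows)
{
	return sojourn > target &&
	       sojourn > mtu_time * bulk_flows * 2 &&
	       sojourn > mtu_time * 4;
}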

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.
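
The filter itself, cake_ewma(), is not part of this hunk; a shift-based EWMA 
of roughly the following form matches how it is called in the enqueue path 
below:

/* Sketch of a shift-based EWMA consistent with the calls below: each
 * sample contributes 1/2^shift of its value.  The patch uses shift 2
 * (fast) when the sample exceeds the running average and shift 8 (slow)
 * otherwise, so the estimate follows increases quickly while short-lived
 * drops below the average are smoothed away.
 */
static u64 ewma(u64 avg, u64 sample, u32 shift)
{
	avg -= avg >> shift;
	avg += sample >> shift;
	return avg;
}

/* e.g.: avg = ewma(avg, sample, sample > avg ? 2 : 8); */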

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   78 +++---
 1 file changed, 74 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c3446a99341f..aeafbb95becd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -441,7 +441,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   cobalt_time_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool drop = false;
 
@@ -466,6 +467,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
cobalt_tdiff_t schedule = now - vars->drop_next;
 
bool over_target = sojourn > p->target &&
+  sojourn > p->mtu_time * bulk_flows * 2 &&
   sojourn > p->mtu_time * 4;
bool next_due= vars->count && schedule >= 0;
 
@@ -919,6 +921,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -995,8 +1000,39 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = now - q->last_packet_time;
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = cake_ewma(q->avg_packet_interval,
+  packet_interval,
+   packet_interval > q->avg_packet_interval ? 2 : 8);
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = now - q->avg_window_begin;
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (now - q->last_reconfig_time > (NSEC_PER_SEC / 4)) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;
+   }
 
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1251,14 +1287,26 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
 
/* Last packet in queue may be marked, shouldn't be dropped */
-   if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) ||
+   if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+   (b->bulk_flow_count *
+!!(q->rate_flags &
+