[Cake] [PATCH net-next v8 3/7] sch_cake: Add optional ACK filter

2018-05-04 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless there are also SACK
options present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  261 +-
 1 file changed, 255 insertions(+), 6 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ca86e3ed14c..9a70e99afe7e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -127,7 +127,6 @@ struct cake_flow {
/* this stuff is all needed per-flow at dequeue time */
struct sk_buff*head;
struct sk_buff*tail;
-   struct sk_buff*ackcheck;
struct list_head  flowchain;
s32   deficit;
struct cobalt_vars cvars;
@@ -712,9 +711,6 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
if (skb) {
flow->head = skb->next;
skb->next = NULL;
-
-   if (skb == flow->ackcheck)
-   flow->ackcheck = NULL;
}
 
return skb;
@@ -732,6 +728,239 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offs

[Cake] [PATCH net-next v8 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-04 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   70 ++
 1 file changed, 70 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9a70e99afe7e..cc45a56d35d6 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -70,6 +70,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -519,6 +525,61 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   enum ip_conntrack_info ctinfo;
+   bool rev = false;
+
+   struct nf_conn *ct;
+   const struct nf_conntrack_tuple *tuple;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -546,6 +607,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1673,6 +1737,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK);
 
+   if (tb[TCA_CAKE_NAT]) {
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+   }
+
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v8 5/7] sch_cake: Add DiffServ handling

2018-05-04 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  394 +-
 1 file changed, 387 insertions(+), 7 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index cc45a56d35d6..1e5951d26ed2 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -305,6 +305,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1189,6 +1251,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   };
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Control */
+
+   default:
+   /* If there is no Diffserv field, treat as best-effort */
+   return 0;
+   };
+}
+
 static void cake_reconfigure(struct Qdisc *sch);
 
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -1203,7 +1305,19 @@ static s32 ca

[Cake] [PATCH net-next v8 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-04 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2595 
 4 files changed, 2712 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v8 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-04 Thread Toke Høiland-Jørgensen
Thank you for the review! A few comments below, I'll fix the rest.

> [...]
> 
> So sch_cake doesn't accept normal tc filters? Is this intentional?
> If so, why?

For two reasons:

- The two-level scheduling used in CAKE (tins / diffserv classes, and
  flow hashing) does not map in an obvious way to the classification
  index of tc filters.

- No one has asked for it. We have done our best to accommodate the
  features people want in a home router qdisc directly in CAKE, and the
  ability to integrate tc filters has never been requested.

>> +static u16 quantum_div[CAKE_QUEUES + 1] = {0};
>> +
>> +#define REC_INV_SQRT_CACHE (16)
>> +static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
>> +
>> +/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
>> + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
>> + *
>> + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
>> + */
>> +
>> +static void cobalt_newton_step(struct cobalt_vars *vars)
>> +{
>> +   u32 invsqrt = vars->rec_inv_sqrt;
>> +   u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
>> +   u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
>> +
>> +   val >>= 2; /* avoid overflow in following multiply */
>> +   val = (val * invsqrt) >> (32 - 2 + 1);
>> +
>> +   vars->rec_inv_sqrt = val;
>> +}
>> +
>> +static void cobalt_invsqrt(struct cobalt_vars *vars)
>> +{
>> +   if (vars->count < REC_INV_SQRT_CACHE)
>> +   vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
>> +   else
>> +   cobalt_newton_step(vars);
>> +}
>
> Looks pretty much duplicated with codel...

Cobalt is derived from CoDel, and so naturally shares some features with
it. However, it is quite different in other respects, so we can't just
use the existing CoDel code for the parts that are similar. We don't
feel quite confident enough in Cobalt (yet) to propose it replace CoDel
everywhere else in the kernel; so we have elected to keep it internal to
CAKE instead.

>> [...]
>>
>> +static int cake_init(struct Qdisc *sch, struct nlattr *opt,
>> +struct netlink_ext_ack *extack)
>> +{
>> +   struct cake_sched_data *q = qdisc_priv(sch);
>> +   int i, j;
>> +
>> +   sch->limit = 10240;
>> +   q->tin_mode = CAKE_DIFFSERV_BESTEFFORT;
>> +   q->flow_mode  = CAKE_FLOW_TRIPLE;
>> +
>> +   q->rate_bps = 0; /* unlimited by default */
>> +
>> +   q->interval = 10; /* 100ms default */
>> +   q->target   =   5000; /* 5ms: codel RFC argues
>> +  * for 5 to 10% of interval
>> +  */
>> +
>> +   q->cur_tin = 0;
>> +   q->cur_flow  = 0;
>> +
>> +   if (opt) {
>> +   int err = cake_change(sch, opt, extack);
>> +
>> +   if (err)
>> +   return err;
>
>
> Not sure if you really want to reallocate q->tines below for this
> case.

I'm not sure what you mean here? If there's an error we return it and
the qdisc is not created. If there's not, we allocate and on subsequent
changes cake_change() will be called directly, or? Can the init function
ever be called again during the lifetime of the qdisc?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] Hopefully fixed ACK filter for v6

2018-05-06 Thread Toke Høiland-Jørgensen
Jonathan Morton  writes:

>>> I certainly am thankful for your work, and believe you deserve $CAKE
>>> and $BEVERAGE, I am also leaf to believe 'the cake is a lie'
>>> https://m.youtube.com/watch?v=qdrs3gr_GAs ;)
>> 
>> Haha, yes, of course I am aware that the cake really is a lie. Which
>> makes us in league with GLADOS, I suppose, since we're promising
>> everyone CAKE ;)
>
> In lieu of sending a home-baked cake to Denmark, I have obtained an
> allegedly Danish apple cake from the local supermarket. Though with
> the inherent ambiguity of the English language, it's unclear whether
> it is the cake itself that's supposed to be Danish, or the apples used
> to make it. (According to the ingredients list, only 2% of the cake is
> actually apples.)

Haha, enjoy. Though I will take no responsibility for the quality of
"Danish" cuisine sold at your local supermarket ;)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v8 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-07 Thread Toke Høiland-Jørgensen
Cong Wang  writes:

> On Fri, May 4, 2018 at 12:10 PM, Toke Høiland-Jørgensen  wrote:
>> Thank you for the review! A few comments below, I'll fix the rest.
>>
>>> [...]
>>>
>>> So sch_cake doesn't accept normal tc filters? Is this intentional?
>>> If so, why?
>>
>> For two reasons:
>>
>> - The two-level scheduling used in CAKE (tins / diffserv classes, and
>>   flow hashing) does not map in an obvious way to the classification
>>   index of tc filters.
>
> Sounds like you need to extend struct tcf_result?

Well, the obvious way to support filters would be to have skb->priority
override the diffserv mapping if set, and have the filter classification
result select the queue within that tier. That would probably be doable,
but see below.

>> - No one has asked for it. We have done our best to accommodate the
>>   features people want in a home router qdisc directly in CAKE, and the
>>   ability to integrate tc filters has never been requested.
>
> It is not hard to integrate, basically you need to call
> tcf_classify(). Although it is not mandatory, it is odd to merge a
> qdisc doesn't work with existing tc filters (and actions too).

I looked at the fq_codel code to do this. Is it possible to support
filtering without implementing Qdisc_class_ops? If so, I'll give it a
shot; but implementing the class ops is more than I can commit to...

>>>> +static int cake_init(struct Qdisc *sch, struct nlattr *opt,
>>>> +struct netlink_ext_ack *extack)
>>>> +{
>>>> +   struct cake_sched_data *q = qdisc_priv(sch);
>>>> +   int i, j;
>>>> +
>>>> +   sch->limit = 10240;
>>>> +   q->tin_mode = CAKE_DIFFSERV_BESTEFFORT;
>>>> +   q->flow_mode  = CAKE_FLOW_TRIPLE;
>>>> +
>>>> +   q->rate_bps = 0; /* unlimited by default */
>>>> +
>>>> +   q->interval = 10; /* 100ms default */
>>>> +   q->target   =   5000; /* 5ms: codel RFC argues
>>>> +  * for 5 to 10% of interval
>>>> +  */
>>>> +
>>>> +   q->cur_tin = 0;
>>>> +   q->cur_flow  = 0;
>>>> +
>>>> +   if (opt) {
>>>> +   int err = cake_change(sch, opt, extack);
>>>> +
>>>> +   if (err)
>>>> +   return err;
>>>
>>>
>>> Not sure if you really want to reallocate q->tines below for this
>>> case.
>>
>> I'm not sure what you mean here? If there's an error we return it and
>> the qdisc is not created. If there's not, we allocate and on subsequent
>> changes cake_change() will be called directly, or? Can the init function
>> ever be called again during the lifetime of the qdisc?
>>
>
> In non-error case, you call cake_change() first and then allocate
> ->tins with kvzalloc() below. For me it looks like you don't need to
> allocate it again when ->tins!=NULL.

No, we definitely don't. It's just not clear to me how cake_init() could
ever be called with q->tins already allocated?

I can add a check in any case, though, I see that there is one in
fq_codel as well...

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v9 2/7] sch_cake: Add ingress mode

2018-05-08 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   78 +++---
 1 file changed, 74 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c3446a99341f..aeafbb95becd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -441,7 +441,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   cobalt_time_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool drop = false;
 
@@ -466,6 +467,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
cobalt_tdiff_t schedule = now - vars->drop_next;
 
bool over_target = sojourn > p->target &&
+  sojourn > p->mtu_time * bulk_flows * 2 &&
   sojourn > p->mtu_time * 4;
bool next_due= vars->count && schedule >= 0;
 
@@ -919,6 +921,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -995,8 +1000,39 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = now - q->last_packet_time;
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = cake_ewma(q->avg_packet_interval,
+  packet_interval,
+   packet_interval > q->avg_packet_interval ? 2 : 8);
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = now - q->avg_window_begin;
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (now - q->last_reconfig_time > (NSEC_PER_SEC / 4)) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;
+   }
 
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1251,14 +1287,26 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
 
/* Last packet in queue may be marked, shouldn't be dropped */
-   if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) ||
+

[Cake] [PATCH net-next v9 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-08 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that has seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2686 
 4 files changed, 2803 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v9 3/7] sch_cake: Add optional ACK filter

2018-05-08 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless there are also SACK
options present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  264 +-
 1 file changed, 258 insertions(+), 6 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index aeafbb95becd..7e57eef5f949 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -128,7 +128,6 @@ struct cake_flow {
/* this stuff is all needed per-flow at dequeue time */
struct sk_buff*head;
struct sk_buff*tail;
-   struct sk_buff*ackcheck;
struct list_head  flowchain;
s32   deficit;
struct cobalt_vars cvars;
@@ -748,9 +747,6 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
if (skb) {
flow->head = skb->next;
skb->next = NULL;
-
-   if (skb == flow->ackcheck)
-   flow->ackcheck = NULL;
}
 
return skb;
@@ -768,6 +764,239 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offs

[Cake] [PATCH net-next v9 5/7] sch_cake: Add DiffServ handling

2018-05-08 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches CAKE's
qdisc handle, the minor number will be interpreted as a priority tier if it is
less than or equal to the number of configured priority tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  408 +-
 1 file changed, 401 insertions(+), 7 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index a227a685bd58..6f9980a6603e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -308,6 +308,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1225,6 +1287,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v9 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-08 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   73 ++
 1 file changed, 73 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7e57eef5f949..a227a685bd58 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -522,6 +528,61 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   enum ip_conntrack_info ctinfo;
+   bool rev = false;
+
+   struct nf_conn *ct;
+   const struct nf_conntrack_tuple *tuple;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -549,6 +610,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1717,6 +1781,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK);
 
+   if (tb[TCA_CAKE_NAT]) {
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+   }
+
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
@@ -1881,6 +1951,9 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT, !!(q->flow_mode & 
CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v9 7/7] sch_cake: Conditionally split GSO segments

2018-05-08 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index af25ddb333d7..1ecb2eef958e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 #define US2TIME(a) (a * (u64)NSEC_PER_USEC)
 
 typedef u64 cobalt_time_t;
@@ -1479,36 +1480,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]+= len;
+   b->tin_b

[Cake] [PATCH net-next v9 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-08 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper, that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ecn markings, and latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1729 
 4 files changed, 1846 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..bc581473c0b0 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,109 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_BASE_RATE,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+   TCA_CAKE_ACK_FILTER,
+   TCA_CAKE_SPLIT_GSO,
+   __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX   (__

[Cake] [PATCH net-next v9 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-08 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel do not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  123 ++
 1 file changed, 122 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 6f9980a6603e..af25ddb333d7 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -276,6 +276,7 @@ enum {
 
 struct cobalt_skb_cb {
cobalt_time_t enqueue_time;
+   u32   adjusted_len;
 };
 
 static cobalt_time_t cobalt_get_time(void)
@@ -1131,6 +1132,87 @@ static cobalt_time_t cake_ewma(cobalt_time_t avg, 
cobalt_time_t sample,
return avg;
 }
 
+static u32 cake_overhead(struct cake_sched_data *q, struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   if (unlikely(shinfo->gso_size)) {
+   /* borrowed from qdisc_pkt_len_init() */
+   unsigned int hdr_len;
+
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+  SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   /* The last segment may be shorter; we ignore this, which means
+* that we will over-estimate the size of the whole GSO segment
+* by the difference in size. This is conservative, so we live
+* with that to avoid the complexity of dealing with it.
+*/
+   len = shinfo->gso_size + hdr_len;
+   }
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   get_cobalt_cb(skb)->adjusted_len = len * segs;
+   return len;
+}
+
 static void cake_heap_swap(struct cake_sched_da

Re: [Cake] [PATCH net-next v9 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-14 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Tue, 08 May 2018 16:34:19 +0200
>
>> +struct cake_flow {
>> +/* this stuff is all needed per-flow at dequeue time */
>> +struct sk_buff*head;
>> +struct sk_buff*tail;
>
> Please do not invent your own SKB list handling mechanism.

We didn't invent it, we inherited it from fq_codel. I was actually about
to fix that, but then I noticed it was still around in fq_codel, and so
let it be. I can certainly fix it anyway, but, erm, why is it acceptable
in fq_codel but not in cake? struct sk_buff_head is not that new, is it?

>> +static void cake_heapify(struct cake_sched_data *q, u16 i)
>> +{
>> +static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
>> +u32 m = i;
>> +u32 mb = cake_heap_get_backlog(q, m);
>
> Please order local variables from longest to shortest line.
>
> The entire submissions has this problem all over the place, please
> correct it patch-series wide.

Right-oh, one plantation of reverse christmas trees coming right up :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v9 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-14 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Mon, 14 May 2018 11:08:28 +0200
>
>> David Miller  writes:
>> 
>>> From: Toke Høiland-Jørgensen 
>>> Date: Tue, 08 May 2018 16:34:19 +0200
>>>
>>>> +struct cake_flow {
>>>> +  /* this stuff is all needed per-flow at dequeue time */
>>>> +  struct sk_buff*head;
>>>> +  struct sk_buff*tail;
>>>
>>> Please do not invent your own SKB list handling mechanism.
>> 
>> We didn't invent it, we inherited it from fq_codel. I was actually about
>> to fix that, but then I noticed it was still around in fq_codel, and so
>> let it be. I can certainly fix it anyway, but, erm, why is it acceptable
>> in fq_codel but not in cake? struct sk_buff_head is not that new, is it?
>
> I guess one argument has to do with the amount of memory consumed by this
> per-flow or per-queue information, right?  Because the skb queue head has
> a qlen and a spinlock regardless of whether they are used or not.
>
> Furthermore, if you use the __skb_insert() et al. helpers, even though it
> won't use the lock it will adjust the qlen counter.  And that's useless
> work since you have no use for the qlen value.

I think the useless work issue is larger than the memory usage. When
running this (or FQ-CoDel) on small memory-constrained routers, we've
mostly had issues with OOM because of the packet data, which dwarfs the
per-queue overhead.

> Taken together, it seems that what you and fq_codel are doing is not
> such a bad idea after all.  So please leave it alone.

OK. I'll just resend with prettier Christmas trees, then :)

> On-and-off again, I've looked into converting skbs to using list_head
> but it's a non-trivial set of work. All over the tree the different
> layers use the next/prev pointers in different ways. Some use it for a
> doubly linked list. Some use it for a singly linked list. Some encode
> state in the prev pointer. You name it, it's out there.
>
> I'll try to get back to that task because obviously it'll be useful to
> have code like cake and fq_codel use common helpers instead of custom
> stuff.

Yup, I agree. From a code readability point of view, I also prefer the
helpers.

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v10 2/7] sch_cake: Add ingress mode

2018-05-14 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   78 +++---
 1 file changed, 74 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index e22c712602fa..179bfa9e501f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -442,7 +442,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   cobalt_time_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
cobalt_tdiff_t sojourn, schedule;
@@ -465,6 +466,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = now - cobalt_get_enqueue_time(skb);
schedule = now - vars->drop_next;
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && schedule >= 0;
 
@@ -915,6 +917,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -990,8 +995,39 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = now - q->last_packet_time;
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = cake_ewma(q->avg_packet_interval,
+  packet_interval,
+   packet_interval > q->avg_packet_interval ? 2 : 8);
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = now - q->avg_window_begin;
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (now - q->last_reconfig_time > (NSEC_PER_SEC / 4)) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;
+   }
 
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1246,14 +1282,26 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
 
/* Last packet in queue may be marked, shouldn't be dropped */
-   if (!cobalt_should_drop(&fl

[Cake] [PATCH net-next v10 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-14 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that has seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2684 
 4 files changed, 2801 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v10 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-14 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel do not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  123 ++
 1 file changed, 122 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index ccc6f26b306c..6314a089a204 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -275,6 +275,7 @@ enum {
 
 struct cobalt_skb_cb {
cobalt_time_t enqueue_time;
+   u32   adjusted_len;
 };
 
 static cobalt_time_t cobalt_get_time(void)
@@ -1130,6 +1131,87 @@ static cobalt_time_t cake_ewma(cobalt_time_t avg, 
cobalt_time_t sample,
return avg;
 }
 
+/* Compute the overhead-compensated per-segment wire length of @skb, as
+ * used by the shaper.  For GSO super-packets the estimate is based on a
+ * single segment; the total adjusted length (per-segment length times the
+ * segment count) is cached in the cobalt skb cb as ->adjusted_len, and the
+ * per-segment adjusted length is returned.  Also updates the min/max
+ * network-layer and adjusted-length statistics in @q.
+ */
+static u32 cake_overhead(struct cake_sched_data *q, struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   if (unlikely(shinfo->gso_size)) {
+   /* borrowed from qdisc_pkt_len_init() */
+   unsigned int hdr_len;
+
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+  SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   /* GSO_DODGY segment counts are untrusted; recompute them */
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   /* The last segment may be shorter; we ignore this, which means
+* that we will over-estimate the size of the whole GSO segment
+* by the difference in size. This is conservative, so we live
+* with that to avoid the complexity of dealing with it.
+*/
+   len = shinfo->gso_size + hdr_len;
+   }
+
+   /* EWMA of the network-header offset (scaled by 2^16) for stats */
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   /* in overhead mode, account from the IP header, not the MAC header */
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   /* apply the user-configured per-packet overhead and minimum size */
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   /* round up to whole 48-byte payloads, 53 bytes on the wire */
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   get_cobalt_cb(skb)->adjusted_len = len * segs;
+   return len;
+}
+
 static void cake_heap_swap(struct cake_sched_da

[Cake] [PATCH net-next v10 3/7] sch_cake: Add optional ACK filter

2018-05-14 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless there are also SACK
options present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as the TC filter,
for instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  260 ++
 1 file changed, 258 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 179bfa9e501f..4bc178c09f3a 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -764,6 +764,239 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+/* Locate the IP header of @skb, following one level of 6in4 encapsulation
+ * (IPv6 carried directly in IPv4, protocol 41) to the inner IPv6 header.
+ * @buf is caller-provided scratch space, sized for an ipv6hdr (the larger
+ * of the two), used when the header must be copied out of a non-linear
+ * skb.  Returns NULL if no plausible IP header is found.
+ * NOTE(review): the returned pointer may actually reference an ipv6hdr
+ * despite the iphdr return type — presumably callers check iph->version;
+ * confirm at the call sites.
+ */
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   /* 6in4 tunnel: skip the IPv4 header and return the inner IPv6 one */
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+
+   /* special-case 6in4 tunnelling, as that is a common way to get
+* v6 connectivity in the home
+*/
+   if (iph->protocol == IPPROT

[Cake] [PATCH net-next v10 5/7] sch_cake: Add DiffServ handling

2018-05-14 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches CAKE's
qdisc handle, the minor number will be interpreted as a priority tier if it is
less than or equal to the number of configured priority tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  408 +-
 1 file changed, 401 insertions(+), 7 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 2802bb2ace84..ccc6f26b306c 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -307,6 +307,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+/* Map a 6-bit DSCP value (array index 0-63) to a tin by its IP
+ * precedence field (the top three DSCP bits): eight tins, one per
+ * precedence level.
+ */
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+/* DSCP -> tin lookup for the eight-tin diffserv8 mode */
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+/* DSCP -> tin lookup for the four-tin (802.11e-style) diffserv4 mode */
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Default three-tin mode: tin 0 = best effort, tin 1 = bulk (CS1),
+ * tin 2 = latency-sensitive (TOS4, VA, EF, CS6, CS7).
+ */
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Single-tin mode: every code point maps to tin 0 */
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+/* diffserv3/4 place bulk in tin 1, so it is listed before best effort */
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1224,6 +1286,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+/* Zero the DSCP bits of an IPv4/IPv6 packet while leaving the ECN bits
+ * intact (INET_ECN_MASK selects the bits to preserve).  Non-IP packets
+ * are passed through untouched.
+ */
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v10 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-14 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit based shaper, that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring, loss, ecn markings, latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1724 
 4 files changed, 1841 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..bc581473c0b0 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,109 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_BASE_RATE,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v10 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-14 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   72 ++
 1 file changed, 72 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 4bc178c09f3a..2802bb2ace84 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -522,6 +528,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+/* Rewrite the dissected flow @keys with the pre-NAT (conntrack original
+ * direction) addresses and ports, so that per-host fairness still
+ * distinguishes internal hosts behind NAT.  Only IPv4 is handled; for
+ * other protocols @keys is left unchanged.
+ */
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   /* rev: tuple was found via reverse-direction lookup, so src/dst
+* must be swapped when copied into @keys below
+*/
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   /* No conntrack entry attached to the skb; look the
+* connection up by the packet's own tuple.  A hit via
+* nf_conntrack_find_get() takes a reference on the ct,
+* which is released with nf_ct_put() below.
+*/
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   /* only rewrite ports if the dissector actually found any */
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   /* drop the reference taken by nf_conntrack_find_get() */
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+/* Stub used when conntrack is unavailable: flow keys are left as-is */
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -549,6 +609,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1716,6 +1779,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK);
 
+   if (tb[TCA_CAKE_NAT]) {
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+   }
+
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
@@ -1880,6 +1949,9 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT, !!(q->flow_mode & 
CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v10 7/7] sch_cake: Conditionally split GSO segments

2018-05-14 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 6314a089a204..0c2cf8bc0a03 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 #define US2TIME(a) (a * (u64)NSEC_PER_USEC)
 
 typedef u64 cobalt_time_t;
@@ -1477,36 +1478,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]+= len;
+   b->tin_b

Re: [Cake] [PATCH net-next v10 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-15 Thread Toke Høiland-Jørgensen
Toke Høiland-Jørgensen  writes:

> This commit adds configurable overhead compensation support to the rate
> shaper. With this feature, userspace can configure the actual bottleneck
> link overhead and encapsulation mode used, which will be used by the shaper
> to calculate the precise duration of each packet on the wire.
>
> This feature is needed because CAKE is often deployed one or two hops
> upstream of the actual bottleneck (which can be, e.g., inside a DSL or
> cable modem). In this case, the link layer characteristics and overhead
> reported by the kernel does not match the actual bottleneck. Being able to
> set the actual values in use makes it possible to configure the shaper rate
> much closer to the actual bottleneck rate (our experience shows it is
> possible to get with 0.1% of the actual physical bottleneck rate), thus
> keeping latency low without sacrificing bandwidth.
>
> The overhead compensation has three tunables: A fixed per-packet overhead
> size (which, if set, will be accounted from the IP packet header), a
> minimum packet size (MPU) and a framing mode supporting either ATM or PTM
> framing. We include a set of common keywords in TC to help users configure
> the right parameters. If no overhead value is set, the value reported by
> the kernel is used.
>
> Signed-off-by: Toke Høiland-Jørgensen 
> ---
>  net/sched/sch_cake.c |  123 
> ++
>  1 file changed, 122 insertions(+), 1 deletion(-)
>
> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
> index ccc6f26b306c..6314a089a204 100644
> --- a/net/sched/sch_cake.c
> +++ b/net/sched/sch_cake.c
> @@ -275,6 +275,7 @@ enum {
>  
>  struct cobalt_skb_cb {
>   cobalt_time_t enqueue_time;
> + u32   adjusted_len;
>  };
>  
>  static cobalt_time_t cobalt_get_time(void)
> @@ -1130,6 +1131,87 @@ static cobalt_time_t cake_ewma(cobalt_time_t avg, 
> cobalt_time_t sample,
>   return avg;
>  }
>  
> +static u32 cake_overhead(struct cake_sched_data *q, struct sk_buff *skb)
> +{
> + const struct skb_shared_info *shinfo = skb_shinfo(skb);
> + u32 off = skb_network_offset(skb);
> + u32 len = qdisc_pkt_len(skb);
> + u16 segs = 1;
> +
> + if (unlikely(shinfo->gso_size)) {
> + /* borrowed from qdisc_pkt_len_init() */
> + unsigned int hdr_len;
> +
> + hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
> +
> + /* + transport layer */
> + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
> +SKB_GSO_TCPV6))) {
> + const struct tcphdr *th;
> + struct tcphdr _tcphdr;
> +
> + th = skb_header_pointer(skb, skb_transport_offset(skb),
> + sizeof(_tcphdr), &_tcphdr);
> + if (likely(th))
> + hdr_len += __tcp_hdrlen(th);
> + } else {
> + struct udphdr _udphdr;
> +
> + if (skb_header_pointer(skb, skb_transport_offset(skb),
> +sizeof(_udphdr), &_udphdr))
> + hdr_len += sizeof(struct udphdr);
> + }
> +
> + if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
> + segs = DIV_ROUND_UP(skb->len - hdr_len,
> + shinfo->gso_size);
> + else
> + segs = shinfo->gso_segs;
> +
> + /* The last segment may be shorter; we ignore this, which means
> +  * that we will over-estimate the size of the whole GSO segment
> +  * by the difference in size. This is conservative, so we live
> +  * with that to avoid the complexity of dealing with it.
> +  */
> + len = shinfo->gso_size + hdr_len;
> + }
> +
> + q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
> +
> + if (q->rate_flags & CAKE_FLAG_OVERHEAD)
> + len -= off;
> +
> + if (q->max_netlen < len)
> + q->max_netlen = len;
> + if (q->min_netlen > len)
> + q->min_netlen = len;
> +
> + len += q->rate_overhead;
> +
> + if (len < q->rate_mpu)
> + len = q->rate_mpu;
> +
> + if (q->atm_mode == CAKE_ATM_ATM) {
> + len += 47;
> + len /= 48;
> + len *= 53;
> + } else if (q->atm_mode == CAKE_ATM_PTM) {
> + /* Add one byte per 64 bytes or part t

[Cake] [PATCH net-next v11 3/7] sch_cake: Add optional ACK filter

2018-05-15 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless there are also SACK
options present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  260 ++
 1 file changed, 258 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 8d0823d6d8dd..371c888cb982 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -763,6 +763,239 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+
+   /* special-case 6in4 tunnelling, as that is a common way to get
+* v6 connectivity in the home
+*/
+   if (iph->protocol == IPPROT

[Cake] [PATCH net-next v11 5/7] sch_cake: Add DiffServ handling

2018-05-15 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  408 +-
 1 file changed, 401 insertions(+), 7 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 14622c1d970e..4b13345f85dc 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -307,6 +307,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1223,6 +1285,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v11 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-15 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   73 ++
 1 file changed, 73 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 371c888cb982..14622c1d970e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -522,6 +528,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -549,6 +609,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1715,6 +1778,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK);
 
+   if (tb[TCA_CAKE_NAT]) {
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+   }
+
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
@@ -1880,6 +1949,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v11 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-15 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit based shaper, that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ecn markings, latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1730 
 4 files changed, 1847 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..883e84f008d7 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,109 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v11 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-15 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel does not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 4b13345f85dc..a4aad577bf8e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -275,6 +275,7 @@ enum {
 
 struct cobalt_skb_cb {
cobalt_time_t enqueue_time;
+   u32   adjusted_len;
 };
 
 static cobalt_time_t cobalt_get_time(void)
@@ -1129,6 +1130,88 @@ static cobalt_time_t cake_ewma(cobalt_time_t avg, 
cobalt_time_t sample,
return avg;
 }
 
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1206,7 +1289,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   

[Cake] [PATCH net-next v11 2/7] sch_cake: Add ingress mode

2018-05-15 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows are so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   78 +++---
 1 file changed, 74 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9d7fc1848778..8d0823d6d8dd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -442,7 +442,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   cobalt_time_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
cobalt_tdiff_t sojourn, schedule;
@@ -465,6 +466,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = now - cobalt_get_enqueue_time(skb);
schedule = now - vars->drop_next;
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && schedule >= 0;
 
@@ -914,6 +916,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -989,8 +994,39 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = now - q->last_packet_time;
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = cake_ewma(q->avg_packet_interval,
+  packet_interval,
+   packet_interval > q->avg_packet_interval ? 2 : 8);
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = now - q->avg_window_begin;
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (now - q->last_reconfig_time > (NSEC_PER_SEC / 4)) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;
+   }
 
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1245,14 +1281,26 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
 
/* Last packet in queue may be marked, shouldn't be dropped */
-   if (!cobalt_should_drop(&fl

[Cake] [PATCH net-next v11 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-15 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that has seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2692 
 4 files changed, 2809 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v11 7/7] sch_cake: Conditionally split GSO segments

2018-05-15 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index a4aad577bf8e..a0c2925b4c54 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 #define US2TIME(a) (a * (u64)NSEC_PER_USEC)
 
 typedef u64 cobalt_time_t;
@@ -1477,36 +1478,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]+= len;
+   b->tin_b

[Cake] [PATCH v8] Add support for cake qdisc

2018-05-15 Thread Toke Høiland-Jørgensen
sch_cake is intended to squeeze the most bandwidth and latency out of even
the slowest ISP links and routers, while presenting an API simple enough
that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash besteffort

Cake is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Support for DSL framing types and shapers.
* Support for ack filtering.
* Extensive statistics for measuring loss, ECN markings, and latency variation.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

Cake's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
Changelog:
v8:
  - Change rates to 64bit values (apparently, 32 Gbps is not enough for
everyone).

v7:
  - Move the target/interval presets to a table and check that only
one is passed.

v6:
  - Identical to v5 because apparently I don't git so well... :/

v5:
  - Print the SPLIT_GSO flag
  - Switch to print_u64() for JSON output
  - Fix a format string for mpu option output

v4:
  - Switch stats parsing to use nested netlink attributes
  - Tweaks to JSON stats output keys

v3:
  - Remove accidentally included test flag

v2:
  - Updated netlink config ABI
  - Remove diffserv-llt mode
  - Various tweaks and clean-ups of stats output

 man/man8/tc-cake.8 | 632 ++
 man/man8/tc.8  |   1 +
 tc/Makefile|   1 +
 tc/q_cake.c| 750 +
 4 files changed, 1384 insertions(+)
 create mode 100644 man/man8/tc-cake.8
 create mode 100644 tc/q_cake.c

diff --git a/man/man8/tc-cake.8 b/man/man8/tc-cake.8
new file mode 100644
index ..dff2e360
--- /dev/null
+++ b/man/man8/tc-cake.8
@@ -0,0 +1,632 @@
+.TH CAKE 8 "27 April 2018" "iproute2" "Linux"
+.SH NAME
+CAKE \- Common Applications Kept Enhanced (CAKE)
+.SH SYNOPSIS
+.B tc qdisc ... cake
+.br
+[
+.BR bandwidth
+RATE |
+.BR unlimited*
+|
+.BR autorate_ingress
+]
+.br
+[
+.BR rtt
+TIME |
+.BR datacentre
+|
+.BR lan
+|
+.BR metro
+|
+.BR regional
+|
+.BR internet*
+|
+.BR oceanic
+|
+.BR satellite
+|
+.BR interplanetary
+]
+.br
+[
+.BR besteffort
+|
+.BR diffserv8
+|
+.BR diffserv4
+|
+.BR diffserv3*
+]
+.br
+[
+.BR flowblind
+|
+.BR srchost
+|
+.BR dsthost
+|
+.BR hosts
+|
+.BR flows
+|
+.BR dual-srchost
+|
+.BR dual-dsthost
+|
+.BR triple-isolate*
+]
+.br
+[
+.BR nat
+|
+.BR nonat*
+]
+.br
+[
+.BR wash
+|
+.BR nowash*
+]
+.br
+[
+.BR ack-filter
+|
+.BR ack-filter-aggressive
+|
+.BR no-ack-filter*
+]
+.br
+[
+.BR memlimit
+LIMIT ]
+.br
+[
+.BR ptm
+|
+.BR atm
+|
+.BR noatm*
+]
+.br
+[
+.BR overhead
+N |
+.BR conservative
+|
+.BR raw*
+]
+.br
+[
+.BR mpu
+N ]
+.br
+[
+.BR ingress
+|
+.BR egress*
+]
+.br
+(* marks defaults)
+
+
+.SH DESCRIPTION
+CAKE (Common Applications Kept Enhanced) is a shaping-capable queue discipline
+which uses both AQM and FQ.  It combines COBALT, which is an AQM algorithm
+combining Codel and BLUE, a shaper which operates in deficit mode, and a 
variant
+of DRR++ for flow isolation.  8-way set-associative hashing is used to 
virtually
+eliminate hash collisions.  Priority queuing is available through a simplified
+diffserv implementation.  Overhead compensation for various encapsulation
+schemes is tightly integrated.
+
+All settings are optional; the default settings are chosen to be sensible in
+most common deployments.  Most people will only need to set the
+.B bandwidth
+parameter to get useful results, but reading the
+.B Overhead Compensation
+and
+.B Round Trip Tim

Re: [Cake] [PATCH net-next v11 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-16 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Tue, 15 May 2018 17:12:44 +0200
>
>> +typedef u64 cobalt_time_t;
>> +typedef s64 cobalt_tdiff_t;
>  ...
>> +static cobalt_time_t cobalt_get_time(void)
>> +{
>> +return ktime_get_ns();
>> +}
>> +
>> +static u32 cobalt_time_to_us(cobalt_time_t val)
>> +{
>> +do_div(val, NSEC_PER_USEC);
>> +return (u32)val;
>> +}
>
> If fundamentally you are working with ktime_t values, please use that type
> and the associated helpers.
>
> This is a valid argument that using custom typedefs provide documentation
> and an aid to understanding, but I think it doesn't serve that purpose
> very well here.
>
> So please just use ktime_t throughout instead of this cobalt_time_t
> and cobalt_tdiff_t.  And then use helpers like ktime_to_us() which
> properly optimize for 64-bit vs. 32-bit hosts.

Can do :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v12 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-16 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel do not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index f0f94d536e51..1ce81d919f73 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -271,6 +271,7 @@ enum {
 
 struct cobalt_skb_cb {
ktime_t enqueue_time;
+   u32 adjusted_len;
 };
 
 static u64 us_to_ns(u64 us)
@@ -1120,6 +1121,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
 }
 
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1197,7 +1280,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   ktime_t now, bool drop)
 {
-   u32 len = q

[Cake] [PATCH net-next v12 7/7] sch_cake: Conditionally split GSO segments

2018-05-16 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 1ce81d919f73..dca276806e9f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1474,36 +1475,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

[Cake] [PATCH net-next v12 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-16 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v12:
  - Get rid of custom time typedefs. Use ktime_t for time and u64 for
duration instead.

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2709 
 4 files changed, 2826 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v12 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-16 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ECN markings, and latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  105 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1739 
 4 files changed, 1856 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..883e84f008d7 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,109 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v12 5/7] sch_cake: Add DiffServ handling

2018-05-16 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  407 +-
 1 file changed, 401 insertions(+), 6 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index e1038a7b6686..f0f94d536e51 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -297,6 +297,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1219,6 +1281,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v12 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-16 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   73 ++
 1 file changed, 73 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 65439b643c92..e1038a7b6686 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -514,6 +520,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -541,6 +601,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1727,6 +1790,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK);
 
+   if (tb[TCA_CAKE_NAT]) {
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+   }
+
if (tb[TCA_CAKE_RTT]) {
q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
@@ -1892,6 +1961,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v12 3/7] sch_cake: Add optional ACK filter

2018-05-16 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless there are also SACK
options present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  260 ++
 1 file changed, 258 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d515f18f8460..65439b643c92 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -755,6 +755,239 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+
+   /* special-case 6in4 tunnelling, as that is a common way to get
+* v6 connectivity in the home
+*/
+   if (iph->protocol == IPPROT

[Cake] [PATCH net-next v12 2/7] sch_cake: Add ingress mode

2018-05-16 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   85 --
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 422cfccbf37f..d515f18f8460 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -433,7 +433,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   ktime_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -457,6 +458,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && schedule >= 0;
 
@@ -910,6 +912,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -986,8 +991,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = \
+   ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = \
+   cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = \
+   ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (ktime_after(now,
+   ktime_add_ms(q->last_reconfig_time,
+250))) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;
+   }
 
 

Re: [Cake] [PATCH net-next v12 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-16 Thread Toke Høiland-Jørgensen
Cong Wang  writes:

> On Wed, May 16, 2018 at 1:29 PM, Toke Høiland-Jørgensen  wrote:
>> +
>> +static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
>> +{
>> +   return NULL;
>> +}
>> +
>> +static unsigned long cake_find(struct Qdisc *sch, u32 classid)
>> +{
>> +   return 0;
>> +}
>> +
>> +static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
>> +{
>> +}
>
>
> Thanks for adding the support to other TC filters, it is much better
> now!

You're welcome. Turned out not to be that hard :)

> A quick question: why class_ops->dump_stats is still NULL?
>
> It is supposed to dump the stats of each flow. Is there still any
> difficulty to map it to tc class? I thought you figured it out when
> you added the tcf_classify().

On the classify side, I solved the "multiple sets of queues" problem by
using skb->priority to select the tin (diffserv tier) and the classifier
output to select the queue within that tin. This would not work for
dumping stats; some other way of mapping queues to the linear class
space would be needed. And since we are not actually collecting any
per-flow stats that I could print, I thought it wasn't worth coming up
with a half-baked proposal for this just to add an API hook that no one
in the existing CAKE user base has ever asked for...

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v12 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-16 Thread Toke Høiland-Jørgensen
Cong Wang  writes:

> On Wed, May 16, 2018 at 1:29 PM, Toke Høiland-Jørgensen  wrote:
>> When CAKE is deployed on a gateway that also performs NAT (which is a
>> common deployment mode), the host fairness mechanism cannot distinguish
>> internal hosts from each other, and so fails to work correctly.
>>
>> To fix this, we add an optional NAT awareness mode, which will query the
>> kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
>> and use that in the flow and host hashing.
>>
>> When the shaper is enabled and the host is already performing NAT, the cost
>> of this lookup is negligible. However, in unlimited mode with no NAT being
>> performed, there is a significant CPU cost at higher bandwidths. For this
>> reason, the feature is turned off by default.
>>
>> Signed-off-by: Toke Høiland-Jørgensen 
>> ---
>>  net/sched/sch_cake.c |   73 
>> ++
>>  1 file changed, 73 insertions(+)
>>
>> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
>> index 65439b643c92..e1038a7b6686 100644
>> --- a/net/sched/sch_cake.c
>> +++ b/net/sched/sch_cake.c
>> @@ -71,6 +71,12 @@
>>  #include 
>>  #include 
>>
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +#include 
>> +#include 
>> +#include 
>> +#endif
>> +
>>  #define CAKE_SET_WAYS (8)
>>  #define CAKE_MAX_TINS (8)
>>  #define CAKE_QUEUES (1024)
>> @@ -514,6 +520,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
>> return drop;
>>  }
>>
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +
>> +static void cake_update_flowkeys(struct flow_keys *keys,
>> +const struct sk_buff *skb)
>> +{
>> +   const struct nf_conntrack_tuple *tuple;
>> +   enum ip_conntrack_info ctinfo;
>> +   struct nf_conn *ct;
>> +   bool rev = false;
>> +
>> +   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
>> +   return;
>> +
>> +   ct = nf_ct_get(skb, &ctinfo);
>> +   if (ct) {
>> +   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
>> +   } else {
>> +   const struct nf_conntrack_tuple_hash *hash;
>> +   struct nf_conntrack_tuple srctuple;
>> +
>> +   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
>> +  NFPROTO_IPV4, dev_net(skb->dev),
>> +  &srctuple))
>> +   return;
>> +
>> +   hash = nf_conntrack_find_get(dev_net(skb->dev),
>> +&nf_ct_zone_dflt,
>> +&srctuple);
>> +   if (!hash)
>> +   return;
>> +
>> +   rev = true;
>> +   ct = nf_ct_tuplehash_to_ctrack(hash);
>> +   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
>> +   }
>> +
>> +   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
>> +   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
>> +
>> +   if (keys->ports.ports) {
>> +   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
>> +   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
>> +   }
>> +   if (rev)
>> +   nf_ct_put(ct);
>> +}
>> +#else
>> +static void cake_update_flowkeys(struct flow_keys *keys,
>> +const struct sk_buff *skb)
>> +{
>> +   /* There is nothing we can do here without CONNTRACK */
>> +}
>> +#endif
>> +
>>  /* Cake has several subtle multiple bit settings. In these cases you
>>   *  would be matching triple isolate mode as well.
>>   */
>> @@ -541,6 +601,9 @@ static u32 cake_hash(struct cake_tin_data *q, const 
>> struct sk_buff *skb,
>> skb_flow_dissect_flow_keys(skb, &keys,
>>FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
>>
>> +   if (flow_mode & CAKE_FLOW_NAT_FLAG)
>> +   cake_update_flowkeys(&keys, skb);
>> +
>> /* flow_hash_from_keys() sorts the addresses by value, so we have
>>  * to preserve their order in a separate data structure to treat
>>  * src and dst host addresses as independently selectable.
>> @@ -1727,6 +1790,12 @@ static int cake_change(struct Qdisc *sch, struct 
>> nlattr *opt,
>> q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
>> CAKE_FLOW_MASK);
>>
>> +   if (tb[TCA_CAKE_NAT]) {
>> +   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
>> +   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
>> +   !!nla_get_u32(tb[TCA_CAKE_NAT]);
>> +   }
>
>
> I think it's better to return -EOPNOTSUPP when CONFIG_NF_CONNTRACK
> is not enabled.

Good point, will fix :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v12 2/7] sch_cake: Add ingress mode

2018-05-16 Thread Toke Høiland-Jørgensen
Cong Wang  writes:

> On Wed, May 16, 2018 at 1:29 PM, Toke Høiland-Jørgensen  wrote:
>> +   if (tb[TCA_CAKE_AUTORATE]) {
>> +   if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
>> +   q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
>> +   else
>> +   q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
>> +   }
>> +
>> +   if (tb[TCA_CAKE_INGRESS]) {
>> +   if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
>> +   q->rate_flags |= CAKE_FLAG_INGRESS;
>> +   else
>> +   q->rate_flags &= ~CAKE_FLAG_INGRESS;
>> +   }
>> +
>> if (tb[TCA_CAKE_MEMORY])
>> q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
>>
>> @@ -1559,6 +1628,14 @@ static int cake_dump(struct Qdisc *sch, struct 
>> sk_buff *skb)
>> if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
>> goto nla_put_failure;
>>
>> +   if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
>> +   !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
>> +   goto nla_put_failure;
>> +
>> +   if (nla_put_u32(skb, TCA_CAKE_INGRESS,
>> +   !!(q->rate_flags & CAKE_FLAG_INGRESS)))
>> +   goto nla_put_failure;
>> +
>
> Why do you want to dump each bit of the rate_flags separately rather than
> dumping the whole rate_flags as an integer?

Well, these were added one at a time, each as a new option. Isn't that
more or less congruent with how netlink attributes are supposed to be
used?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] Does the latest cake support "tc filter"?

2018-05-17 Thread Toke Høiland-Jørgensen
Fushan Wen  writes:

> Hello developers,
> I've seen the mail in the netdev mailing list, saying "other tc
> filters supported". So can I use "tc filter" to attach specified
> traffic to a specified tin without DSCP marks? It's helpful when
> dealing with ingress traffic where iptables DSCP mark won't work.

Yes, the version submitted to upstream supports this. You can override
which tin packets goes in by setting skb->priority from a filter or
application (the major number needs to be set to the qdisc ID, and the
minor number becomes the tin to queue packets in).

However, these changes have not been backported, so they are not
available in the cobalt branch. You can pull them from the upstream-4.18
branch instead. That should build against a current net-next tree, and
possibly a bit older. Alternatively, you can pull the current patches
from patchwork and use those. Or wait for a backport after we're done
with the upstream submission :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v12 3/7] sch_cake: Add optional ACK filter

2018-05-17 Thread Toke Høiland-Jørgensen
Eric Dumazet  writes:

> On 05/16/2018 01:29 PM, Toke Høiland-Jørgensen wrote:
>> The ACK filter is an optional feature of CAKE which is designed to improve
>> performance on links with very asymmetrical rate limits. On such links
>> (which are unfortunately quite prevalent, especially for DSL and cable
>> subscribers), the downstream throughput can be limited by the number of
>> ACKs capable of being transmitted in the *upstream* direction.
>> 
>
> ...
>
>> 
>> Signed-off-by: Toke Høiland-Jørgensen 
>> ---
>>  net/sched/sch_cake.c |  260 
>> ++
>>  1 file changed, 258 insertions(+), 2 deletions(-)
>> 
>>
>
> I have decided to implement ACK compression in TCP stack itself.

Awesome! Will look forward to seeing that!

> First step is to take care of SACK, which are the main source of the
> bloat, since we send one SACK for every incoming out-of-order packet.
>
> These SACK are not only causing pain on the network, they also cause
> the sender to send one MSS at a time (TSO auto defer is not engaged in
> this case), thus starting to fill its RTX queue with pathological skbs
> (1-MSS each), increasing processing time.
>
> I see that your ACK filter does not take care of this common case :)

We don't do full parsing of SACKs, no; we were trying to keep things
simple... We do detect the presence of SACK options, though, and the
presence of SACK options on an ACK will make previous ACKs be considered
redundant.

> Doing the filtering in TCP has the immense advantage of knowing the
> RTT and thus be able to use heuristics causing less damage.

Quite so. I'll be quite happy if the CAKE ACK filter can be delegated to
something only relevant for the poor sods stuck on proprietary operating
systems :)


Are you satisfied that the current version of the filter doesn't mangle
the skbs or crash the kernel?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v12 3/7] sch_cake: Add optional ACK filter

2018-05-17 Thread Toke Høiland-Jørgensen
Eric Dumazet  writes:

> On 05/17/2018 04:23 AM, Toke Høiland-Jørgensen wrote:
>
>> 
>> We don't do full parsing of SACKs, no; we were trying to keep things
>> simple... We do detect the presence of SACK options, though, and the
>> presence of SACK options on an ACK will make previous ACKs be considered
>> redundant.
>> 
>
> But they are not redundant in some cases, particularly when reorders
> happen in the network.

Huh. I was under the impression that SACKs were basically cumulative
until cleared.

I.e., in packet sequence ABCDE where B and D are lost, C would have
SACK(B) and E would have SACK(B,D). Are you saying that E would only
have SACK(D)?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] tc not showing detailed stats

2018-05-20 Thread Toke Høiland-Jørgensen
Georgios Amanakis  writes:

> This is on x86_64 running 4.16.9.

OK, that is decidedly odd. I have just spent the last several hours
debugging this, and I'm starting to lean towards this being an unrelated
bug in openwrt, or something triggering such a bug. It seems tc doesn't
get the 'TCA_STATS2' attribute, for *any* qdiscs... I have no idea why.

>> > On 20 May 2018, at 14:23, Georgios Amanakis  wrote:
>> >
>> > Hi All,
>> >
>> > I am running latest tc-adv (r4227) and sch-cake/cobalt (r500), and "tc
>> > -s qdisc show" doesn't show detailed stats anymore. Latest known
>> > working revisions for me were tc-adv:r4226 sch-cake:r494.

What are the corresponding git commits; and does it still work if you
revert to the previous versions of tc-adv/cake? How are you building
tc-adv?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] tc not showing detailed stats

2018-05-20 Thread Toke Høiland-Jørgensen
Toke Høiland-Jørgensen  writes:

> Georgios Amanakis  writes:
>
>> This is on x86_64 running 4.16.9.
>
> OK, that is decidedly odd. I have just spent the last several hours
> debugging this, and I'm starting to lean towards this being an unrelated
> bug in openwrt, or something triggering such a bug. It seems tc doesn't
> get the 'TCA_STATS2' attribute, for *any* qdiscs... I have no idea why.
>
>>> > On 20 May 2018, at 14:23, Georgios Amanakis  wrote:
>>> >
>>> > Hi All,
>>> >
>>> > I am running latest tc-adv (r4227) and sch-cake/cobalt (r500), and "tc
>>> > -s qdisc show" doesn't show detailed stats anymore. Latest known
>>> > working revisions for me were tc-adv:r4226 sch-cake:r494.
>
> What are the corresponding git commits; and does it still work if you
> revert to the previous versions of tc-adv/cake? How are you building
> tc-adv?

Also, which kernel version?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] tc not showing detailed stats

2018-05-20 Thread Toke Høiland-Jørgensen
Georgios Amanakis  writes:

> I am running everything on 4.16.9.
> It seems the culprit is sch-cake.
> tc-adv@d52fe00 and sch-cake@66e5d6 : detailed stats are printed
> tc-adv@d52fe00 and sch-cake@842d7f : details not printed anymore. 
>
> Could it be that the culprit is sch-cake@842d7f?

Well, that commit breaks compatibility with tc-adv (again :P). You'll
need to recompile tc-adv and make sure you have commit
aa554002af169a135cb82fb809842f5d7afce309

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] tc not showing detailed stats

2018-05-21 Thread Toke Høiland-Jørgensen
Georgios Amanakis  writes:

> I am well aware of that :)
> On my system though with tc-adv@aa554002 and sch_cake/cobalt@842d7f0
> it doesn't produce any detailed stats:

Hmm, the thing is that I can reproduce your error if I use commit
d52fe0077637caa1e3a4b1242d2bf935929b8275 in tc-adv I can reproduce your
error, but not if I use the current HEAD (aa554002). So I'm guessing
there's something else going on here, and moving the PAD attribute is
just treating a symptom. Did you do `make clean && make` in the tc-adv
repo? Just changing pkt_sched.h doesn't trigger a rebuild if you just
run make...

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] tc not showing detailed stats

2018-05-21 Thread Toke Høiland-Jørgensen
Georgios Amanakis  writes:

> Of course you are right. I was using archlinux's build system which
> produces the bug somehow. If I compile directly from the source everything
> works normal.
>
> I am sorry for the mess this caused.

No worries. We are still debugging another issue like it on openwrt...
And apologies for the frequent API changes; I am making it a bit harder
to track git for the time being...

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v13 2/7] sch_cake: Add ingress mode

2018-05-21 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   85 --
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ea4aa261cec..10e208e4255d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   ktime_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
@@ -913,6 +915,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -990,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = \
+   ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = \
+   cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = \
+   ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (ktime_after(now,
+   ktime_add_ms(q->last_reconfig_time,
+250))) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;

[Cake] [PATCH net-next v13 3/7] sch_cake: Add optional ACK filter

2018-05-21 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* bytes than the new packet being enqueued, including any
SACK options. This prevents duplicate ACKs from being filtered, to avoid
interfering with retransmission logic. In addition, we check TCP header
options and only drop those that are known to not interfere with sender
state. In particular, packets with unknown option codes are never dropped.

In aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  425 ++
 1 file changed, 423 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10e208e4255d..c20f33940a57 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -757,6 +757,404 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+
+   /* special-case 6in4 tun

[Cake] [PATCH net-next v13 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-21 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   79 ++
 1 file changed, 79 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c20f33940a57..116c935b2914 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -543,6 +603,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1891,6 +1954,18 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
if (err < 0)
return err;
 
+   if (tb[TCA_CAKE_NAT]) {
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+   NL_SET_ERR_MSG_ATTR(extack, "No conntrack support in kernel",
+   tb[TCA_CAKE_NAT]);
+   return -EOPNOTSUPP;
+#endif
+   }
+
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
 
@@ -2063,6 +2138,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_

[Cake] [PATCH net-next v13 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-21 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ecn markings, and latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1850 
 4 files changed, 1975 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..07648e6ea569 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,117 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_PAD,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v13 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-21 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel do not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d025899ef11f..4bccfee714c8 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -272,6 +272,7 @@ enum {
 
 struct cobalt_skb_cb {
ktime_t enqueue_time;
+   u32 adjusted_len;
 };
 
 static u64 us_to_ns(u64 us)
@@ -1287,6 +1288,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
 }
 
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1364,7 +1447,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   ktime_t now, bool drop)
 {
-   u32 len = q

[Cake] [PATCH net-next v13 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-21 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v13:
  - Avoid ktime_t to scalar compares
  - Add class dumping and basic stats
  - Fail with ENOTSUPP when requesting NAT mode and conntrack is not
available.
  - Parse all TCP options in ACK filter and make sure to only drop safe
ones. Also handle SACK ranges properly.

v12:
  - Get rid of custom time typedefs. Use ktime_t for time and u64 for
duration instead.

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2992 
 4 files changed, 3117 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v13 5/7] sch_cake: Add DiffServ handling

2018-05-21 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  412 +-
 1 file changed, 404 insertions(+), 8 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 116c935b2914..d025899ef11f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -298,6 +298,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1387,6 +1449,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v13 7/7] sch_cake: Conditionally split GSO segments

2018-05-21 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 4bccfee714c8..1bf413759bd4 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1643,36 +1644,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

Re: [Cake] [PATCH net-next v13 3/7] sch_cake: Add optional ACK filter

2018-05-21 Thread Toke Høiland-Jørgensen
Eric Dumazet  writes:

> On 05/21/2018 09:24 AM, Toke Høiland-Jørgensen wrote:
>
>> +while (oplen_tmp >= 8) {
>> +u32 right_b = get_unaligned_be32(sack_tmp + 4);
>> +u32 left_b = get_unaligned_be32(sack_tmp);
>> +
>> +if (left_b >= right_b)
>> +continue;
>> +
>> +if (first)
>> +bytes_b += right_b - left_b;
>> +
>> +if (left_b <= left_a && right_a <= right_b) {
>> +found = true;
>> +if (!first)
>> +break;
>> +}
>> +oplen_tmp -= 8;
>> +sack_tmp += 8;
>> +}
>>
>
> This is obviously incorrect.
>
> Please take a look at TCP stack, and how it handles sequence wrapping
> with following macros :
>
> after(), before()

Ah yes, sequence number wrapping. I was thinking I needed to deal with
that, and then got sidetracked and forgot about it. Will fix.

Other than that, do you agree that this approach to SACK and header
handling can work?

> Quite frankly O wonder how this was really tested.

Getting it wrong results in a few drop decisions being wrong every 2**32
bytes; easy to miss. If you have a "mis-behaving middle-box test suite"
or something I'll be happy to hear about it :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v14 7/7] sch_cake: Conditionally split GSO segments

2018-05-21 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 21785dc31acc..241bd2dbdb21 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1646,36 +1647,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

[Cake] [PATCH net-next v14 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-21 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel do not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get within 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 687fa9a38a0d..21785dc31acc 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -272,6 +272,7 @@ enum {
 
 struct cobalt_skb_cb {
ktime_t enqueue_time;
+   u32 adjusted_len;
 };
 
 static u64 us_to_ns(u64 us)
@@ -1290,6 +1291,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
 }
 
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1367,7 +1450,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   ktime_t now, bool drop)
 {
-   u32 len = q

[Cake] [PATCH net-next v14 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-21 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper, that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ECN markings, and latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1850 
 4 files changed, 1975 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..07648e6ea569 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,117 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_PAD,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v14 3/7] sch_cake: Add optional ACK filter

2018-05-21 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* bytes than the new packet being enqueued, including any
SACK options. This prevents duplicate ACKs from being filtered, to avoid
interfering with retransmission logic. In addition, we check TCP header
options and only drop those that are known to not interfere with sender
state. In particular, packets with unknown option codes are never dropped.

In aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as a TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Cc: Yuchung Cheng 
Cc: Neal Cardwell 
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  428 ++
 1 file changed, 426 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10e208e4255d..92623160d43e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -757,6 +757,407 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+

[Cake] [PATCH net-next v14 5/7] sch_cake: Add DiffServ handling

2018-05-21 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  412 +-
 1 file changed, 404 insertions(+), 8 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 04364993ce19..687fa9a38a0d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -298,6 +298,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1390,6 +1452,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v14 2/7] sch_cake: Add ingress mode

2018-05-21 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   85 --
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ea4aa261cec..10e208e4255d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   ktime_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
@@ -913,6 +915,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -990,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = \
+   ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = \
+   cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = \
+   ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (ktime_after(now,
+   ktime_add_ms(q->last_reconfig_time,
+250))) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;

[Cake] [PATCH net-next v14 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-21 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   79 ++
 1 file changed, 79 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 92623160d43e..04364993ce19 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -543,6 +603,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1894,6 +1957,18 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
if (err < 0)
return err;
 
+   if (tb[TCA_CAKE_NAT]) {
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+   NL_SET_ERR_MSG_ATTR(extack, "No conntrack support in kernel",
+   tb[TCA_CAKE_NAT]);
+   return -EOPNOTSUPP;
+#endif
+   }
+
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
 
@@ -2066,6 +2141,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_

[Cake] [PATCH net-next v14 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-21 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v14:
  - Handle seqno wraps and DSACKs in ACK filter

v13:
  - Avoid ktime_t to scalar compares
  - Add class dumping and basic stats
  - Fail with ENOTSUPP when requesting NAT mode and conntrack is not
available.
  - Parse all TCP options in ACK filter and make sure to only drop safe
ones. Also handle SACK ranges properly.

v12:
  - Get rid of custom time typedefs. Use ktime_t for time and u64 for
duration instead.

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 2995 
 4 files changed, 3120 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v14 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-22 Thread Toke Høiland-Jørgensen


On 22 May 2018 01:45:13 CEST, Marcelo Ricardo Leitner 
 wrote:
>On Mon, May 21, 2018 at 10:35:58PM +0200, Toke Høiland-Jørgensen wrote:
>> +static u32 cake_overhead(struct cake_sched_data *q, const struct
>sk_buff *skb)
>> +{
>> +const struct skb_shared_info *shinfo = skb_shinfo(skb);
>> +unsigned int hdr_len, last_len = 0;
>> +u32 off = skb_network_offset(skb);
>> +u32 len = qdisc_pkt_len(skb);
>> +u16 segs = 1;
>> +
>> +q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
>> +
>> +if (!shinfo->gso_size)
>> +return cake_calc_overhead(q, len, off);
>> +
>> +/* borrowed from qdisc_pkt_len_init() */
>> +hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
>> +
>> +/* + transport layer */
>> +if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
>> +SKB_GSO_TCPV6))) {
>> +const struct tcphdr *th;
>> +struct tcphdr _tcphdr;
>> +
>> +th = skb_header_pointer(skb, skb_transport_offset(skb),
>> +sizeof(_tcphdr), &_tcphdr);
>> +if (likely(th))
>> +hdr_len += __tcp_hdrlen(th);
>> +} else {
>
>I didn't see some code limiting GSO packets to just TCP or UDP. Is it
>safe to assume that this packet is an UDP one, and not SCTP or ESP,
>for example?

As the comment says, I nicked this from the qdisc init code.
So I assume it's safe? :)

>> +struct udphdr _udphdr;
>> +
>> +if (skb_header_pointer(skb, skb_transport_offset(skb),
>> +   sizeof(_udphdr), &_udphdr))
>> +hdr_len += sizeof(struct udphdr);
>> +}
>> +
>> +if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
>> +segs = DIV_ROUND_UP(skb->len - hdr_len,
>> +shinfo->gso_size);
>> +else
>> +segs = shinfo->gso_segs;
>> +
>> +len = shinfo->gso_size + hdr_len;
>> +last_len = skb->len - shinfo->gso_size * (segs - 1);
>> +
>> +return (cake_calc_overhead(q, len, off) * (segs - 1) +
>> +cake_calc_overhead(q, last_len, off));
>> +}
>> +
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v14 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-22 Thread Toke Høiland-Jørgensen


On 22 May 2018 01:34:06 CEST, Marcelo Ricardo Leitner 
 wrote:
>[Cc'ing netfilter-devel@ for awareness]

Thanks! I'll add a Cc in the next version.

>On Mon, May 21, 2018 at 10:35:58PM +0200, Toke Høiland-Jørgensen wrote:
>> When CAKE is deployed on a gateway that also performs NAT (which is a
>> common deployment mode), the host fairness mechanism cannot
>distinguish
>> internal hosts from each other, and so fails to work correctly.
>> 
>> To fix this, we add an optional NAT awareness mode, which will query
>the
>> kernel conntrack mechanism to obtain the pre-NAT addresses for each
>packet
>> and use that in the flow and host hashing.
>> 
>> When the shaper is enabled and the host is already performing NAT,
>the cost
>> of this lookup is negligible. However, in unlimited mode with no NAT
>being
>> performed, there is a significant CPU cost at higher bandwidths. For
>this
>> reason, the feature is turned off by default.
>> 
>> Signed-off-by: Toke Høiland-Jørgensen 
>> ---
>>  net/sched/sch_cake.c |   79
>++
>>  1 file changed, 79 insertions(+)
>> 
>> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
>> index 92623160d43e..04364993ce19 100644
>> --- a/net/sched/sch_cake.c
>> +++ b/net/sched/sch_cake.c
>> @@ -71,6 +71,12 @@
>>  #include 
>>  #include 
>>  
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +#include 
>> +#include 
>> +#include 
>> +#endif
>> +
>>  #define CAKE_SET_WAYS (8)
>>  #define CAKE_MAX_TINS (8)
>>  #define CAKE_QUEUES (1024)
>> @@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct
>cobalt_vars *vars,
>>  return drop;
>>  }
>>  
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +
>> +static void cake_update_flowkeys(struct flow_keys *keys,
>> + const struct sk_buff *skb)
>> +{
>> +const struct nf_conntrack_tuple *tuple;
>> +enum ip_conntrack_info ctinfo;
>> +struct nf_conn *ct;
>> +bool rev = false;
>> +
>> +if (tc_skb_protocol(skb) != htons(ETH_P_IP))
>> +return;
>> +
>> +ct = nf_ct_get(skb, &ctinfo);
>> +if (ct) {
>> +tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
>> +} else {
>> +const struct nf_conntrack_tuple_hash *hash;
>> +struct nf_conntrack_tuple srctuple;
>> +
>> +if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
>> +   NFPROTO_IPV4, dev_net(skb->dev),
>> +   &srctuple))
>> +return;
>> +
>> +hash = nf_conntrack_find_get(dev_net(skb->dev),
>> + &nf_ct_zone_dflt,
>> + &srctuple);
>> +if (!hash)
>> +return;
>> +
>> +rev = true;
>> +ct = nf_ct_tuplehash_to_ctrack(hash);
>> +tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
>> +}
>> +
>> +keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip :
>tuple->src.u3.ip;
>> +keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip :
>tuple->dst.u3.ip;
>> +
>> +if (keys->ports.ports) {
>> +keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
>> +keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
>> +}
>> +if (rev)
>> +nf_ct_put(ct);
>> +}
>> +#else
>> +static void cake_update_flowkeys(struct flow_keys *keys,
>> + const struct sk_buff *skb)
>> +{
>> +/* There is nothing we can do here without CONNTRACK */
>> +}
>> +#endif
>> +
>>  /* Cake has several subtle multiple bit settings. In these cases you
>>   *  would be matching triple isolate mode as well.
>>   */
>> @@ -543,6 +603,9 @@ static u32 cake_hash(struct cake_tin_data *q,
>const struct sk_buff *skb,
>>  skb_flow_dissect_flow_keys(skb, &keys,
>> FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
>>  
>> +if (flow_mode & CAKE_FLOW_NAT_FLAG)
>> +cake_update_flowkeys(&keys, skb);
>> +
>>  /* flow_hash_from_keys() sorts the addresses by value, so we have
>>   * to preserve their order in a separate data structure to treat
>>   * src and dst host addresses as independently selectable.

[Cake] [PATCH net-next v15 7/7] sch_cake: Conditionally split GSO segments

2018-05-22 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 87c3b01b773e..c80a7c51b792 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -82,6 +82,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1671,36 +1672,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

[Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-22 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Cc: netfilter-de...@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   79 ++
 1 file changed, 79 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 68ac908470f1..6f7cae705c84 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include 
 #include 
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include 
+#include 
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *tuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+   bool rev = false;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   } else {
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return;
+
+   rev = true;
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   }
+
+   keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+   keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+   }
+   if (rev)
+   nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+   /* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -543,6 +603,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1919,6 +1982,18 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
if (err < 0)
return err;
 
+   if (tb[TCA_CAKE_NAT]) {
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+   NL_SET_ERR_MSG_ATTR(extack, "No conntrack support in kernel",
+   tb[TCA_CAKE_NAT]);
+   return -EOPNOTSUPP;
+#endif
+   }
+
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
 
@@ -2091,6 +2166,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_

[Cake] [PATCH net-next v15 0/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-22 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v15:
  - Handle ECN flags in ACK filter

v14:
  - Handle seqno wraps and DSACKs in ACK filter

v13:
  - Avoid ktime_t to scalar compares
  - Add class dumping and basic stats
  - Fail with ENOTSUPP when requesting NAT mode and conntrack is not
available.
  - Parse all TCP options in ACK filter and make sure to only drop safe
ones. Also handle SACK ranges properly.

v12:
  - Get rid of custom time typedefs. Use ktime_t for time and u64 for
duration instead.

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.

---

Toke Høiland-Jørgensen (7):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/uapi/linux/pkt_sched.h |  113 +
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 3020 
 4 files changed, 3145 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v15 2/7] sch_cake: Add ingress mode

2018-05-22 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   85 --
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ea4aa261cec..10e208e4255d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   ktime_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
@@ -913,6 +915,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -990,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = \
+   ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = \
+   cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = \
+   ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (ktime_after(now,
+   ktime_add_ms(q->last_reconfig_time,
+250))) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;

[Cake] [PATCH net-next v15 5/7] sch_cake: Add DiffServ handling

2018-05-22 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  412 +-
 1 file changed, 404 insertions(+), 8 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 6f7cae705c84..6384765e97b0 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -298,6 +298,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1415,6 +1477,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v15 3/7] sch_cake: Add optional ACK filter

2018-05-22 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* bytes than the new packet being enqueued, including any
SACK options. This prevents duplicate ACKs from being filtered, to avoid
interfering with retransmission logic. In addition, we check TCP header
options and only drop those that are known to not interfere with sender
state. In particular, packets with unknown option codes are never dropped.

In aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Cc: Yuchung Cheng 
Cc: Neal Cardwell 
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  453 ++
 1 file changed, 451 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10e208e4255d..68ac908470f1 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -757,6 +757,432 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+

[Cake] [PATCH net-next v15 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-22 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel does not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get with 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 6384765e97b0..87c3b01b773e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -272,6 +272,7 @@ enum {
 
 struct cobalt_skb_cb {
ktime_t enqueue_time;
+   u32 adjusted_len;
 };
 
 static u64 us_to_ns(u64 us)
@@ -1315,6 +1316,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
 }
 
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1392,7 +1475,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   ktime_t now, bool drop)
 {
-   u32 len = q

[Cake] [PATCH net-next v15 1/7] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-22 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper, which can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring, loss, ecn markings, latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1850 
 4 files changed, 1975 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..07648e6ea569 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,117 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_PAD,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

Re: [Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-22 Thread Toke Høiland-Jørgensen
Pablo Neira Ayuso  writes:

> Hi Toke,
>
> On Tue, May 22, 2018 at 03:57:38PM +0200, Toke Høiland-Jørgensen wrote:
>> When CAKE is deployed on a gateway that also performs NAT (which is a
>> common deployment mode), the host fairness mechanism cannot distinguish
>> internal hosts from each other, and so fails to work correctly.
>> 
>> To fix this, we add an optional NAT awareness mode, which will query the
>> kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
>> and use that in the flow and host hashing.
>> 
>> When the shaper is enabled and the host is already performing NAT, the cost
>> of this lookup is negligible. However, in unlimited mode with no NAT being
>> performed, there is a significant CPU cost at higher bandwidths. For this
>> reason, the feature is turned off by default.
>> 
>> Cc: netfilter-de...@vger.kernel.org
>> Signed-off-by: Toke Høiland-Jørgensen 
>> ---
>>  net/sched/sch_cake.c |   79 
>> ++
>>  1 file changed, 79 insertions(+)
>> 
>> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
>> index 68ac908470f1..6f7cae705c84 100644
>> --- a/net/sched/sch_cake.c
>> +++ b/net/sched/sch_cake.c
>> @@ -71,6 +71,12 @@
>>  #include 
>>  #include 
>>  
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +#include 
>> +#include 
>> +#include 
>> +#endif
>> +
>>  #define CAKE_SET_WAYS (8)
>>  #define CAKE_MAX_TINS (8)
>>  #define CAKE_QUEUES (1024)
>> @@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
>>  return drop;
>>  }
>>  
>> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> +
>> +static void cake_update_flowkeys(struct flow_keys *keys,
>> + const struct sk_buff *skb)
>> +{
>> +const struct nf_conntrack_tuple *tuple;
>> +enum ip_conntrack_info ctinfo;
>> +struct nf_conn *ct;
>> +bool rev = false;
>> +
>> +if (tc_skb_protocol(skb) != htons(ETH_P_IP))
>> +return;
>> +
>> +ct = nf_ct_get(skb, &ctinfo);
>> +if (ct) {
>> +tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
>> +} else {
>> +const struct nf_conntrack_tuple_hash *hash;
>> +struct nf_conntrack_tuple srctuple;
>> +
>> +if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
>> +   NFPROTO_IPV4, dev_net(skb->dev),
>> +   &srctuple))
>> +return;
>> +
>> +hash = nf_conntrack_find_get(dev_net(skb->dev),
>> + &nf_ct_zone_dflt,
>> + &srctuple);
>> +if (!hash)
>> +return;
>> +
>> +rev = true;
>> +ct = nf_ct_tuplehash_to_ctrack(hash);
>> +tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
>> +}
>> +
>> +keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
>> +keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
>> +
>> +if (keys->ports.ports) {
>> +keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
>> +keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
>> +}
>> +if (rev)
>> +nf_ct_put(ct);
>> +}
>
> This is going to pull in the nf_conntrack module, even if you may not
> want it, as soon as cake is in place.

Yeah, we are aware of that; we get a moddep on nf_conntrack. Our main
deployment scenario has been home routers where conntrack is used
anyway, so this has not been much of an issue. However, if there is a
way to avoid this, and instead detect at runtime if conntrack is
available, that would certainly be useful. Is there? :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v14 6/7] sch_cake: Add overhead compensation support to the rate shaper

2018-05-22 Thread Toke Høiland-Jørgensen
Marcelo Ricardo Leitner  writes:

> On Tue, May 22, 2018 at 10:44:53AM +0200, Toke Høiland-Jørgensen wrote:
>> 
>> 
>> On 22 May 2018 01:45:13 CEST, Marcelo Ricardo Leitner 
>>  wrote:
>> >On Mon, May 21, 2018 at 10:35:58PM +0200, Toke Høiland-Jørgensen wrote:
>> >> +static u32 cake_overhead(struct cake_sched_data *q, const struct
>> >sk_buff *skb)
>> >> +{
>> >> + const struct skb_shared_info *shinfo = skb_shinfo(skb);
>> >> + unsigned int hdr_len, last_len = 0;
>> >> + u32 off = skb_network_offset(skb);
>> >> + u32 len = qdisc_pkt_len(skb);
>> >> + u16 segs = 1;
>> >> +
>> >> + q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
>> >> +
>> >> + if (!shinfo->gso_size)
>> >> + return cake_calc_overhead(q, len, off);
>> >> +
>> >> + /* borrowed from qdisc_pkt_len_init() */
>> >> + hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
>> >> +
>> >> + /* + transport layer */
>> >> + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
>> >> + SKB_GSO_TCPV6))) {
>> >> + const struct tcphdr *th;
>> >> + struct tcphdr _tcphdr;
>> >> +
>> >> + th = skb_header_pointer(skb, skb_transport_offset(skb),
>> >> + sizeof(_tcphdr), &_tcphdr);
>> >> + if (likely(th))
>> >> + hdr_len += __tcp_hdrlen(th);
>> >> + } else {
>> >
>> >I didn't see some code limiting GSO packets to just TCP or UDP. Is it
>> >safe to assume that this packet is an UDP one, and not SCTP or ESP,
>> >for example?
>> 
>> As the comment says, I nicked this from the qdisc init code.
>> So I assume it's safe? :)
>
> As long as it doesn't go further than this, it is. As in, it is just
> validating if it can contain an UDP header, and if so, account for its
> size, without actually reading the header.
>
> Considering everything !TCP as UDP works as an approximation, which is
> quite accurate. SCTP header is just 4 bytes bigger than UDP header and
> is equal to ESP header size.

Yup, that seems close enough for our purposes. Thanks for explaining.
Didn't actually know that GSO handles other protocols as well :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-23 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Tue, 22 May 2018 15:57:38 +0200
>
>> When CAKE is deployed on a gateway that also performs NAT (which is a
>> common deployment mode), the host fairness mechanism cannot distinguish
>> internal hosts from each other, and so fails to work correctly.
>> 
>> To fix this, we add an optional NAT awareness mode, which will query the
>> kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
>> and use that in the flow and host hashing.
>> 
>> When the shaper is enabled and the host is already performing NAT, the cost
>> of this lookup is negligible. However, in unlimited mode with no NAT being
>> performed, there is a significant CPU cost at higher bandwidths. For this
>> reason, the feature is turned off by default.
>> 
>> Cc: netfilter-de...@vger.kernel.org
>> Signed-off-by: Toke Høiland-Jørgensen 
>
> This is really pushing the limits of what a packet scheduler can
> require for correct operation.

Well, Cake is all about pushing the limits of what a packet scheduler
can do... ;)

> And this creates an incredibly ugly dependency.

Yeah, I do agree with that, and I'd love to get rid of it. I even tried
prototyping what it would take to lookup the symbols at runtime using
kallsyms. It wasn't exactly prettier; pushed it here in case anyone
wants to recoil in horror (completely untested, just got it to the point
where the module compiles with no nf_* symbols according to objdump):

https://github.com/dtaht/sch_cake/commit/97270a10dcea236d137f5113aaeb4303098ab3f3

> I'd much rather you do something NAT method agnostic, like save or
> compute the necessary information on ingress and then later use it on
> egress.

How would this work? We would have to add some kind of global state
shared between all instances of the qdisc, and maintain state for all
flows we see going through there, effectively duplicating conntrack, and
also requiring people to run Cake on all interfaces? How is that better?

> Because what you have here will completely break when someone does NAT
> using eBPF, act_nat, or similar.
>
> There is even skb->rxhash, be creative :-)

This is not actually about improving hashing; the post-NAT information
is fine for that. It's about making sure the per-host fairness works
when NATing, so we can distribute bandwidth between the hosts on the
local LAN regardless of how many flows they open. This is one of the
"killer features" of Cake - it was the top requested feature until we
implemented it. So it would be a shame to drop it.

Since act_nat is a 1-to-1 mapping I don't think we would have any loss
of functionality with that. For eBPF, well, obviously all bets are off
as far as reusing any state. But it's not unreasonable to expect people
who do NAT in eBPF to also set skb->tc_classid if they want pre-nat host
fairness, is it?

Which means that the only remaining issue is the module dependency. Can
we live with that (noting that it'll go away if conntrack is configured
out of the kernel entirely)? Or is the kallsyms approach a viable way
forward? I guess we could add a kconfig option that toggles between that
and native calls, so that we'd at least get a compile error on suitably
configured kernels if the API changes...

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-23 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Wed, 23 May 2018 22:38:30 +0200
>
>> How would this work?
>
> On egress the core networking flow dissector records what you need
> somewhere in SKB or wherever.  You later retrieve it at egress time
> after NAT has occurred.

Ah, right, that could work. Is there any particular field in sk_buff
we should stomp on for this purpose, or would you prefer a new one?
Looking through it, the only obvious one that comes to mind is, well,
skb->_nfct :)

If we wanted to avoid bloating sk_buff, we could add a union with that,
fill it in the flow dissector, and just let conntrack overwrite it if
active; then detect which is which in Cake, and read the data we need
from _nfct if conntrack is active, and from what the flow dissector
stored otherwise.

Is that too many hoops to jump through to avoid adding an extra field?

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-23 Thread Toke Høiland-Jørgensen
David Miller  writes:

> From: Toke Høiland-Jørgensen 
> Date: Wed, 23 May 2018 23:05:16 +0200
>
>> Ah, right, that could work. Is there any particular field in sk_buff
>> we should stomp on for this purpose, or would you prefer a new one?
>> Looking through it, the only obvious one that comes to mind is, well,
>> skb->_nfct :)
>> 
>> If we wanted to avoid bloating sk_buff, we could add a union with that,
>> fill it in the flow dissector, and just let conntrack overwrite it if
>> active; then detect which is which in Cake, and read the data we need
>> from _nfct if conntrack is active, and from what the flow dissector
>> stored otherwise.
>> 
>> Is that too many hoops to jump through to avoid adding an extra field?
>
> Space is precious in sk_buff, so yes avoid adding new members at all
> costs.
>
> How much info do you need exactly?

We use a u32 hash (from flow_hash_from_keys()) on the source address.
Ideally we'd want that; but we could get away with less if we are
willing to accept more hash collisions; we just need to map the source
address into a hash bucket. We currently have 1024 of those, so 10 bits
would suffice if we just drop the set-associative hashing for source
hosts.

Or maybe 16 bits to be on the safe side? It really is a pretty
straight-forward tradeoff between space and collision probability.


Hmm, and we still have an issue with ingress filtering (where cake is
running on an ifb interface). That runs pre-NAT in the conntrack case,
and we can't do the RX trick. Here we do the lookup manually in
conntrack (and this part is actually what brings in most of the
dependencies). Any neat tricks up your sleeve for this case? :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


Re: [Cake] [PATCH net-next v15 4/7] sch_cake: Add NAT awareness to packet classifier

2018-05-23 Thread Toke Høiland-Jørgensen
Pablo Neira Ayuso  writes:

> On Tue, May 22, 2018 at 04:11:06PM +0200, Toke Høiland-Jørgensen wrote:
>> Pablo Neira Ayuso  writes:
>> 
>> > Hi Toke,
>> >
>> > On Tue, May 22, 2018 at 03:57:38PM +0200, Toke Høiland-Jørgensen wrote:
>> >> When CAKE is deployed on a gateway that also performs NAT (which is a
>> >> common deployment mode), the host fairness mechanism cannot distinguish
>> >> internal hosts from each other, and so fails to work correctly.
>> >> 
>> >> To fix this, we add an optional NAT awareness mode, which will query the
>> >> kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
>> >> and use that in the flow and host hashing.
>> >> 
>> >> When the shaper is enabled and the host is already performing NAT, the 
>> >> cost
>> >> of this lookup is negligible. However, in unlimited mode with no NAT being
>> >> performed, there is a significant CPU cost at higher bandwidths. For this
>> >> reason, the feature is turned off by default.
>> >> 
>> >> Cc: netfilter-de...@vger.kernel.org
>> >> Signed-off-by: Toke Høiland-Jørgensen 
>> >> ---
>> >>  net/sched/sch_cake.c |   79 
>> >> ++
>> >>  1 file changed, 79 insertions(+)
>> >> 
>> >> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
>> >> index 68ac908470f1..6f7cae705c84 100644
>> >> --- a/net/sched/sch_cake.c
>> >> +++ b/net/sched/sch_cake.c
>> >> @@ -71,6 +71,12 @@
>> >>  #include 
>> >>  #include 
>> >>  
>> >> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> >> +#include 
>> >> +#include 
>> >> +#include 
>> >> +#endif
>> >> +
>> >>  #define CAKE_SET_WAYS (8)
>> >>  #define CAKE_MAX_TINS (8)
>> >>  #define CAKE_QUEUES (1024)
>> >> @@ -516,6 +522,60 @@ static bool cobalt_should_drop(struct cobalt_vars 
>> >> *vars,
>> >>   return drop;
>> >>  }
>> >>  
>> >> +#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
>> >> +
>> >> +static void cake_update_flowkeys(struct flow_keys *keys,
>> >> +  const struct sk_buff *skb)
>> >> +{
>> >> + const struct nf_conntrack_tuple *tuple;
>> >> + enum ip_conntrack_info ctinfo;
>> >> + struct nf_conn *ct;
>> >> + bool rev = false;
>> >> +
>> >> + if (tc_skb_protocol(skb) != htons(ETH_P_IP))
>> >> + return;
>> >> +
>> >> + ct = nf_ct_get(skb, &ctinfo);
>> >> + if (ct) {
>> >> + tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
>> >> + } else {
>> >> + const struct nf_conntrack_tuple_hash *hash;
>> >> + struct nf_conntrack_tuple srctuple;
>> >> +
>> >> + if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
>> >> +NFPROTO_IPV4, dev_net(skb->dev),
>> >> +&srctuple))
>> >> + return;
>> >> +
>> >> + hash = nf_conntrack_find_get(dev_net(skb->dev),
>> >> +  &nf_ct_zone_dflt,
>> >> +  &srctuple);
>> >> + if (!hash)
>> >> + return;
>> >> +
>> >> + rev = true;
>> >> + ct = nf_ct_tuplehash_to_ctrack(hash);
>> >> + tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
>> >> + }
>> >> +
>> >> + keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
>> >> + keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
>> >> +
>> >> + if (keys->ports.ports) {
>> >> + keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
>> >> + keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
>> >> + }
>> >> + if (rev)
>> >> + nf_ct_put(ct);
>> >> +}
>> >
>> > This is going to pull in the nf_conntrack module, even if you may not
>> > want it, as soon as cake is in place.
>> 
>> Yeah, we are aware of that; we get a moddep on nf_conntrack. Our main
>> deployment scenario has been home routers where conntrack is used
>> anyway, so this has not been much of an issue. However, if there is a
>> way to avoid this, and instead detect at runtime if conntrack is
>> available, that would certainly be useful. Is there? :)
>
> Yes, there is.
>
> You place this function in net/netfilter/nf_conntrack_core.c, call it
> nf_conntrack_get_tuple() which internally uses a rcu hook for this.
> See nf_ct_attach() and ip_ct_attach() in net/netfilter/core.c for
> instance.
>
> This allows you to avoid the dependency with nf_conntrack (which would
> be only called if the module has been explicitly loaded), which is
> what you're searching for.

Ah, awesome! I'll look into that; thanks :)

-Toke
___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v16 8/8] sch_cake: Conditionally split GSO segments

2018-05-28 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 54cde99fc0ad..2b9ec946250f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -80,6 +80,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1638,36 +1639,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

[Cake] [PATCH net-next v16 6/8] sch_cake: Add DiffServ handling

2018-05-28 Thread Toke Høiland-Jørgensen
This adds support for DiffServ-based priority queueing to CAKE. If the
shaper is in use, each priority tier gets its own virtual clock, which
limits that tier's rate to a fraction of the overall shaped rate, to
discourage trying to game the priority mechanism.

CAKE defaults to a simple, three-tier mode that interprets most code points
as "best effort", but places CS1 traffic into a low-priority "bulk" tier
which is assigned 1/16 of the total rate, and a few code points indicating
latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7)
into a "latency sensitive" high-priority tier, which is assigned 1/4 rate.
The other supported DiffServ modes are a 4-tier mode matching the 802.11e
precedence rules, as well as two 8-tier modes, one of which implements
strict precedence of the eight priority levels.

This commit also adds an optional DiffServ 'wash' mode, which will zero out
the DSCP fields of any packet passing through CAKE. While this can
technically be done with other mechanisms in the kernel, having the feature
available in CAKE significantly decreases configuration complexity; and the
implementation cost is low on top of the other DiffServ-handling code.

Filters and applications can set the skb->priority field to override the
DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches
CAKE's qdisc handle, the minor number will be interpreted as a priority
tier if it is less than or equal to the number of configured priority
tiers.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  412 +-
 1 file changed, 404 insertions(+), 8 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index fecd9caac0cc..5d9fdfd083c9 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -296,6 +296,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb,
 
 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
 
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   1, 1, 1, 1, 1, 1, 1, 1,
+   2, 2, 2, 2, 2, 2, 2, 2,
+   3, 3, 3, 3, 3, 3, 3, 3,
+   4, 4, 4, 4, 4, 4, 4, 4,
+   5, 5, 5, 5, 5, 5, 5, 5,
+   6, 6, 6, 6, 6, 6, 6, 6,
+   7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+   2, 5, 1, 2, 4, 2, 2, 2,
+   0, 2, 1, 2, 1, 2, 1, 2,
+   5, 2, 4, 2, 4, 2, 4, 2,
+   3, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 3, 2, 3, 2, 3, 2,
+   6, 2, 2, 2, 6, 2, 6, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+   7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+   0, 2, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   2, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 2, 0, 2, 0, 2, 0,
+   3, 0, 0, 0, 3, 0, 3, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+   3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+   0, 0, 0, 0, 2, 0, 0, 0,
+   1, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 2, 0, 2, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+   2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
+
 #define REC_INV_SQRT_CACHE (16)
 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
 
@@ -1382,6 +1444,46 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
return idx + (tin << 16);
 }
 
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   case htons(ETH_P_IPV6):
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   break;
+   default:
+   break;
+   }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+   u8 dscp;
+
+   switch (skb->protocol) {
+   case htons(ETH_P_IP):
+   dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_IPV6):
+   dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+   if (wash && dscp)
+   ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+   return dscp;
+
+   case htons(ETH_P_ARP):
+   return 0x38;  /* CS7 - Net Cont

[Cake] [PATCH net-next v16 4/8] netfilter: Add nf_ct_get_tuple_skb callback

2018-05-28 Thread Toke Høiland-Jørgensen
This adds a callback to netfilter to extract a conntrack tuple from an skb
that works before the _nfct skb field has been initialised (e.g., in an
ingress qdisc). The tuple is copied to the caller to avoid issues with
reference counting.

The callback will return false when conntrack is not loaded, allowing it to
be accessed without incurring a module dependency on conntrack. This is
used by the NAT mode in sch_cake.

Cc: netfilter-de...@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/linux/netfilter.h |6 ++
 net/netfilter/core.c  |   21 +
 net/netfilter/nf_conntrack_core.c |   37 +
 3 files changed, 64 insertions(+)

diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 85a1a0b32c66..7cbe7e9ce527 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -375,6 +375,12 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi 
*fl, u_int8_t family)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+
+struct nf_conntrack_tuple;
+extern bool (*skb_ct_get_tuple)(struct nf_conntrack_tuple *,
+   const struct sk_buff *) __rcu;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+const struct sk_buff *skb);
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 0f6b8172fb9a..520565198f0e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -572,6 +572,27 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
+bool (*skb_ct_get_tuple)(struct nf_conntrack_tuple *,
+const struct sk_buff *) __rcu __read_mostly;
+EXPORT_SYMBOL(skb_ct_get_tuple);
+
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+const struct sk_buff *skb)
+{
+   bool (*get_tuple)(const struct sk_buff *, struct nf_conntrack_tuple *);
+   bool ret = false;
+
+   rcu_read_lock();
+   get_tuple = rcu_dereference(skb_ct_get_tuple);
+   if (!get_tuple)
+   goto out;
+   ret = get_tuple(dst_tuple, skb);
+out:
+   rcu_read_unlock();
+   return ret;
+}
+EXPORT_SYMBOL(nf_ct_get_tuple_skb);
+
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
.id = NF_CT_DEFAULT_ZONE_ID,
diff --git a/net/netfilter/nf_conntrack_core.c 
b/net/netfilter/nf_conntrack_core.c
index 41ff04ee2554..eee5c76f638c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1611,6 +1611,41 @@ static void nf_conntrack_attach(struct sk_buff *nskb, 
const struct sk_buff *skb)
nf_conntrack_get(skb_nfct(nskb));
 }
 
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+  const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *src_tuple;
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+   return true;
+   }
+
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return false;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return false;
+
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+   nf_ct_put(ct);
+
+   return true;
+}
+
 /* Bring out ya dead! */
 static struct nf_conn *
 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
@@ -1808,6 +1843,7 @@ void nf_conntrack_cleanup_start(void)
 {
conntrack_gc_work.exiting = true;
RCU_INIT_POINTER(ip_ct_attach, NULL);
+   RCU_INIT_POINTER(skb_ct_get_tuple, NULL);
 }
 
 void nf_conntrack_cleanup_end(void)
@@ -2135,6 +2171,7 @@ void nf_conntrack_init_end(void)
/* For use by REJECT target */
RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
+   RCU_INIT_POINTER(skb_ct_get_tuple, nf_conntrack_get_tuple_skb);
 }
 
 /*

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v16 5/8] sch_cake: Add NAT awareness to packet classifier

2018-05-28 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Cc: netfilter-de...@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   46 ++
 1 file changed, 46 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 68ac908470f1..fecd9caac0cc 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,10 @@
 #include 
 #include 
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -516,6 +520,29 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+   struct nf_conntrack_tuple tuple = {};
+   bool rev = !skb->_nfct;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   if (!nf_ct_get_tuple_skb(&tuple, skb))
+   return;
+
+   keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
+   keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+   }
+#endif
+}
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -543,6 +570,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1919,6 +1949,18 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
if (err < 0)
return err;
 
+   if (tb[TCA_CAKE_NAT]) {
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+   NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
+   "No conntrack support in kernel");
+   return -EOPNOTSUPP;
+#endif
+   }
+
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
 
@@ -2091,6 +2133,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v16 2/8] sch_cake: Add ingress mode

2018-05-28 Thread Toke Høiland-Jørgensen
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). The mode changes the shaper
to also account dropped packets to the shaped rate, as these have already
traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The scaling filters out spikes by an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   85 --
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ea4aa261cec..10e208e4255d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
   struct cobalt_params *p,
   ktime_t now,
-  struct sk_buff *skb)
+  struct sk_buff *skb,
+  u32 bulk_flows)
 {
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
  sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
@@ -913,6 +915,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct 
sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
 
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
 
@@ -990,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
cake_heapify_up(q, b->overflow_idx[idx]);
 
/* incoming bandwidth capacity estimate */
-   q->avg_window_bytes = 0;
-   q->last_packet_time = now;
+   if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+   u64 packet_interval = \
+   ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+   if (packet_interval > NSEC_PER_SEC)
+   packet_interval = NSEC_PER_SEC;
+
+   /* filter out short-term bursts, eg. wifi aggregation */
+   q->avg_packet_interval = \
+   cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+   q->last_packet_time = now;
+
+   if (packet_interval > q->avg_packet_interval) {
+   u64 window_interval = \
+   ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+   u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+   do_div(b, window_interval);
+   q->avg_peak_bandwidth =
+   cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+   q->avg_window_bytes = 0;
+   q->avg_window_begin = now;
+
+   if (ktime_after(now,
+   ktime_add_ms(q->last_reconfig_time,
+250))) {
+   q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+   cake_reconfigure(sch);
+   }
+   }
+   } else {
+   q->avg_window_bytes = 0;
+   q->last_packet_time = now;

[Cake] [PATCH net-next v16 1/8] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-28 Thread Toke Høiland-Jørgensen
sch_cake targets the home router use case and is intended to squeeze the
most bandwidth and latency out of even the slowest ISP links and routers,
while presenting an API simple enough that even an ISP can configure it.

Example of use on a cable ISP uplink:

tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter

To shape a cable download link (ifb and tc-mirred setup elided)

tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash

CAKE is filled with:

* A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel
  derived Flow Queuing system, which autoconfigures based on the bandwidth.
* A novel "triple-isolate" mode (the default) which balances per-host
  and per-flow FQ even through NAT.
* A deficit-based shaper, that can also be used in an unlimited mode.
* 8 way set associative hashing to reduce flow collisions to a minimum.
* A reasonable interpretation of various diffserv latency/loss tradeoffs.
* Support for zeroing diffserv markings for entering and exiting traffic.
* Support for interacting well with Docsis 3.0 shaper framing.
* Extensive support for DSL framing types.
* Support for ack filtering.
* Extensive statistics for measuring loss, ECN markings, and latency
  variation.

A paper describing the design of CAKE is available at
https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE
International Symposium on Local and Metropolitan Area Networks (LANMAN).

This patch adds the base shaper and packet scheduler, while subsequent
commits add the optional (configurable) features. The full userspace API
and most data structures are included in this commit, but options not
understood in the base version will be ignored.

Various versions baking have been available as an out of tree build for
kernel versions going back to 3.10, as the embedded router world has been
running a few years behind mainline Linux. A stable version has been
generally available on lede-17.01 and later.

sch_cake replaces a combination of iptables, tc filter, htb and fq_codel
in the sqm-scripts, with sane defaults and vastly simpler configuration.

CAKE's principal author is Jonathan Morton, with contributions from
Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller,
Ryan Mounce, Guido Sarducci, Dean Scarff, Nils Andreas Svee, Dave Täht,
and Loganaden Velvindron.

Testing from Pete Heist, Georgios Amanakis, and the many other members of
the cake@lists.bufferbloat.net mailing list.

tc -s qdisc show dev eth2
qdisc cake 1: root refcnt 2 bandwidth 100Mbit diffserv3 triple-isolate rtt 
100.0ms raw overhead 0
 Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 0b 0p requeues 0
 memory used: 0b of 500b
 capacity estimate: 100Mbit
 min/max network layer size:65535 /   0
 min/max overhead-adjusted size:65535 /   0
 average network hdr offset:0

   Bulk  Best EffortVoice
  thresh   6250Kbit  100Mbit   25Mbit
  target  5.0ms5.0ms5.0ms
  interval  100.0ms  100.0ms  100.0ms
  pk_delay  0us  0us  0us
  av_delay  0us  0us  0us
  sp_delay  0us  0us  0us
  pkts000
  bytes   000
  way_inds000
  way_miss000
  way_cols000
  drops   000
  marks   000
  ack_drop000
  sp_flows000
  bk_flows000
  un_flows000
  max_len 000
  quantum   300 1514  762

Tested-by: Pete Heist 
Tested-by: Georgios Amanakis 
Signed-off-by: Dave Taht 
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/uapi/linux/pkt_sched.h |  113 ++
 net/sched/Kconfig  |   11 
 net/sched/Makefile |1 
 net/sched/sch_cake.c   | 1850 
 4 files changed, 1975 insertions(+)
 create mode 100644 net/sched/sch_cake.c

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..07648e6ea569 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -934,4 +934,117 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+/* CAKE */
+enum {
+   TCA_CAKE_UNSPEC,
+   TCA_CAKE_PAD,
+   TCA_CAKE_BASE_RATE64,
+   TCA_CAKE_DIFFSERV_MODE,
+   TCA_CAKE_ATM,
+   TCA_CAKE_FLOW_MODE,
+   TCA_CAKE_OVERHEAD,
+   TCA_CAKE_RTT,
+   TCA_CAKE_TARGET,
+   TCA_CAKE_AUTORATE,
+   TCA_CAKE_MEMORY,
+   TCA_CAKE_NAT,
+   TCA_CAKE_RAW,
+   TCA_CAKE_WASH,
+   TCA_CAKE_MPU,
+   TCA_CAKE_INGRESS,
+ 

[Cake] [PATCH net-next v16 7/8] sch_cake: Add overhead compensation support to the rate shaper

2018-05-28 Thread Toke Høiland-Jørgensen
This commit adds configurable overhead compensation support to the rate
shaper. With this feature, userspace can configure the actual bottleneck
link overhead and encapsulation mode used, which will be used by the shaper
to calculate the precise duration of each packet on the wire.

This feature is needed because CAKE is often deployed one or two hops
upstream of the actual bottleneck (which can be, e.g., inside a DSL or
cable modem). In this case, the link layer characteristics and overhead
reported by the kernel does not match the actual bottleneck. Being able to
set the actual values in use makes it possible to configure the shaper rate
much closer to the actual bottleneck rate (our experience shows it is
possible to get with 0.1% of the actual physical bottleneck rate), thus
keeping latency low without sacrificing bandwidth.

The overhead compensation has three tunables: A fixed per-packet overhead
size (which, if set, will be accounted from the IP packet header), a
minimum packet size (MPU) and a framing mode supporting either ATM or PTM
framing. We include a set of common keywords in TC to help users configure
the right parameters. If no overhead value is set, the value reported by
the kernel is used.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  124 ++
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 5d9fdfd083c9..54cde99fc0ad 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -270,6 +270,7 @@ enum {
 
 struct cobalt_skb_cb {
ktime_t enqueue_time;
+   u32 adjusted_len;
 };
 
 static u64 us_to_ns(u64 us)
@@ -1282,6 +1283,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
 }
 
+/* Apply the configured overhead compensation to a packet of length @len
+ * (@off = network header offset). If CAKE_FLAG_OVERHEAD is set, the
+ * overhead is accounted from the IP header (MAC-layer bytes stripped
+ * first). Records min/max of both the raw network-layer length and the
+ * adjusted length for the stats output, enforces the configured minimum
+ * packet size (MPU), and models ATM (48->53 byte cell) or PTM framing.
+ * Returns the adjusted on-wire length the shaper should charge.
+ */
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+   if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+   len -= off;
+
+   if (q->max_netlen < len)
+   q->max_netlen = len;
+   if (q->min_netlen > len)
+   q->min_netlen = len;
+
+   len += q->rate_overhead;
+
+   if (len < q->rate_mpu)
+   len = q->rate_mpu;
+
+   if (q->atm_mode == CAKE_ATM_ATM) {
+   /* Round up to whole 48-byte ATM cells, 53 bytes each on wire */
+   len += 47;
+   len /= 48;
+   len *= 53;
+   } else if (q->atm_mode == CAKE_ATM_PTM) {
+   /* Add one byte per 64 bytes or part thereof.
+* This is conservative and easier to calculate than the
+* precise value.
+*/
+   len += (len + 63) / 64;
+   }
+
+   if (q->max_adjlen < len)
+   q->max_adjlen = len;
+   if (q->min_adjlen > len)
+   q->min_adjlen = len;
+
+   return len;
+}
+
+/* Compute the shaper's overhead-adjusted length for @skb. Non-GSO
+ * packets go straight to cake_calc_overhead(); for GSO super-packets the
+ * per-segment header size is reconstructed (logic borrowed from
+ * qdisc_pkt_len_init()) and the overhead is applied to each segment
+ * individually, so the shaper charges what the segments will actually
+ * cost on the wire. Also feeds the EWMA of the network header offset.
+ */
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+   const struct skb_shared_info *shinfo = skb_shinfo(skb);
+   unsigned int hdr_len, last_len = 0;
+   u32 off = skb_network_offset(skb);
+   u32 len = qdisc_pkt_len(skb);
+   u16 segs = 1;
+
+   /* offset tracked in 16.16 fixed point for the stats output */
+   q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+   if (!shinfo->gso_size)
+   return cake_calc_overhead(q, len, off);
+
+   /* borrowed from qdisc_pkt_len_init() */
+   hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+   /* + transport layer */
+   if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+   SKB_GSO_TCPV6))) {
+   const struct tcphdr *th;
+   struct tcphdr _tcphdr;
+
+   th = skb_header_pointer(skb, skb_transport_offset(skb),
+   sizeof(_tcphdr), &_tcphdr);
+   if (likely(th))
+   hdr_len += __tcp_hdrlen(th);
+   } else {
+   struct udphdr _udphdr;
+
+   if (skb_header_pointer(skb, skb_transport_offset(skb),
+  sizeof(_udphdr), &_udphdr))
+   hdr_len += sizeof(struct udphdr);
+   }
+
+   /* SKB_GSO_DODGY: gso_segs is untrusted, so recompute it */
+   if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+   segs = DIV_ROUND_UP(skb->len - hdr_len,
+   shinfo->gso_size);
+   else
+   segs = shinfo->gso_segs;
+
+   /* full-size segments, plus the (possibly shorter) last segment */
+   len = shinfo->gso_size + hdr_len;
+   last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+   return (cake_calc_overhead(q, len, off) * (segs - 1) +
+   cake_calc_overhead(q, last_len, off));
+}
+
 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
 {
struct cake_heap_entry ii = q->overflow_heap[i];
@@ -1359,7 +1442,7 @@ static int cake_advance_shaper(struct cake_sched_data *q,
   struct sk_buff *skb,
   ktime_t now, bool drop)
 {
-   u32 len = q

[Cake] [PATCH net-next v16 0/8] sched: Add Common Applications Kept Enhanced (cake) qdisc

2018-05-28 Thread Toke Høiland-Jørgensen
This patch series adds the CAKE qdisc, and has been split up to ease
review.

I have attempted to split out each configurable feature into its own patch.
The first commit adds the base shaper and packet scheduler, while
subsequent commits add the optional features. The full userspace API and
most data structures are included in this commit, but options not
understood in the base version will be ignored.

The result of applying the entire series is identical to the out of tree
version that have seen extensive testing in previous deployments, most
notably as an out of tree patch to OpenWrt. However, note that I have only
compile tested the individual patches; so the whole series should be
considered as a unit.

---
Changelog

v16:
  - Move conntrack lookup function into conntrack core and read it via
RCU so it is only active when the nf_conntrack module is loaded.
This avoids the module dependency on conntrack for NAT mode. Thanks
to Pablo for the idea.

v15:
  - Handle ECN flags in ACK filter

v14:
  - Handle seqno wraps and DSACKs in ACK filter

v13:
  - Avoid ktime_t to scalar compares
  - Add class dumping and basic stats
  - Fail with ENOTSUPP when requesting NAT mode and conntrack is not
available.
  - Parse all TCP options in ACK filter and make sure to only drop safe
ones. Also handle SACK ranges properly.

v12:
  - Get rid of custom time typedefs. Use ktime_t for time and u64 for
duration instead.

v11:
  - Fix overhead compensation calculation for GSO packets
  - Change configured rate to be u64 (I ran out of bits before I ran out
of CPU when testing the effects of the above)

v10:
  - Christmas tree gardening (fix variable declarations to be in reverse
line length order)

v9:
  - Remove duplicated checks around kvfree() and just call it
unconditionally.
  - Don't pass __GFP_NOWARN when allocating memory
  - Move options in cake_dump() that are related to optional features to
later patches implementing the features.
  - Support attaching filters to the qdisc and use the classification
result to select flow queue.
  - Support overriding diffserv priority tin from skb->priority

v8:
  - Remove inline keyword from function definitions
  - Simplify ACK filter; remove the complex state handling to make the
logic easier to follow. This will potentially be a bit less efficient,
but I have not been able to measure a difference.

v7:
  - Split up patch into a series to ease review.
  - Constify the ACK filter.

v6:
  - Fix 6in4 encapsulation checks in ACK filter code
  - Checkpatch fixes

v5:
  - Refactor ACK filter code and hopefully fix the safety issues
properly this time.

v4:
  - Only split GSO packets if shaping at speeds <= 1Gbps
  - Fix overhead calculation code to also work for GSO packets
  - Don't re-implement kvzalloc()
  - Remove local header include from out-of-tree build (fixes kbuild-bot
complaint).
  - Several fixes to the ACK filter:
- Check pskb_may_pull() before deref of transport headers.
- Don't run ACK filter logic on split GSO packets
- Fix TCP sequence number compare to deal with wraparounds

v3:
  - Use IS_REACHABLE() macro to fix compilation when sch_cake is
built-in and conntrack is a module.
  - Switch the stats output to use nested netlink attributes instead
of a versioned struct.
  - Remove GPL boilerplate.
  - Fix array initialisation style.

v2:
  - Fix kbuild test bot complaint
  - Clean up the netlink ABI
  - Fix checkpatch complaints
  - A few tweaks to the behaviour of cake based on testing carried out
while writing the paper.


---

Toke Høiland-Jørgensen (8):
  sched: Add Common Applications Kept Enhanced (cake) qdisc
  sch_cake: Add ingress mode
  sch_cake: Add optional ACK filter
  netfilter: Add nf_ct_get_tuple_skb callback
  sch_cake: Add NAT awareness to packet classifier
  sch_cake: Add DiffServ handling
  sch_cake: Add overhead compensation support to the rate shaper
  sch_cake: Conditionally split GSO segments


 include/linux/netfilter.h |6 
 include/uapi/linux/pkt_sched.h|  113 +
 net/netfilter/core.c  |   21 
 net/netfilter/nf_conntrack_core.c |   37 
 net/sched/Kconfig |   11 
 net/sched/Makefile|1 
 net/sched/sch_cake.c  | 2987 +
 7 files changed, 3176 insertions(+)
 create mode 100644 net/sched/sch_cake.c

___
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


[Cake] [PATCH net-next v16 3/8] sch_cake: Add optional ACK filter

2018-05-28 Thread Toke Høiland-Jørgensen
The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* bytes than the new packet being enqueued, including any
SACK options. This prevents duplicate ACKs from being filtered, to avoid
interfering with retransmission logic. In addition, we check TCP header
options and only drop those that are known to not interfere with sender
state. In particular, packets with unknown option codes are never dropped.

In aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as separate module (as the TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Cc: Yuchung Cheng 
Cc: Neal Cardwell 
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |  453 ++
 1 file changed, 451 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10e208e4255d..68ac908470f1 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -757,6 +757,432 @@ static void flow_queue_add(struct cake_flow *flow, struct 
sk_buff *skb)
skb->next = NULL;
 }
 
+/* Return a pointer to the IP header of @skb, using @buf as scratch space
+ * (sized as ipv6hdr, the larger of the two, so either header fits). A
+ * 6in4 packet (IPv4 carrying IPPROTO_IPV6) yields the *inner* IPv6
+ * header. Returns NULL if no plausible IP header can be read.
+ */
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+   struct ipv6hdr *buf)
+{
+   unsigned int offset = skb_network_offset(skb);
+   struct iphdr *iph;
+
+   iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+   if (!iph)
+   return NULL;
+
+   /* 6in4 encapsulation: skip the outer IPv4 header (ihl is in
+    * 32-bit words) and return the encapsulated IPv6 header.
+    */
+   if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+   return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+   else if (iph->version == 4)
+   return iph;
+
+   else if (iph->version == 6)
+   return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+   return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+   unsigned int offset = skb_network_offset(skb);
+   const struct ipv6hdr *ipv6h;
+   const struct tcphdr *tcph;
+   const struct iphdr *iph;
+   struct ipv6hdr _ipv6h;
+   struct tcphdr _tcph;
+
+   ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+   if (!ipv6h)
+   return NULL;
+
+   if (ipv6h->version == 4) {
+   iph = (struct iphdr *)ipv6h;
+   offset += iph->ihl * 4;
+

[Cake] [PATCH net-next v17 4/8] netfilter: Add nf_ct_get_tuple_skb global lookup function

2018-05-28 Thread Toke Høiland-Jørgensen
This adds a global netfilter function to extract a conntrack tuple from an
skb. The function uses a new function added to nf_ct_hook, which will try
to get the tuple from skb->_nfct, and do a full lookup if that fails. This
makes it possible to use the lookup function before the skb has passed
through the conntrack init hooks (e.g., in an ingress qdisc). The tuple is
copied to the caller to avoid issues with reference counting.

The function returns false if conntrack is not loaded, allowing it to be
used without incurring a module dependency on conntrack. This is used by
the NAT mode in sch_cake.

Cc: netfilter-devel@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen 
---
 include/linux/netfilter.h |   11 +++
 net/netfilter/core.c  |   15 +++
 net/netfilter/nf_conntrack_core.c |   36 
 3 files changed, 62 insertions(+)

diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 04551af2ff23..d7be35ab7967 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -388,8 +388,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi 
*fl, u_int8_t family)
 
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+const struct sk_buff *skb);
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+  const struct sk_buff *skb)
+{
+   return false;
+}
 #endif
 
 struct nf_conn;
@@ -398,6 +407,8 @@ enum ip_conntrack_info;
 struct nf_ct_hook {
int (*update)(struct net *net, struct sk_buff *skb);
void (*destroy)(struct nf_conntrack *);
+   bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ const struct sk_buff *);
 };
 extern struct nf_ct_hook __rcu *nf_ct_hook;
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 168af54db975..dc240cb47ddf 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -603,6 +603,21 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
+/* Copy the conntrack tuple for @skb into @dst_tuple via the nf_ct_hook
+ * indirection. Returns false when the conntrack module is not loaded
+ * (hook unset) or the hook could not produce a tuple. The RCU read lock
+ * protects the hook pointer for the duration of the call, so callers
+ * incur no module dependency on conntrack.
+ */
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+const struct sk_buff *skb)
+{
+   struct nf_ct_hook *ct_hook;
+   bool ret = false;
+
+   rcu_read_lock();
+   ct_hook = rcu_dereference(nf_ct_hook);
+   if (ct_hook)
+   ret = ct_hook->get_tuple_skb(dst_tuple, skb);
+   rcu_read_unlock();
+   return ret;
+}
+EXPORT_SYMBOL(nf_ct_get_tuple_skb);
+
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
.id = NF_CT_DEFAULT_ZONE_ID,
diff --git a/net/netfilter/nf_conntrack_core.c 
b/net/netfilter/nf_conntrack_core.c
index 3465da2a98bd..85ab2fd6a665 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1683,6 +1683,41 @@ static int nf_conntrack_update(struct net *net, struct 
sk_buff *skb)
return 0;
 }
 
+/* nf_ct_hook::get_tuple_skb implementation: copy the conntrack tuple for
+ * @skb into @dst_tuple. Fast path uses the conntrack entry already
+ * attached to the skb; otherwise (e.g. before the conntrack hooks have
+ * run, as in an ingress qdisc) the tuple is parsed from the packet
+ * (IPv4 only here) and resolved with a full table lookup. The tuple is
+ * copied out, so no conntrack reference is held on return.
+ */
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+  const struct sk_buff *skb)
+{
+   const struct nf_conntrack_tuple *src_tuple;
+   const struct nf_conntrack_tuple_hash *hash;
+   struct nf_conntrack_tuple srctuple;
+   enum ip_conntrack_info ctinfo;
+   struct nf_conn *ct;
+
+   /* Fast path: conntrack entry already attached to the skb. */
+   ct = nf_ct_get(skb, &ctinfo);
+   if (ct) {
+   src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+   memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+   return true;
+   }
+
+   /* Slow path: build a tuple from the packet and look it up in the
+    * default zone of the device's namespace.
+    */
+   if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+  NFPROTO_IPV4, dev_net(skb->dev),
+  &srctuple))
+   return false;
+
+   hash = nf_conntrack_find_get(dev_net(skb->dev),
+&nf_ct_zone_dflt,
+&srctuple);
+   if (!hash)
+   return false;
+
+   ct = nf_ct_tuplehash_to_ctrack(hash);
+   /* Take the tuple for the direction opposite the one that matched. */
+   src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+   memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+   /* Drop the reference taken by nf_conntrack_find_get(). */
+   nf_ct_put(ct);
+
+   return true;
+}
+
 /* Bring out ya dead! */
 static struct nf_conn *
 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
@@ -2204,6 +2239,7 @@ int nf_conntrack_init_start(void)
 static struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
.destroy= destroy_conntrack,
+   .get_tuple_skb  = nf_conntrack_get_tuple_skb,
 };
 
 void nf_conntrack_init_end(void)

___
Cake mailing list
C

[Cake] [PATCH net-next v17 8/8] sch_cake: Conditionally split GSO segments

2018-05-28 Thread Toke Høiland-Jørgensen
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth. For this reason, we split GSO segments into their individual
packets iff the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   99 +-
 1 file changed, 73 insertions(+), 26 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10a968d43d44..13256b6e2440 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -80,6 +80,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (12500) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:  codel initial drop rate
@@ -1638,36 +1639,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct 
Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
 
-   cobalt_set_enqueue_time(skb, now);
-   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-   flow_queue_add(flow, skb);
-
-   if (q->ack_filter)
-   ack = cake_ack_filter(q, flow);
+   if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+   struct sk_buff *segs, *nskb;
+   netdev_features_t features = netif_skb_features(skb);
+   unsigned int slen = 0;
+
+   segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+   if (IS_ERR_OR_NULL(segs))
+   return qdisc_drop(skb, sch, to_free);
+
+   while (segs) {
+   nskb = segs->next;
+   segs->next = NULL;
+   qdisc_skb_cb(segs)->pkt_len = segs->len;
+   cobalt_set_enqueue_time(segs, now);
+   get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+   flow_queue_add(flow, segs);
+
+   sch->q.qlen++;
+   slen += segs->len;
+   q->buffer_used += segs->truesize;
+   b->packets++;
+   segs = nskb;
+   }
 
-   if (ack) {
-   b->ack_drops++;
-   sch->qstats.drops++;
-   b->bytes += qdisc_pkt_len(ack);
-   len -= qdisc_pkt_len(ack);
-   q->buffer_used += skb->truesize - ack->truesize;
-   if (q->rate_flags & CAKE_FLAG_INGRESS)
-   cake_advance_shaper(q, b, ack, now, true);
+   /* stats */
+   b->bytes+= slen;
+   b->backlogs[idx]+= slen;
+   b->tin_backlog  += slen;
+   sch->qstats.backlog += slen;
+   q->avg_window_bytes += slen;
 
-   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-   consume_skb(ack);
+   qdisc_tree_reduce_backlog(sch, 1, len);
+   consume_skb(skb);
} else {
-   sch->q.qlen++;
-   q->buffer_used  += skb->truesize;
-   }
+   /* not splitting */
+   cobalt_set_enqueue_time(skb, now);
+   get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+   flow_queue_add(flow, skb);
+
+   if (q->ack_filter)
+   ack = cake_ack_filter(q, flow);
+
+   if (ack) {
+   b->ack_drops++;
+   sch->qstats.drops++;
+   b->bytes += qdisc_pkt_len(ack);
+   len -= qdisc_pkt_len(ack);
+   q->buffer_used += skb->truesize - ack->truesize;
+   if (q->rate_flags & CAKE_FLAG_INGRESS)
+   cake_advance_shaper(q, b, ack, now, true);
+
+   qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+   consume_skb(ack);
+   } else {
+   sch->q.qlen++;
+   q->buffer_used  += skb->truesize;
+   }
 
-   /* stats */
-   b->packets++;
-   b->bytes+= len;
-   b->backlogs[idx]+= len;
-   b->tin_backlog  += len;
-   sch->qstats.backlog += len;
-   q->avg_window_bytes += len;
+   /* stats */
+   b->packets++;
+   b->bytes+= len;
+   b->backlogs[idx]

[Cake] [PATCH net-next v17 5/8] sch_cake: Add NAT awareness to packet classifier

2018-05-28 Thread Toke Høiland-Jørgensen
When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query the
kernel conntrack mechanism to obtain the pre-NAT addresses for each packet
and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the cost
of this lookup is negligible. However, in unlimited mode with no NAT being
performed, there is a significant CPU cost at higher bandwidths. For this
reason, the feature is turned off by default.

Cc: netfilter-devel@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen 
---
 net/sched/sch_cake.c |   46 ++
 1 file changed, 46 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 68ac908470f1..c2cba5f32351 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,10 @@
 #include 
 #include 
 
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include 
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -516,6 +520,29 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
return drop;
 }
 
+static void cake_update_flowkeys(struct flow_keys *keys,
+const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+   struct nf_conntrack_tuple tuple = {};
+   bool rev = !skb->_nfct;
+
+   if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+   return;
+
+   if (!nf_ct_get_tuple_skb(&tuple, skb))
+   return;
+
+   keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+   keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+
+   if (keys->ports.ports) {
+   keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
+   keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+   }
+#endif
+}
+
 /* Cake has several subtle multiple bit settings. In these cases you
  *  would be matching triple isolate mode as well.
  */
@@ -543,6 +570,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct 
sk_buff *skb,
skb_flow_dissect_flow_keys(skb, &keys,
   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+   if (flow_mode & CAKE_FLOW_NAT_FLAG)
+   cake_update_flowkeys(&keys, skb);
+
/* flow_hash_from_keys() sorts the addresses by value, so we have
 * to preserve their order in a separate data structure to treat
 * src and dst host addresses as independently selectable.
@@ -1919,6 +1949,18 @@ static int cake_change(struct Qdisc *sch, struct nlattr 
*opt,
if (err < 0)
return err;
 
+   if (tb[TCA_CAKE_NAT]) {
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+   q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+   q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+   !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+   NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
+   "No conntrack support in kernel");
+   return -EOPNOTSUPP;
+#endif
+   }
+
if (tb[TCA_CAKE_BASE_RATE64])
q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
 
@@ -2091,6 +2133,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff 
*skb)
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
goto nla_put_failure;
 
+   if (nla_put_u32(skb, TCA_CAKE_NAT,
+   !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+   goto nla_put_failure;
+
return nla_nest_end(skb, opts);
 
 nla_put_failure:

___________________________________________
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake


<    1   2   3   4   5   6   7   >