On Wed, May 9, 2012 at 8:50 AM, Eric Dumazet <[email protected]> wrote: > From: Eric Dumazet <[email protected]> > > An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson. > > http://queue.acm.org/detail.cfm?id=2209336 > > Based on initial work from Dave Taht. > > Refactored to help future codel inclusion as plugin for other linux > qdisc (SFQ), like done with RED plugin. > > Tested up to 10Gb speeds with no particular problems. > > Signed-off-by: Eric Dumazet <[email protected]> > Signed-off-by: Dave Taht <[email protected]> > Cc: Kathleen Nichols <[email protected]> > Cc: Van Jacobson <[email protected]> > --- > v12: algo changes after Kathleen & Van last updates > - introduction of lastcount. > - minbytes renamed to maxpacket. > - maxpacket automatically learns biggest packet size. > - ldelay record sojourn time of last dequeued packet. > - various changes, better comments... > > I hope this is the last version before upstream submission (netdev) > I'll send the iproute2 patch as well. 
> > include/linux/pkt_sched.h | 28 +++ > include/net/codel.h | 325 ++++++++++++++++++++++++++++++++++++ > net/sched/Kconfig | 11 + > net/sched/Makefile | 1 > net/sched/sch_codel.c | 272 ++++++++++++++++++++++++++++++ > 5 files changed, 637 insertions(+) > > diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h > index ffe975c..453dd2c 100644 > --- a/include/linux/pkt_sched.h > +++ b/include/linux/pkt_sched.h > @@ -655,4 +655,32 @@ struct tc_qfq_stats { > __u32 lmax; > }; > > +/* CODEL */ > + > +enum { > + TCA_CODEL_UNSPEC, > + TCA_CODEL_TARGET, > + TCA_CODEL_LIMIT, > + TCA_CODEL_INTERVAL, > + TCA_CODEL_ECN, > + __TCA_CODEL_MAX > +}; > + > +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) > + > +struct tc_codel_xstats { > + __u32 maxpacket; /* largest packet we've seen so far */ > + __u32 count; > + __u32 lastcount; > + __u32 ldelay; /* in-queue delay seen by most recently dequeued > packet */ > + __u32 drop_next; > + __u32 drop_overlimit; > + __u32 ecn_mark; > + __u32 dropping; > + __u32 state1; > + __u32 state2; > + __u32 state3; > + __u32 states; > +}; > + > #endif > diff --git a/include/net/codel.h b/include/net/codel.h > new file mode 100644 > index 0000000..565c1fe > --- /dev/null > +++ b/include/net/codel.h > @@ -0,0 +1,325 @@ > +#ifndef __NET_SCHED_CODEL_H > +#define __NET_SCHED_CODEL_H > + > +/* > + * Codel - The Controlled-Delay Active Queue Management algorithm > + * > + * Copyright (C) 2011-2012 Kathleen Nichols <[email protected]> > + * Copyright (C) 2011-2012 Van Jacobson <[email protected]> > + * Copyright (C) 2012 Michael D. Taht <[email protected]> > + * Copyright (C) 2012 Eric Dumazet <[email protected]> > + * > + * Redistribution and use in source and binary forms, with or without > + * modification, are permitted provided that the following conditions > + * are met: > + * 1. Redistributions of source code must retain the above copyright > + * notice, this list of conditions, and the following disclaimer, > + * without modification. 
> + * 2. Redistributions in binary form must reproduce the above copyright > + * notice, this list of conditions and the following disclaimer in the > + * documentation and/or other materials provided with the distribution. > + * 3. The names of the authors may not be used to endorse or promote products > + * derived from this software without specific prior written permission. > + * > + * Alternatively, provided that this notice is retained in full, this > + * software may be distributed under the terms of the GNU General > + * Public License ("GPL") version 2, in which case the provisions of the > + * GPL apply INSTEAD OF those given above. > + * > + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS > + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT > + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR > + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT > + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, > + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT > + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, > + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY > + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT > + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE > + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH > + * DAMAGE. > + * > + */ > + > +#include <linux/types.h> > +#include <linux/bug.h> > +#include <linux/ktime.h> > +#include <net/inet_ecn.h> > + > +/* Controlling Queue Delay (CoDel) algorithm > + * ========================================= > + * Source : Kathleen Nichols and Van Jacobson > + * http://queue.acm.org/detail.cfm?id=2209396
This link is dead. Looks like it's a typo and should be: http://queue.acm.org/detail.cfm?id=2209336 Josh > + * > + * Implemented on linux by Dave Taht and Eric Dumazet > + */ > + > + > +/* > + * CoDel uses a 1024 nsec clock, encoded in u32 > + * This gives a range of 2199 seconds, because of signed compares > + */ > +typedef u32 codel_time_t; > +#define CODEL_SHIFT 10 > +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) > + > +static inline codel_time_t codel_get_time(void) > +{ > + u64 ns = ktime_to_ns(ktime_get()); > + > + return ns >> CODEL_SHIFT; > +} > + > +#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0) > +#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0) > +#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0) > +#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0) > + > +struct codel_skb_cb { > + codel_time_t enqueue_time; > +}; > + > +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) > +{ > + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); > + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; > +} > + > +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) > +{ > + return get_codel_cb(skb)->enqueue_time; > +} > + > +static void codel_set_enqueue_time(struct sk_buff *skb) > +{ > + get_codel_cb(skb)->enqueue_time = codel_get_time(); > +} > + > +static inline u32 codel_time_to_us(codel_time_t val) > +{ > + u64 valns = ((u64)val << CODEL_SHIFT); > + > + do_div(valns, NSEC_PER_USEC); > + return (u32)valns; > +} > + > +struct codel_params { > + u32 maxpacket; /* largest packet we've seen so far */ > + codel_time_t target; /* target queue size (in time units) */ > + codel_time_t interval; /* width of moving time window */ > + bool ecn; /* is ECN enabled */ > +}; > + > +struct codel_vars { > + u32 count; /* how many drops we've done since the last > time > + * we entered dropping state > + */ > + u32 lastcount; /* count at entry to dropping state */ > + bool dropping; /* 
set to true if in dropping state */ > + > + codel_time_t first_above_time; /* when we went (or will go) > continuously > + * above target for interval > + */ > + codel_time_t drop_next; /* time to drop next packet, or when > we dropped last */ > + codel_time_t ldelay; /* sojourn time of last dequeued packet */ > +}; > + > +/* contains stats and some shared info */ > +struct codel_stats { > + struct Qdisc *sch; > + u32 drop_count; /* temp count of dropped packets in > dequeue() */ > + > + u32 ecn_mark; /* number of packets we ECN marked instead > of dropping */ > + u32 states; /* number of codel_dequeue() calls */ > + u32 state1; /* number of times ok_to_drop was set to true > */ > + u32 state2; > + u32 state3; > +}; > + > +static void codel_params_init(struct codel_params *params, > + const struct Qdisc *sch) > +{ > + params->maxpacket = 256; > + params->interval = MS2TIME(100); > + params->target = MS2TIME(5); > + params->ecn = false; > +} > + > +static void codel_vars_init(struct codel_vars *vars) > +{ > + vars->drop_next = 0; > + vars->first_above_time = 0; > + vars->dropping = false; /* exit dropping state */ > + vars->count = 0; > + vars->lastcount = 0; > +} > + > +static void codel_stats_init(struct codel_stats *stats, > + struct Qdisc *sch) > +{ > + stats->sch = sch; /* back pointer for qdisc_drop() calls */ > +} > + > +/* return interval/sqrt(x) with good precision */ > +static u32 codel_inv_sqrt(u32 _interval, u32 _x) > +{ > + u64 interval = _interval; > + unsigned long x = _x; > + > + /* Scale operands for max precision */ > + > +#if BITS_PER_LONG == 64 > + x <<= 32; /* On 64bit arches, we can prescale x by 32bits */ > + interval <<= 16; > +#endif > + > + while (x < (1UL << (BITS_PER_LONG - 2))) { > + x <<= 2; > + interval <<= 1; > + } > + do_div(interval, int_sqrt(x)); > + return (u32)interval; > +} > + > +static codel_time_t codel_control_law(codel_time_t t, > + codel_time_t interval, > + u32 count) > +{ > + return t + codel_inv_sqrt(interval, count); > 
+} > + > + > +static bool codel_should_drop(struct sk_buff *skb, > + unsigned int *backlog, > + struct codel_vars *vars, > + struct codel_params *params, > + struct codel_stats *stats, > + codel_time_t now) > +{ > + bool ok_to_drop; > + > + if (!skb) { > + vars->first_above_time = 0; > + return false; > + } > + > + vars->ldelay = now - codel_get_enqueue_time(skb); > + *backlog -= qdisc_pkt_len(skb); > + > + if (unlikely(qdisc_pkt_len(skb) > params->maxpacket)) > + params->maxpacket = qdisc_pkt_len(skb); > + > + if (codel_time_before(vars->ldelay, params->target) || > + *backlog <= params->maxpacket) { > + /* went below - stay below for at least interval */ > + vars->first_above_time = 0; > + return false; > + } > + ok_to_drop = false; > + if (vars->first_above_time == 0) { > + /* just went above from below. If we stay above > + * for at least interval we'll say it's ok to drop > + */ > + vars->first_above_time = now + params->interval; > + } else if (codel_time_after(now, vars->first_above_time)) { > + ok_to_drop = true; > + stats->state1++; > + } > + return ok_to_drop; > +} > + > +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, > + struct Qdisc *sch); > + > +static struct sk_buff *codel_dequeue(struct codel_params *params, > + struct codel_vars *vars, > + struct codel_stats *stats, > + codel_skb_dequeue_t dequeue_func, > + u32 *backlog) > +{ > + struct sk_buff *skb = dequeue_func(vars, stats->sch); > + codel_time_t now; > + bool drop; > + > + stats->states++; > + if (!skb) { > + vars->dropping = false; > + return skb; > + } > + now = codel_get_time(); > + drop = codel_should_drop(skb, backlog, > + vars, params, stats, > + now); > + if (vars->dropping) { > + if (!drop) { > + /* sojourn time below target - leave dropping state */ > + vars->dropping = false; > + } else if (codel_time_after_eq(now, vars->drop_next)) { > + stats->state2++; > + /* It's time for the next drop. Drop the current > + * packet and dequeue the next. 
The dequeue might > + * take us out of dropping state. > + * If not, schedule the next drop. > + * A large backlog might result in drop rates so high > + * that the next drop should happen now, > + * hence the while loop. > + */ > + while (vars->dropping && > + codel_time_after_eq(now, vars->drop_next)) { > + vars->count++; > + if (params->ecn && INET_ECN_set_ce(skb)) { > + stats->ecn_mark++; > + vars->drop_next = > + > codel_control_law(vars->drop_next, > + > params->interval, > + > vars->count); > + goto end; > + } > + qdisc_drop(skb, stats->sch); > + stats->drop_count++; > + skb = dequeue_func(vars, stats->sch); > + if (!codel_should_drop(skb, backlog, > + vars, params, stats, > now)) { > + /* leave dropping state */ > + vars->dropping = false; > + } else { > + /* and schedule the next drop */ > + vars->drop_next = > + > codel_control_law(vars->drop_next, > + > params->interval, > + > vars->count); > + } > + } > + } > + } else if (drop) { > + if (params->ecn && INET_ECN_set_ce(skb)) { > + stats->ecn_mark++; > + } else { > + qdisc_drop(skb, stats->sch); > + stats->drop_count++; > + > + skb = dequeue_func(vars, stats->sch); > + drop = codel_should_drop(skb, backlog, vars, params, > + stats, now); > + } > + vars->dropping = true; > + stats->state3++; > + /* > + * if min went above target close to when we last went below > it > + * assume that the drop rate that controlled the queue on the > + * last cycle is a good starting point to control it now. 
> + */ > + if (codel_time_before(now - vars->drop_next, > + 16 * params->interval)) { > + vars->count = vars->count - vars->lastcount + 1; > + } else { > + vars->count = 1; > + } > + vars->lastcount = vars->count; > + vars->drop_next = codel_control_law(now, params->interval, > + vars->count); > + } > +end: > + return skb; > +} > +#endif > diff --git a/net/sched/Kconfig b/net/sched/Kconfig > index 75b58f8..fadd252 100644 > --- a/net/sched/Kconfig > +++ b/net/sched/Kconfig > @@ -250,6 +250,17 @@ config NET_SCH_QFQ > > If unsure, say N. > > +config NET_SCH_CODEL > + tristate "Controlled Delay AQM (CODEL)" > + help > + Say Y here if you want to use the Controlled Delay (CODEL) > + packet scheduling algorithm. > + > + To compile this driver as a module, choose M here: the module > + will be called sch_codel. > + > + If unsure, say N. > + > config NET_SCH_INGRESS > tristate "Ingress Qdisc" > depends on NET_CLS_ACT > diff --git a/net/sched/Makefile b/net/sched/Makefile > index 8cdf4e2..30fab03 100644 > --- a/net/sched/Makefile > +++ b/net/sched/Makefile > @@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o > obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o > obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o > obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o > +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o > > obj-$(CONFIG_NET_CLS_U32) += cls_u32.o > obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o > diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c > new file mode 100644 > index 0000000..3112afa > --- /dev/null > +++ b/net/sched/sch_codel.c > @@ -0,0 +1,272 @@ > +/* > + * Codel - The Controlled-Delay Active Queue Management algorithm > + * > + * Copyright (C) 2011-2012 Kathleen Nichols <[email protected]> > + * Copyright (C) 2011-2012 Van Jacobson <[email protected]> > + * > + * Implemented on linux by : > + * Copyright (C) 2012 Michael D. 
Taht <[email protected]> > + * Copyright (C) 2012 Eric Dumazet <[email protected]> > + * > + * Redistribution and use in source and binary forms, with or without > + * modification, are permitted provided that the following conditions > + * are met: > + * 1. Redistributions of source code must retain the above copyright > + * notice, this list of conditions, and the following disclaimer, > + * without modification. > + * 2. Redistributions in binary form must reproduce the above copyright > + * notice, this list of conditions and the following disclaimer in the > + * documentation and/or other materials provided with the distribution. > + * 3. The names of the authors may not be used to endorse or promote products > + * derived from this software without specific prior written permission. > + * > + * Alternatively, provided that this notice is retained in full, this > + * software may be distributed under the terms of the GNU General > + * Public License ("GPL") version 2, in which case the provisions of the > + * GPL apply INSTEAD OF those given above. > + * > + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS > + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT > + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR > + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT > + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, > + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT > + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, > + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY > + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT > + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE > + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH > + * DAMAGE. 
> + * > + */ > + > +#include <linux/module.h> > +#include <linux/slab.h> > +#include <linux/types.h> > +#include <linux/kernel.h> > +#include <linux/errno.h> > +#include <linux/skbuff.h> > +#include <net/pkt_sched.h> > +#include <net/codel.h> > + > + > +#define DEFAULT_CODEL_LIMIT 1000 > + > +struct codel_sched_data { > + struct codel_params params; > + struct codel_vars vars; > + struct codel_stats stats; > + u32 drop_overlimit; > +}; > + > +/* This is the specific function called from codel_dequeue() > + * to dequeue a packet from queue. Note: backlog is handled in > + * codel, we dont need to reduce it here. > + */ > +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) > +{ > + struct sk_buff *skb = __skb_dequeue(&sch->q); > + > + prefetch(&skb->end); /* we'll need skb_shinfo() */ > + return skb; > +} > + > +static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) > +{ > + struct codel_sched_data *q = qdisc_priv(sch); > + struct sk_buff *skb; > + > + skb = codel_dequeue(&q->params, &q->vars, &q->stats, > + dequeue, &sch->qstats.backlog); > + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, > + * or HTB crashes. Defer it for next round. 
> + */ > + if (q->stats.drop_count && sch->q.qlen) { > + qdisc_tree_decrease_qlen(sch, q->stats.drop_count); > + q->stats.drop_count = 0; > + } > + if (skb) > + qdisc_bstats_update(sch, skb); > + return skb; > +} > + > +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) > +{ > + struct codel_sched_data *q; > + > + if (likely(qdisc_qlen(sch) < sch->limit)) { > + codel_set_enqueue_time(skb); > + return qdisc_enqueue_tail(skb, sch); > + } > + q = qdisc_priv(sch); > + q->drop_overlimit++; > + return qdisc_drop(skb, sch); > +} > + > +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = { > + [TCA_CODEL_TARGET] = { .type = NLA_U32 }, > + [TCA_CODEL_LIMIT] = { .type = NLA_U32 }, > + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 }, > + [TCA_CODEL_ECN] = { .type = NLA_U32 }, > +}; > + > +static int codel_change(struct Qdisc *sch, struct nlattr *opt) > +{ > + struct codel_sched_data *q = qdisc_priv(sch); > + struct nlattr *tb[TCA_CODEL_MAX + 1]; > + unsigned int qlen; > + int err; > + > + if (!opt) > + return -EINVAL; > + > + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); > + if (err < 0) > + return err; > + > + sch_tree_lock(sch); > + if (tb[TCA_CODEL_TARGET]) { > + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]); > + > + q->params.target = ((u64)target * NSEC_PER_USEC) >> > CODEL_SHIFT; > + } > + if (tb[TCA_CODEL_INTERVAL]) { > + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]); > + > + q->params.interval = ((u64)interval * NSEC_PER_USEC) >> > CODEL_SHIFT; > + } > + if (tb[TCA_CODEL_LIMIT]) > + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); > + > + if (tb[TCA_CODEL_ECN]) > + q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]); > + > + qlen = sch->q.qlen; > + while (sch->q.qlen > sch->limit) { > + struct sk_buff *skb = __skb_dequeue(&sch->q); > + > + sch->qstats.backlog -= qdisc_pkt_len(skb); > + qdisc_drop(skb, sch); > + } > + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); > + > + sch_tree_unlock(sch); > + return 0; > +} > + > 
+static int codel_init(struct Qdisc *sch, struct nlattr *opt) > +{ > + struct codel_sched_data *q = qdisc_priv(sch); > + > + /* It should be possible to run with no limit, > + * with infinite memory :) > + */ > + sch->limit = DEFAULT_CODEL_LIMIT; > + > + codel_params_init(&q->params, sch); > + codel_vars_init(&q->vars); > + codel_stats_init(&q->stats, sch); > + > + if (opt) { > + int err = codel_change(sch, opt); > + > + if (err) > + return err; > + } > + > + if (sch->limit >= 1) > + sch->flags |= TCQ_F_CAN_BYPASS; > + else > + sch->flags &= ~TCQ_F_CAN_BYPASS; > + > + return 0; > +} > + > +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb) > +{ > + struct codel_sched_data *q = qdisc_priv(sch); > + struct nlattr *opts; > + > + opts = nla_nest_start(skb, TCA_OPTIONS); > + if (opts == NULL) > + goto nla_put_failure; > + > + if (nla_put_u32(skb, TCA_CODEL_TARGET, > + codel_time_to_us(q->params.target)) || > + nla_put_u32(skb, TCA_CODEL_LIMIT, > + sch->limit) || > + nla_put_u32(skb, TCA_CODEL_INTERVAL, > + codel_time_to_us(q->params.interval)) || > + nla_put_u32(skb, TCA_CODEL_ECN, > + q->params.ecn)) > + goto nla_put_failure; > + > + return nla_nest_end(skb, opts); > + > +nla_put_failure: > + nla_nest_cancel(skb, opts); > + return -1; > +} > + > +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) > +{ > + const struct codel_sched_data *q = qdisc_priv(sch); > + struct tc_codel_xstats st = { > + .maxpacket = q->params.maxpacket, > + .count = q->vars.count, > + .lastcount = q->vars.lastcount, > + .states = q->stats.states, > + .state1 = q->stats.state1, > + .state2 = q->stats.state2, > + .state3 = q->stats.state3, > + .drop_overlimit = q->drop_overlimit, > + .ldelay = codel_time_to_us(q->vars.ldelay), > + .dropping = q->vars.dropping, > + .ecn_mark = q->stats.ecn_mark, > + }; > + > + if (q->vars.dropping && q->vars.drop_next) > + st.drop_next = codel_time_to_us(q->vars.drop_next - > + codel_get_time()); > + > + return gnet_stats_copy_app(d, 
&st, sizeof(st)); > +} > + > +static void codel_reset(struct Qdisc *sch) > +{ > + struct codel_sched_data *q = qdisc_priv(sch); > + > + qdisc_reset_queue(sch); > + codel_vars_init(&q->vars); > +} > + > +static struct Qdisc_ops codel_qdisc_ops __read_mostly = { > + .id = "codel", > + .priv_size = sizeof(struct codel_sched_data), > + > + .enqueue = codel_qdisc_enqueue, > + .dequeue = codel_qdisc_dequeue, > + .peek = qdisc_peek_dequeued, > + .init = codel_init, > + .reset = codel_reset, > + .change = codel_change, > + .dump = codel_dump, > + .dump_stats = codel_dump_stats, > + .owner = THIS_MODULE, > +}; > + > +static int __init codel_module_init(void) > +{ > + return register_qdisc(&codel_qdisc_ops); > +} > +static void __exit codel_module_exit(void) > +{ > + unregister_qdisc(&codel_qdisc_ops); > +} > +module_init(codel_module_init) > +module_exit(codel_module_exit) > + > +MODULE_DESCRIPTION("Controlled Delay queue discipline"); > +MODULE_AUTHOR("Dave Taht"); > +MODULE_AUTHOR("Eric Dumazet"); > +MODULE_LICENSE("Dual BSD/GPL"); > > > _______________________________________________ > Bloat mailing list > [email protected] > https://lists.bufferbloat.net/listinfo/bloat _______________________________________________ Bloat mailing list [email protected] https://lists.bufferbloat.net/listinfo/bloat
