There are two cases where we can avoid calling ktime_get_ns():

1) The qdisc is empty (sch->q.qlen == 0): we can return NULL right away.
2) The internal queue is not empty: its head packet can be sent without a pacing check.
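
A minimal user-space sketch of the same fast-path ordering; toy_sched, toy_dequeue(),
toy_dequeue_head() and expensive_clock_ns() are made-up stand-ins for fq_sched_data,
fq_dequeue(), fq_dequeue_head() and ktime_get_ns(), and only illustrate when the
clock read can be skipped:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct toy_pkt {
        struct toy_pkt *next;
};

struct toy_sched {
        struct toy_pkt *internal;       /* head of the internal (unpaced) queue */
        unsigned int qlen;              /* total number of queued packets */
};

/* Stand-in for ktime_get_ns(): the call we want to avoid. */
static uint64_t expensive_clock_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Stand-in for fq_dequeue_head(): pop the internal queue, if any. */
static struct toy_pkt *toy_dequeue_head(struct toy_sched *q)
{
        struct toy_pkt *pkt = q->internal;

        if (pkt) {
                q->internal = pkt->next;
                q->qlen--;
        }
        return pkt;
}

static struct toy_pkt *toy_dequeue(struct toy_sched *q)
{
        struct toy_pkt *pkt;
        uint64_t now;

        if (!q->qlen)                   /* case 1: nothing queued at all */
                return NULL;

        pkt = toy_dequeue_head(q);      /* case 2: internal queue not empty */
        if (pkt)
                return pkt;

        now = expensive_clock_ns();     /* slow path: timestamp is needed */
        (void)now;                      /* pacing/throttling would use it here */
        return NULL;                    /* rest of the paced dequeue omitted */
}

int main(void)
{
        struct toy_pkt pkt = { .next = NULL };
        struct toy_sched q = { .internal = &pkt, .qlen = 1 };

        /* Internal queue is not empty: no clock read happens. */
        printf("dequeued: %p\n", (void *)toy_dequeue(&q));

        /* Queue is now empty: again no clock read. */
        printf("dequeued: %p\n", (void *)toy_dequeue(&q));
        return 0;
}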

Signed-off-by: Eric Dumazet <eduma...@google.com>
---
 net/sched/sch_fq.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 1da8864502d448cfa93a9c02da504af61a1093e5..1a662f2bb7bb7bb5507107f61657d44fa28ca991 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -414,16 +414,21 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;
+       u64 now;
+
+       if (!sch->q.qlen)
+               return NULL;
 
        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
+
+       now = ktime_get_ns();
        fq_check_throttled(q, now);
 begin:
        head = &q->new_flows;
-- 
2.19.1.1215.g8438c0b245-goog
