Due to MQ support we may allocate a whole bunch of rx queues but
never use them. With this patch we save the space used by the
receive buffers until the queues are actually in use:

sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M        35M       455M         0B         0B       4.1M
-/+ buffers/cache:        31M       459M
Swap:           0B         0B         0B
sh-4.2# ethtool -L eth0 combined 8
sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M       162M       327M         0B         0B       4.1M
-/+ buffers/cache:       158M       331M
Swap:           0B         0B         0B

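The pattern, in a minimal sketch (try_fill_recv, schedule_delayed_work
and the struct fields are from drivers/net/virtio_net.c as touched by
this diff; the helper name below is hypothetical, for illustration
only): every loop that pre-fills rx buffers is bounded by
curr_queue_pairs, the number of queue pairs currently enabled, instead
of max_queue_pairs, the number we allocated.

	/* Hypothetical helper illustrating the change, not part of the diff. */
	static void fill_active_rx_queues(struct virtnet_info *vi)
	{
		int i;

		/* Walk only the enabled queue pairs (was: max_queue_pairs). */
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				/* OOM: retry later from the refill workqueue. */
				schedule_delayed_work(&vi->refill, 0);
	}

When ethtool -L later raises the queue count, virtnet_set_queues fills
the newly enabled queues on demand, as the hunk below shows.
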
Signed-off-by: Sasha Levin <sasha.le...@oracle.com>
---
 drivers/net/virtio_net.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6bfc511..4d82d17 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -581,7 +581,7 @@ static void refill_work(struct work_struct *work)
        bool still_empty;
        int i;
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];
 
                napi_disable(&rq->napi);
@@ -636,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                /* Make sure we have some buffers: if oom use wq. */
                if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                        schedule_delayed_work(&vi->refill, 0);
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        struct scatterlist sg;
        struct virtio_net_ctrl_mq s;
        struct net_device *dev = vi->dev;
+       int i;
 
        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;
@@ -912,8 +913,13 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
                dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
                         queue_pairs);
                return -EINVAL;
-       } else
+       } else {
+               if (queue_pairs > vi->curr_queue_pairs)
+                       for (i = 0; i < queue_pairs; i++)
+                               if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                                       schedule_delayed_work(&vi->refill, 0);
                vi->curr_queue_pairs = queue_pairs;
+       }
 
        return 0;
 }
@@ -1568,7 +1574,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        }
 
        /* Last of all, set up some receive buffers. */
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
                /* If we didn't even get one input buffer, we're useless. */
@@ -1692,7 +1698,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
        netif_device_attach(vi->dev);
 
-       for (i = 0; i < vi->max_queue_pairs; i++)
+       for (i = 0; i < vi->curr_queue_pairs; i++)
                if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                        schedule_delayed_work(&vi->refill, 0);
 
-- 
1.8.2.1
