Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-22 Thread Jason Wang

On 07/20/2012 08:33 PM, Michael S. Tsirkin wrote:

On Thu, Jul 05, 2012 at 06:29:54PM +0800, Jason Wang wrote:

This patch lets the virtio_net driver negotiate the number of queues it
wishes to use through the control virtqueue, and exports an ethtool
interface to let the user tweak it.

As the current multiqueue virtio-net implementation has optimizations for
per-cpu virtqueues, only two modes are supported:

- single queue pair mode
- multiple queue pairs mode, where the number of queue pairs matches the
  number of vcpus

Single queue mode is currently used by default, due to regressions of
multiqueue mode in some tests (especially stream tests).

Since the virtio core does not support partially deleting virtqueues,
during mode switching all virtqueues are deleted and the driver re-creates
the ones it will use.

btw. Queue number negotiation is deferred to .ndo_open(); this is because
only after feature negotiation can we send commands over the control
virtqueue (as it may also use event index).

Signed-off-by: Jason Wang <jasow...@redhat.com>
---
  drivers/net/virtio_net.c   |  171 ++-
  include/linux/virtio_net.h |7 ++
  2 files changed, 142 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7410187..3339eeb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -88,6 +88,7 @@ struct receive_queue {

  struct virtnet_info {
u16 num_queue_pairs;/* # of RX/TX vq pairs */
+   u16 total_queue_pairs;

	struct send_queue *sq[MAX_QUEUES] ____cacheline_aligned_in_smp;
	struct receive_queue *rq[MAX_QUEUES] ____cacheline_aligned_in_smp;
@@ -137,6 +138,8 @@ struct padded_vnet_hdr {
char padding[6];
  };

+static const struct ethtool_ops virtnet_ethtool_ops;
+
  static inline int txq_get_qnum(struct virtnet_info *vi, struct virtqueue *vq)
  {
int ret = virtqueue_get_queue_index(vq);
@@ -802,22 +805,6 @@ static void virtnet_netpoll(struct net_device *dev)
  }
  #endif

-static int virtnet_open(struct net_device *dev)
-{
-   struct virtnet_info *vi = netdev_priv(dev);
-   int i;
-
-	for (i = 0; i < vi->num_queue_pairs; i++) {
-		/* Make sure we have some buffers: if oom use wq. */
-		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
-			queue_delayed_work(system_nrt_wq,
-					   &vi->rq[i]->refill, 0);
-		virtnet_napi_enable(vi->rq[i]);
-   }
-
-   return 0;
-}
-
  /*
   * Send command via the control virtqueue and check status.  Commands
   * supported by the hypervisor, as indicated by feature bits, should
@@ -873,6 +860,43 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
rtnl_unlock();
  }

+static int virtnet_set_queues(struct virtnet_info *vi)
+{
+	struct scatterlist sg;
+	struct net_device *dev = vi->dev;
+	sg_init_one(&sg, &vi->num_queue_pairs, sizeof(vi->num_queue_pairs));
+
+	if (!vi->has_cvq)
+		return -EINVAL;
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MULTIQUEUE,
+				  VIRTIO_NET_CTRL_MULTIQUEUE_QNUM, &sg, 1, 0)) {
+		dev_warn(&dev->dev, "Fail to set the number of queue pairs to"
+			 " %d\n", vi->num_queue_pairs);
+		return -EINVAL;
+	}
+
+   return 0;
+}
+
+static int virtnet_open(struct net_device *dev)
+{
+   struct virtnet_info *vi = netdev_priv(dev);
+   int i;
+
+	for (i = 0; i < vi->num_queue_pairs; i++) {
+		/* Make sure we have some buffers: if oom use wq. */
+		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
+			queue_delayed_work(system_nrt_wq,
+					   &vi->rq[i]->refill, 0);
+		virtnet_napi_enable(vi->rq[i]);
+   }
+
+   virtnet_set_queues(vi);
+
+   return 0;
+}
+
  static int virtnet_close(struct net_device *dev)
  {
struct virtnet_info *vi = netdev_priv(dev);
@@ -1013,12 +1037,6 @@ static void virtnet_get_drvinfo(struct net_device *dev,

  }

-static const struct ethtool_ops virtnet_ethtool_ops = {
-   .get_drvinfo = virtnet_get_drvinfo,
-   .get_link = ethtool_op_get_link,
-   .get_ringparam = virtnet_get_ringparam,
-};
-
  #define MIN_MTU 68
  #define MAX_MTU 65535

@@ -1235,7 +1253,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)

  err:
	if (ret && names)
-		for (i = 0; i < vi->num_queue_pairs * 2; i++)
+		for (i = 0; i < total_vqs * 2; i++)
kfree(names[i]);

kfree(names);
@@ -1373,7 +1391,6 @@ static int virtnet_probe(struct virtio_device *vdev)
	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
-	vi->num_queue_pairs = num_queue_pairs;

	/* If we can receive ANY GSO packets, we must allocate large ones. */

Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-20 Thread Michael S. Tsirkin
On Thu, Jul 05, 2012 at 06:29:54PM +0800, Jason Wang wrote:
 This patch lets the virtio_net driver negotiate the number of queues it
 wishes to use through the control virtqueue, and exports an ethtool
 interface to let the user tweak it.
 
 As the current multiqueue virtio-net implementation has optimizations for
 per-cpu virtqueues, only two modes are supported:
 
 - single queue pair mode
 - multiple queue pairs mode, where the number of queue pairs matches the
   number of vcpus
 
 Single queue mode is currently used by default, due to regressions of
 multiqueue mode in some tests (especially stream tests).
 
 Since the virtio core does not support partially deleting virtqueues,
 during mode switching all virtqueues are deleted and the driver
 re-creates the ones it will use.
 
 btw. Queue number negotiation is deferred to .ndo_open(); this is because
 only after feature negotiation can we send commands over the control
 virtqueue (as it may also use event index).
 
 Signed-off-by: Jason Wang <jasow...@redhat.com>
 ---
  drivers/net/virtio_net.c   |  171 ++-
  include/linux/virtio_net.h |7 ++
  2 files changed, 142 insertions(+), 36 deletions(-)
 
 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
 index 7410187..3339eeb 100644
 --- a/drivers/net/virtio_net.c
 +++ b/drivers/net/virtio_net.c
 @@ -88,6 +88,7 @@ struct receive_queue {
  
  struct virtnet_info {
   u16 num_queue_pairs;/* # of RX/TX vq pairs */
 + u16 total_queue_pairs;
  
 	struct send_queue *sq[MAX_QUEUES] ____cacheline_aligned_in_smp;
 	struct receive_queue *rq[MAX_QUEUES] ____cacheline_aligned_in_smp;
 @@ -137,6 +138,8 @@ struct padded_vnet_hdr {
   char padding[6];
  };
  
 +static const struct ethtool_ops virtnet_ethtool_ops;
 +
  static inline int txq_get_qnum(struct virtnet_info *vi, struct virtqueue *vq)
  {
   int ret = virtqueue_get_queue_index(vq);
 @@ -802,22 +805,6 @@ static void virtnet_netpoll(struct net_device *dev)
  }
  #endif
  
 -static int virtnet_open(struct net_device *dev)
 -{
 - struct virtnet_info *vi = netdev_priv(dev);
 - int i;
 -
 -	for (i = 0; i < vi->num_queue_pairs; i++) {
 -		/* Make sure we have some buffers: if oom use wq. */
 -		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
 -			queue_delayed_work(system_nrt_wq,
 -					   &vi->rq[i]->refill, 0);
 -		virtnet_napi_enable(vi->rq[i]);
 - }
 -
 - return 0;
 -}
 -
  /*
   * Send command via the control virtqueue and check status.  Commands
   * supported by the hypervisor, as indicated by feature bits, should
 @@ -873,6 +860,43 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
   rtnl_unlock();
  }
  
 +static int virtnet_set_queues(struct virtnet_info *vi)
 +{
 +	struct scatterlist sg;
 +	struct net_device *dev = vi->dev;
 +	sg_init_one(&sg, &vi->num_queue_pairs, sizeof(vi->num_queue_pairs));
 +
 +	if (!vi->has_cvq)
 +		return -EINVAL;
 +
 +	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MULTIQUEUE,
 +				  VIRTIO_NET_CTRL_MULTIQUEUE_QNUM, &sg, 1, 0)) {
 +		dev_warn(&dev->dev, "Fail to set the number of queue pairs to"
 +			 " %d\n", vi->num_queue_pairs);
 +		return -EINVAL;
 +	}
 +
 + return 0;
 +}
 +
 +static int virtnet_open(struct net_device *dev)
 +{
 + struct virtnet_info *vi = netdev_priv(dev);
 + int i;
 +
 +	for (i = 0; i < vi->num_queue_pairs; i++) {
 +		/* Make sure we have some buffers: if oom use wq. */
 +		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
 +			queue_delayed_work(system_nrt_wq,
 +					   &vi->rq[i]->refill, 0);
 +		virtnet_napi_enable(vi->rq[i]);
 + }
 +
 + virtnet_set_queues(vi);
 +
 + return 0;
 +}
 +
  static int virtnet_close(struct net_device *dev)
  {
   struct virtnet_info *vi = netdev_priv(dev);
 @@ -1013,12 +1037,6 @@ static void virtnet_get_drvinfo(struct net_device *dev,
  
  }
  
 -static const struct ethtool_ops virtnet_ethtool_ops = {
 - .get_drvinfo = virtnet_get_drvinfo,
 - .get_link = ethtool_op_get_link,
 - .get_ringparam = virtnet_get_ringparam,
 -};
 -
  #define MIN_MTU 68
  #define MAX_MTU 65535
  
 @@ -1235,7 +1253,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
  
  err:
 	if (ret && names)
 -		for (i = 0; i < vi->num_queue_pairs * 2; i++)
 +		for (i = 0; i < total_vqs * 2; i++)
   kfree(names[i]);
  
   kfree(names);
 @@ -1373,7 +1391,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 -	vi->num_queue_pairs = num_queue_pairs;
  
   /* If we can receive ANY GSO packets, we must allocate large ones. */
 

Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-09 Thread Ben Hutchings
On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:
 This patch lets the virtio_net driver negotiate the number of queues it
 wishes to use through the control virtqueue, and exports an ethtool
 interface to let the user tweak it.
 
 As the current multiqueue virtio-net implementation has optimizations for
 per-cpu virtqueues, only two modes are supported:
 
 - single queue pair mode
 - multiple queue pairs mode, where the number of queue pairs matches the
   number of vcpus
 
 Single queue mode is currently used by default, due to regressions of
 multiqueue mode in some tests (especially stream tests).
 
 Since the virtio core does not support partially deleting virtqueues,
 during mode switching all virtqueues are deleted and the driver
 re-creates the ones it will use.
 
 btw. Queue number negotiation is deferred to .ndo_open(); this is because
 only after feature negotiation can we send commands over the control
 virtqueue (as it may also use event index).
[...]
 +static int virtnet_set_channels(struct net_device *dev,
 +				struct ethtool_channels *channels)
 +{
 +	struct virtnet_info *vi = netdev_priv(dev);
 +	u16 queues = channels->rx_count;
 +	unsigned status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 +
 +	if (channels->rx_count != channels->tx_count)
 +		return -EINVAL;
[...]
 +static void virtnet_get_channels(struct net_device *dev,
 +				 struct ethtool_channels *channels)
 +{
 +	struct virtnet_info *vi = netdev_priv(dev);
 +
 +	channels->max_rx = vi->total_queue_pairs;
 +	channels->max_tx = vi->total_queue_pairs;
 +	channels->max_other = 0;
 +	channels->max_combined = 0;
 +	channels->rx_count = vi->num_queue_pairs;
 +	channels->tx_count = vi->num_queue_pairs;
 +	channels->other_count = 0;
 +	channels->combined_count = 0;
 +}
[...]

It looks like the queue-pairs should be treated as 'combined channels',
not separate RX and TX channels.  Also you don't need to clear the other
members; you can assume that the ethtool core will zero-initialise
structures for 'get' operations.

Ben.

-- 
Ben Hutchings, Staff Engineer, Solarflare
Not speaking for my employer; that's the marketing department's job.
They asked us to note that Solarflare product names are trademarked.
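
A minimal sketch of the combined-channels version Ben describes, reusing
virtnet_set_queues() from the patch; the validation checks are assumptions:

/* Hedged sketch: report each RX/TX queue pair as one combined channel
 * and rely on the ethtool core to zero the struct for 'get'. */
static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->max_combined = vi->total_queue_pairs;
	channels->combined_count = vi->num_queue_pairs;
}

static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Only combined channels are exposed; reject separate RX/TX. */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;
	if (!channels->combined_count ||
	    channels->combined_count > vi->total_queue_pairs)
		return -EINVAL;

	vi->num_queue_pairs = channels->combined_count;
	return virtnet_set_queues(vi);
}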



Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-06 Thread Stephen Hemminger
On Fri, 06 Jul 2012 11:20:06 +0800
Jason Wang <jasow...@redhat.com> wrote:

 On 07/05/2012 08:51 PM, Sasha Levin wrote:
  On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:
@@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
+	/* Use single tx/rx queue pair as default */
+	vi->num_queue_pairs = 1;
+	vi->total_queue_pairs = num_queue_pairs;
 The code is using this default even if the number of queue pairs it
 wants was specified during initialization. This basically limits any
 device to use 1 pair when starting up.
 
 
 Yes, currently the virtio-net driver uses 1 rxq/txq pair by default,
 since multiqueue may not outperform single queue in all kinds of
 workloads. So it's better to keep single queue as the default and let
 the user enable multiqueue via ethtool -L.
 

I would prefer that the driver sized the number of queues based on the
number of online CPUs. That is what real hardware does. What kind of
workload are you doing? If it is some DBMS benchmark, then maybe the
issue is that some CPUs need to be reserved.
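
For reference, a sketch of that sizing policy against the fields in this
patch; clamping to what the device advertises is an assumption:

/* Hedged sketch: default the queue pair count to the number of online
 * CPUs, limited by what the device can provide. */
static u16 virtnet_default_queue_pairs(struct virtnet_info *vi)
{
	return min_t(u16, num_online_cpus(), vi->total_queue_pairs);
}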


Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-06 Thread Jason Wang

On 07/06/2012 04:07 AM, Amos Kong wrote:

On 07/05/2012 08:51 PM, Sasha Levin wrote:

On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:

@@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
+	/* Use single tx/rx queue pair as default */
+	vi->num_queue_pairs = 1;
+	vi->total_queue_pairs = num_queue_pairs;

vi->total_queue_pairs should also be set to 1:

	vi->total_queue_pairs = 1;


Hi Amos:

total_queue_pairs is the max number of queue pairs that the device could
provide, so it's OK here.
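
In struct terms, the intent is the following (comments added for
illustration):

struct virtnet_info {
	u16 num_queue_pairs;	/* pairs currently in use, 1 by default */
	u16 total_queue_pairs;	/* max pairs the device can provide */
	/* ... */
};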

The code is using this default even if the number of queue pairs it
wants was specified during initialization. This basically limits any
device to use 1 pair when starting up.







Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-06 Thread Sasha Levin
On Fri, 2012-07-06 at 11:20 +0800, Jason Wang wrote:
 On 07/05/2012 08:51 PM, Sasha Levin wrote:
  On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:
 @@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
 +	/* Use single tx/rx queue pair as default */
 +	vi->num_queue_pairs = 1;
 +	vi->total_queue_pairs = num_queue_pairs;
 The code is using this default even if the number of queue pairs it
 wants was specified during initialization. This basically limits any
 device to use 1 pair when starting up.
 
 
 Yes, currently the virtio-net driver uses 1 rxq/txq pair by default,
 since multiqueue may not outperform single queue in all kinds of
 workloads. So it's better to keep single queue as the default and let
 the user enable multiqueue via ethtool -L.

I think it makes sense to set it to 1 if the number of initial queue
pairs wasn't specified.

On the other hand, if a virtio-net driver was probed to provide
VIRTIO_NET_F_MULTIQUEUE and has set something reasonable in
virtio_net_config.num_queues, then that setting shouldn't be quietly
ignored and reset back to 1.

What I'm basically saying is that I agree that the *default* should be 1
- but if the user has explicitly asked for something else during
initialization, then the default should be overridden.
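
In probe terms, something like the following sketch; treating
num_queue_pairs as the value the device explicitly configured is an
assumption:

	/* Hedged sketch of the override Sasha describes: default to a
	 * single pair, but honour an explicitly configured queue count
	 * instead of quietly resetting it to 1. */
	vi->total_queue_pairs = num_queue_pairs;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MULTIQUEUE) &&
	    num_queue_pairs > 1)
		vi->num_queue_pairs = num_queue_pairs;
	else
		vi->num_queue_pairs = 1;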



Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-06 Thread Jason Wang

On 07/06/2012 02:38 PM, Stephen Hemminger wrote:

On Fri, 06 Jul 2012 11:20:06 +0800
Jason Wang <jasow...@redhat.com> wrote:


On 07/05/2012 08:51 PM, Sasha Levin wrote:

On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:

@@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
+	/* Use single tx/rx queue pair as default */
+	vi->num_queue_pairs = 1;
+	vi->total_queue_pairs = num_queue_pairs;

The code is using this default even if the number of queue pairs it
wants was specified during initialization. This basically limits any
device to use 1 pair when starting up.


Yes, currently the virtio-net driver uses 1 rxq/txq pair by default,
since multiqueue may not outperform single queue in all kinds of
workloads. So it's better to keep single queue as the default and let
the user enable multiqueue via ethtool -L.


I would prefer that the driver sized the number of queues based on the
number of online CPUs. That is what real hardware does. What kind of
workload are you doing? If it is some DBMS benchmark, then maybe the
issue is that some CPUs need to be reserved.


I ran the rr and stream tests of netperf; multiqueue shows an improvement
in the rr test and a regression for small-packet transmission in the
stream test. For small-packet transmission, multiqueue tends to send many
more small packets, which also increases CPU utilization. I suspect
multiqueue is fast enough that TCP does not get to merge packets into
large enough segments before sending, but this needs more thought.



[net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-05 Thread Jason Wang
This patch lets the virtio_net driver negotiate the number of queues it
wishes to use through the control virtqueue, and exports an ethtool
interface to let the user tweak it.

As the current multiqueue virtio-net implementation has optimizations for
per-cpu virtqueues, only two modes are supported:

- single queue pair mode
- multiple queue pairs mode, where the number of queue pairs matches the
  number of vcpus

Single queue mode is currently used by default, due to regressions of
multiqueue mode in some tests (especially stream tests).

Since the virtio core does not support partially deleting virtqueues,
during mode switching all virtqueues are deleted and the driver re-creates
the ones it will use.

btw. Queue number negotiation is deferred to .ndo_open(); this is because
only after feature negotiation can we send commands over the control
virtqueue (as it may also use event index).

Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 drivers/net/virtio_net.c   |  171 ++-
 include/linux/virtio_net.h |7 ++
 2 files changed, 142 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7410187..3339eeb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -88,6 +88,7 @@ struct receive_queue {
 
 struct virtnet_info {
u16 num_queue_pairs;/* # of RX/TX vq pairs */
+   u16 total_queue_pairs;
 
	struct send_queue *sq[MAX_QUEUES] ____cacheline_aligned_in_smp;
	struct receive_queue *rq[MAX_QUEUES] ____cacheline_aligned_in_smp;
@@ -137,6 +138,8 @@ struct padded_vnet_hdr {
char padding[6];
 };
 
+static const struct ethtool_ops virtnet_ethtool_ops;
+
 static inline int txq_get_qnum(struct virtnet_info *vi, struct virtqueue *vq)
 {
int ret = virtqueue_get_queue_index(vq);
@@ -802,22 +805,6 @@ static void virtnet_netpoll(struct net_device *dev)
 }
 #endif
 
-static int virtnet_open(struct net_device *dev)
-{
-   struct virtnet_info *vi = netdev_priv(dev);
-   int i;
-
-	for (i = 0; i < vi->num_queue_pairs; i++) {
-		/* Make sure we have some buffers: if oom use wq. */
-		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
-			queue_delayed_work(system_nrt_wq,
-					   &vi->rq[i]->refill, 0);
-		virtnet_napi_enable(vi->rq[i]);
-   }
-
-   return 0;
-}
-
 /*
  * Send command via the control virtqueue and check status.  Commands
  * supported by the hypervisor, as indicated by feature bits, should
@@ -873,6 +860,43 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
rtnl_unlock();
 }
 
+static int virtnet_set_queues(struct virtnet_info *vi)
+{
+	struct scatterlist sg;
+	struct net_device *dev = vi->dev;
+	sg_init_one(&sg, &vi->num_queue_pairs, sizeof(vi->num_queue_pairs));
+
+	if (!vi->has_cvq)
+		return -EINVAL;
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MULTIQUEUE,
+				  VIRTIO_NET_CTRL_MULTIQUEUE_QNUM, &sg, 1, 0)) {
+		dev_warn(&dev->dev, "Fail to set the number of queue pairs to"
+			 " %d\n", vi->num_queue_pairs);
+		return -EINVAL;
+	}
+
+   return 0;
+}
+
+static int virtnet_open(struct net_device *dev)
+{
+   struct virtnet_info *vi = netdev_priv(dev);
+   int i;
+
+	for (i = 0; i < vi->num_queue_pairs; i++) {
+		/* Make sure we have some buffers: if oom use wq. */
+		if (!try_fill_recv(vi->rq[i], GFP_KERNEL))
+			queue_delayed_work(system_nrt_wq,
+					   &vi->rq[i]->refill, 0);
+		virtnet_napi_enable(vi->rq[i]);
+   }
+
+   virtnet_set_queues(vi);
+
+   return 0;
+}
+
 static int virtnet_close(struct net_device *dev)
 {
struct virtnet_info *vi = netdev_priv(dev);
@@ -1013,12 +1037,6 @@ static void virtnet_get_drvinfo(struct net_device *dev,
 
 }
 
-static const struct ethtool_ops virtnet_ethtool_ops = {
-   .get_drvinfo = virtnet_get_drvinfo,
-   .get_link = ethtool_op_get_link,
-   .get_ringparam = virtnet_get_ringparam,
-};
-
 #define MIN_MTU 68
 #define MAX_MTU 65535
 
@@ -1235,7 +1253,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 
 err:
	if (ret && names)
-		for (i = 0; i < vi->num_queue_pairs * 2; i++)
+		for (i = 0; i < total_vqs * 2; i++)
kfree(names[i]);
 
kfree(names);
@@ -1373,7 +1391,6 @@ static int virtnet_probe(struct virtio_device *vdev)
	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
-	vi->num_queue_pairs = num_queue_pairs;
 
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
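
The mode switch the commit message describes (delete all virtqueues,
re-create the ones needed) would look roughly like the sketch below;
virtnet_del_vqs() and virtnet_init_vqs() are assumed helper names, not
necessarily the patch's:

/* Hedged sketch: the virtio core cannot delete a subset of virtqueues,
 * so switching between 1 and N queue pairs tears them all down,
 * re-creates them, and renegotiates the count via the control vq. */
static int virtnet_switch_mode(struct virtnet_info *vi, u16 queue_pairs)
{
	bool was_up = netif_running(vi->dev);
	int ret;

	if (was_up)
		virtnet_close(vi->dev);

	virtnet_del_vqs(vi);		/* assumed: delete every vq */
	vi->num_queue_pairs = queue_pairs;
	ret = virtnet_init_vqs(vi);	/* assumed: re-create the vqs */
	if (!ret && was_up)
		ret = virtnet_open(vi->dev);	/* sends the ctrl vq command */
	return ret;
}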

Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-05 Thread Sasha Levin
On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:
 @@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
 +	/* Use single tx/rx queue pair as default */
 +	vi->num_queue_pairs = 1;
 +	vi->total_queue_pairs = num_queue_pairs;

The code is using this default even if the number of queue pairs it
wants was specified during initialization. This basically limits any
device to use 1 pair when starting up.



Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-05 Thread Amos Kong
On 07/05/2012 08:51 PM, Sasha Levin wrote:
 On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:
 @@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
 +	/* Use single tx/rx queue pair as default */
 +	vi->num_queue_pairs = 1;
 +	vi->total_queue_pairs = num_queue_pairs;

vi->total_queue_pairs should also be set to 1:

	vi->total_queue_pairs = 1;

 
 The code is using this default even if the number of queue pairs it
 wants was specified during initialization. This basically limits any
 device to use 1 pair when starting up.
 


-- 
Amos.


Re: [net-next RFC V5 5/5] virtio_net: support negotiating the number of queues through ctrl vq

2012-07-05 Thread Jason Wang

On 07/05/2012 08:51 PM, Sasha Levin wrote:

On Thu, 2012-07-05 at 18:29 +0800, Jason Wang wrote:

@@ -1387,6 +1404,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
+	/* Use single tx/rx queue pair as default */
+	vi->num_queue_pairs = 1;
+	vi->total_queue_pairs = num_queue_pairs;

The code is using this default even if the number of queue pairs it
wants was specified during initialization. This basically limits any
device to use 1 pair when starting up.



Yes, currently the virtio-net driver uses 1 rxq/txq pair by default,
since multiqueue may not outperform single queue in all kinds of
workloads. So it's better to keep single queue as the default and let
the user enable multiqueue via ethtool -L.
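
For reference, the patch forward-declares virtnet_ethtool_ops and removes
its old definition, so ethtool -L presumably reaches handlers wired up
along these lines (the exact member list is an assumption):

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo	= virtnet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ringparam	= virtnet_get_ringparam,
	.get_channels	= virtnet_get_channels,	/* assumed addition */
	.set_channels	= virtnet_set_channels,	/* assumed addition */
};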

