[dpdk-dev] [PATCH v3 02/12] net/virtio: setup and start cq in configure callback

2016-11-08 Thread Olivier Matz
Hi Lei,

On 11/02/2016 02:38 AM, Yao, Lei A wrote:
> Hi, Olivier
> 
> During validation work with v16.11-rc2, I found that this patch causes a 
> VM crash when virtio bonding is enabled in the VM. Could you check this on your side? 
> The steps I used are listed below. Thanks a lot.
> 
> 1. bind PF port to igb_uio.
> modprobe uio
> insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
> ./tools/dpdk-devbind.py --bind=igb_uio 84:00.1
> 
> 2. start vhost switch.
> ./examples/vhost/build/vhost-switch -c 0x1c -n 4 --socket-mem 4096,4096 -- 
> -p 0x1 --mergeable 0 --vm2vm 0 --socket-file ./vhost-net
> 
> 3. bootup one vm with four virtio net device
> qemu-system-x86_64 \
> -name vm0 -enable-kvm -chardev 
> socket,path=/tmp/vm0_qga0.sock,server,nowait,id=vm0_qga0 \
> -device virtio-serial -device 
> virtserialport,chardev=vm0_qga0,name=org.qemu.guest_agent.0 \
> -daemonize -monitor unix:/tmp/vm0_monitor.sock,server,nowait \
> -net nic,vlan=0,macaddr=00:00:00:c7:56:64,addr=1f \
> -net user,vlan=0,hostfwd=tcp:10.239.129.127:6107:22 \
> -chardev socket,id=char0,path=./vhost-net \
> -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \
> -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01 \
> -chardev socket,id=char1,path=./vhost-net \
> -netdev type=vhost-user,id=netdev1,chardev=char1,vhostforce \
> -device virtio-net-pci,netdev=netdev1,mac=52:54:00:00:00:02 \
> -chardev socket,id=char2,path=./vhost-net \
> -netdev type=vhost-user,id=netdev2,chardev=char2,vhostforce \
> -device virtio-net-pci,netdev=netdev2,mac=52:54:00:00:00:03 \
> -chardev socket,id=char3,path=./vhost-net \
> -netdev type=vhost-user,id=netdev3,chardev=char3,vhostforce \
> -device virtio-net-pci,netdev=netdev3,mac=52:54:00:00:00:04 \
> -cpu host -smp 8 -m 4096 \
> -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \
> -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu16.img -vnc 
> :10
> 
> 4. on vm:
> bind virtio net device to igb_uio
> modprobe uio
> insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
> tools/dpdk-devbind.py --bind=igb_uio 00:04.0 00:05.0 00:06.0 00:07.0
> 5. startup test_pmd app
> ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 -- -i --txqflags=0xf00 
> --disable-hw-vlan-filter
> 6. create one bonding device (port 4)
> create bonded device 0 0 (the first 0: mode, the second: the socket number)
> show bonding config 4
> 7. bind port 0, 1, 2 to port 4
> add bonding slave 0 4
> add bonding slave 1 4
> add bonding slave 2 4
> port start 4
> Result: just after "port start 4" (port 4 is the bonded port), the VM shuts down 
> immediately.

Sorry for the late answer. I reproduced the issue on rc2, and I confirm
that Yuanhan's patchset fixes it in rc3.

Regards,
Olivier


[dpdk-dev] [PATCH v3 02/12] net/virtio: setup and start cq in configure callback

2016-11-02 Thread Yao, Lei A
Hi, Olivier

During validation work with v16.11-rc2, I found that this patch causes a 
VM crash when virtio bonding is enabled in the VM. Could you check this on your side? 
The steps I used are listed below. Thanks a lot.

1. bind PF port to igb_uio.
modprobe uio
insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
./tools/dpdk-devbind.py --bind=igb_uio 84:00.1

2. start vhost switch.
./examples/vhost/build/vhost-switch -c 0x1c -n 4 --socket-mem 4096,4096 -- 
-p 0x1 --mergeable 0 --vm2vm 0 --socket-file ./vhost-net

3. bootup one vm with four virtio net device
qemu-system-x86_64 \
-name vm0 -enable-kvm -chardev 
socket,path=/tmp/vm0_qga0.sock,server,nowait,id=vm0_qga0 \
-device virtio-serial -device 
virtserialport,chardev=vm0_qga0,name=org.qemu.guest_agent.0 \
-daemonize -monitor unix:/tmp/vm0_monitor.sock,server,nowait \
-net nic,vlan=0,macaddr=00:00:00:c7:56:64,addr=1f \
-net user,vlan=0,hostfwd=tcp:10.239.129.127:6107:22 \
-chardev socket,id=char0,path=./vhost-net \
-netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \
-device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01 \
-chardev socket,id=char1,path=./vhost-net \
-netdev type=vhost-user,id=netdev1,chardev=char1,vhostforce \
-device virtio-net-pci,netdev=netdev1,mac=52:54:00:00:00:02 \
-chardev socket,id=char2,path=./vhost-net \
-netdev type=vhost-user,id=netdev2,chardev=char2,vhostforce \
-device virtio-net-pci,netdev=netdev2,mac=52:54:00:00:00:03 \
-chardev socket,id=char3,path=./vhost-net \
-netdev type=vhost-user,id=netdev3,chardev=char3,vhostforce \
-device virtio-net-pci,netdev=netdev3,mac=52:54:00:00:00:04 \
-cpu host -smp 8 -m 4096 \
-object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \
-numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu16.img -vnc 
:10

4. on vm:
bind virtio net device to igb_uio
modprobe uio
insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
tools/dpdk-devbind.py --bind=igb_uio 00:04.0 00:05.0 00:06.0 00:07.0
5. startup test_pmd app
./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1f -n 4 -- -i --txqflags=0xf00 
--disable-hw-vlan-filter
6. create one bonding device (port 4)
create bonded device 0 0 (the first 0: mode, the second: the socket number)
show bonding config 4
7. bind port 0, 1, 2 to port 4
add bonding slave 0 4
add bonding slave 1 4
add bonding slave 2 4
port start 4
Result: just after "port start 4" (port 4 is the bonded port), the VM shuts down 
immediately.

BRs
Lei

-Original Message-
From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Olivier Matz
Sent: Thursday, October 13, 2016 10:16 PM
To: dev at dpdk.org; yuanhan.liu at linux.intel.com
Cc: Ananyev, Konstantin ; Chandran, Sugesh 
; Richardson, Bruce ; Tan, Jianfeng ; Zhang, Helin 
; adrien.mazarguil at 6wind.com; stephen at 
networkplumber.org; dprovan at bivio.net; Wang, Xiao W ; maxime.coquelin at redhat.com
Subject: [dpdk-dev] [PATCH v3 02/12] net/virtio: setup and start cq in 
configure callback

Move the configuration of control queue in the configure callback.
This is needed by next commit, which introduces the reinitialization of the 
device in the configure callback to change the feature flags.
Therefore, the control queue will have to be restarted at the same place.

As virtio_dev_cq_queue_setup() is called from a place where
config->max_virtqueue_pairs is not available, we need to store this in
the private structure. It replaces max_rx_queues and max_tx_queues which have 
the same value. The log showing the value of max_rx_queues and max_tx_queues is 
also removed since config->max_virtqueue_pairs is already displayed above.

Signed-off-by: Olivier Matz 
Reviewed-by: Maxime Coquelin 
---
 drivers/net/virtio/virtio_ethdev.c | 43 +++---
 drivers/net/virtio/virtio_ethdev.h |  4 ++--
 drivers/net/virtio/virtio_pci.h|  3 +--
 3 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c 
b/drivers/net/virtio/virtio_ethdev.c
index 77ca569..f3921ac 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -552,6 +552,9 @@ virtio_dev_close(struct rte_eth_dev *dev)
if (hw->started == 1)
virtio_dev_stop(dev);

+   if (hw->cvq)
+   virtio_dev_queue_release(hw->cvq->vq);
+
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);

@@ -1191,16 +1194,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev)
config->max_virtqueue_pairs = 1;
}

-   hw->max_rx_queues =
-   (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
-   VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
-   hw->max_tx_queues =
-   (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
- 

[dpdk-dev] [PATCH v3 02/12] net/virtio: setup and start cq in configure callback

2016-10-13 Thread Olivier Matz
Move the configuration of control queue in the configure callback.
This is needed by next commit, which introduces the reinitialization
of the device in the configure callback to change the feature flags.
Therefore, the control queue will have to be restarted at the same
place.

As virtio_dev_cq_queue_setup() is called from a place where
config->max_virtqueue_pairs is not available, we need to store this in
the private structure. It replaces max_rx_queues and max_tx_queues which
have the same value. The log showing the value of max_rx_queues and
max_tx_queues is also removed since config->max_virtqueue_pairs is
already displayed above.

Signed-off-by: Olivier Matz 
Reviewed-by: Maxime Coquelin 
---
 drivers/net/virtio/virtio_ethdev.c | 43 +++---
 drivers/net/virtio/virtio_ethdev.h |  4 ++--
 drivers/net/virtio/virtio_pci.h|  3 +--
 3 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c 
b/drivers/net/virtio/virtio_ethdev.c
index 77ca569..f3921ac 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -552,6 +552,9 @@ virtio_dev_close(struct rte_eth_dev *dev)
if (hw->started == 1)
virtio_dev_stop(dev);

+   if (hw->cvq)
+   virtio_dev_queue_release(hw->cvq->vq);
+
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
@@ -1191,16 +1194,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev)
config->max_virtqueue_pairs = 1;
}

-   hw->max_rx_queues =
-   (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
-   VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
-   hw->max_tx_queues =
-   (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
-   VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;
-
-   virtio_dev_cq_queue_setup(eth_dev,
-   config->max_virtqueue_pairs * 2,
-   SOCKET_ID_ANY);
+   hw->max_queue_pairs = config->max_virtqueue_pairs;

PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
config->max_virtqueue_pairs);
@@ -1211,19 +1205,15 @@ virtio_init_device(struct rte_eth_dev *eth_dev)
config->mac[2], config->mac[3],
config->mac[4], config->mac[5]);
} else {
-   hw->max_rx_queues = 1;
-   hw->max_tx_queues = 1;
+   PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
+   hw->max_queue_pairs = 1;
}

-   PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d   hw->max_tx_queues=%d",
-   hw->max_rx_queues, hw->max_tx_queues);
if (pci_dev)
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);

-   virtio_dev_cq_start(eth_dev);
-
return 0;
 }

@@ -1285,7 +1275,6 @@ static int
 eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
 {
struct rte_pci_device *pci_dev;
-   struct virtio_hw *hw = eth_dev->data->dev_private;

PMD_INIT_FUNC_TRACE();

@@ -1301,9 +1290,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;

-   if (hw->cvq)
-   virtio_dev_queue_release(hw->cvq->vq);
-
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;

@@ -1352,6 +1338,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 {
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+   int ret;

PMD_INIT_LOG(DEBUG, "configure");

@@ -1360,6 +1347,16 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}

+   /* Setup and start control queue */
+   if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
+   ret = virtio_dev_cq_queue_setup(dev,
+   hw->max_queue_pairs * 2,
+   SOCKET_ID_ANY);
+   if (ret < 0)
+   return ret;
+   virtio_dev_cq_start(dev);
+   }
+
hw->vlan_strip = rxmode->hw_vlan_strip;

if (rxmode->hw_vlan_filter
@@ -1553,8 +1550,10 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->driver_name = dev->driver->pci_drv.driver.name;
else
dev_info->driver_name = "virtio_user PMD";
-   dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
-   dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+   dev_info->max_rx_queues =
+