Re: [RFC PATCH v5 06/23] vdpa: Add x-svq to NetdevVhostVDPAOptions
On Thu, Apr 14, 2022 at 5:42 AM Jason Wang wrote:
>
> On 2022/4/8 21:33, Eugenio Pérez wrote:
> > Finally offering the possibility to enable SVQ from the command line.
> >
> > Signed-off-by: Eugenio Pérez
> > ---
> >  qapi/net.json    |  9 ++++++++-
> >  net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
> >  2 files changed, 48 insertions(+), 9 deletions(-)
> >
> > [...]
> >
> >  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> > -                                           const char *device,
> > -                                           const char *name,
> > -                                           int vdpa_device_fd,
> > -                                           int queue_pair_index,
> > -                                           int nvqs,
> > -                                           bool is_datapath)
> > +                                       const char *device,
> > +                                       const char *name,
> > +                                       int vdpa_device_fd,
> > +                                       int queue_pair_index,
> > +                                       int nvqs,
> > +                                       bool is_datapath,
>
>
> It's better not to mix style changes with the logic changes.
>

Sure, I did not realize I changed the alignment here. The next version will
only add the parameters (see the sketch below).

Thanks!

> Otherwise, this looks fine.
>
> Thanks
>
> > +                                       bool svq,
> > +                                       VhostIOVATree *iova_tree)
> >
> > [...]
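For reference, a parameters-only version of that hunk (a sketch of what the
follow-up could look like, not the actual change posted later) keeps the
existing argument alignment untouched and only appends the two new parameters:

 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                            const char *device,
                                            const char *name,
                                            int vdpa_device_fd,
                                            int queue_pair_index,
                                            int nvqs,
-                                           bool is_datapath)
+                                           bool is_datapath,
+                                           bool svq,
+                                           VhostIOVATree *iova_tree)

This way the diff carries only the logic change (the two added arguments) and
no re-indentation noise.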
Re: [RFC PATCH v5 06/23] vdpa: Add x-svq to NetdevVhostVDPAOptions
On 2022/4/8 21:33, Eugenio Pérez wrote:
> Finally offering the possibility to enable SVQ from the command line.
>
> Signed-off-by: Eugenio Pérez
> ---
>  qapi/net.json    |  9 ++++++++-
>  net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
>  2 files changed, 48 insertions(+), 9 deletions(-)
>
> [...]
>
>  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> -                                           const char *device,
> -                                           const char *name,
> -                                           int vdpa_device_fd,
> -                                           int queue_pair_index,
> -                                           int nvqs,
> -                                           bool is_datapath)
> +                                       const char *device,
> +                                       const char *name,
> +                                       int vdpa_device_fd,
> +                                       int queue_pair_index,
> +                                       int nvqs,
> +                                       bool is_datapath,


It's better not to mix style changes with the logic changes.

Otherwise, this looks fine.

Thanks

> +                                       bool svq,
> +                                       VhostIOVATree *iova_tree)
>
> [...]
[RFC PATCH v5 06/23] vdpa: Add x-svq to NetdevVhostVDPAOptions
Finally offering the possibility to enable SVQ from the command line.

Signed-off-by: Eugenio Pérez
---
 qapi/net.json    |  9 ++++++++-
 net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/qapi/net.json b/qapi/net.json
index b92f3f5fb4..92848e4362 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -445,12 +445,19 @@
 # @queues: number of queues to be created for multiqueue vhost-vdpa
 #          (default: 1)
 #
+# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1)
+#         (default: false)
+#
+# Features:
+# @unstable: Member @x-svq is experimental.
+#
 # Since: 5.1
 ##
 { 'struct': 'NetdevVhostVDPAOptions',
   'data': {
     '*vhostdev':     'str',
-    '*queues':       'int' } }
+    '*queues':       'int',
+    '*x-svq':        {'type': 'bool', 'features' : [ 'unstable'] } } }
 
 ##
 # @NetClientDriver:
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 1e9fe47c03..def738998b 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -127,7 +127,11 @@ err_init:
 static void vhost_vdpa_cleanup(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_dev *dev = s->vhost_vdpa.dev;
 
+    if (dev && dev->vq_index + dev->nvqs == dev->vq_index_end) {
+        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+    }
     if (s->vhost_net) {
         vhost_net_cleanup(s->vhost_net);
         g_free(s->vhost_net);
@@ -187,13 +191,23 @@ static NetClientInfo net_vhost_vdpa_info = {
         .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+static int vhost_vdpa_get_iova_range(int fd,
+                                     struct vhost_vdpa_iova_range *iova_range)
+{
+    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
+
+    return ret < 0 ? -errno : 0;
+}
+
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
-                                           const char *device,
-                                           const char *name,
-                                           int vdpa_device_fd,
-                                           int queue_pair_index,
-                                           int nvqs,
-                                           bool is_datapath)
+                                       const char *device,
+                                       const char *name,
+                                       int vdpa_device_fd,
+                                       int queue_pair_index,
+                                       int nvqs,
+                                       bool is_datapath,
+                                       bool svq,
+                                       VhostIOVATree *iova_tree)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -211,6 +225,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
 
     s->vhost_vdpa.device_fd = vdpa_device_fd;
     s->vhost_vdpa.index = queue_pair_index;
+    s->vhost_vdpa.shadow_vqs_enabled = svq;
+    s->vhost_vdpa.iova_tree = iova_tree;
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
@@ -266,6 +282,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     g_autofree NetClientState **ncs = NULL;
     NetClientState *nc;
     int queue_pairs, i, has_cvq = 0;
+    g_autoptr(VhostIOVATree) iova_tree = NULL;
 
     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
     opts = &netdev->u.vhost_vdpa;
@@ -285,29 +302,44 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         qemu_close(vdpa_device_fd);
         return queue_pairs;
     }
+    if (opts->x_svq) {
+        struct vhost_vdpa_iova_range iova_range;
+
+        if (has_cvq) {
+            error_setg(errp, "vdpa svq does not work with cvq");
+            goto err_svq;
+        }
+        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+    }
 
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
 
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
-                                     vdpa_device_fd, i, 2, true);
+                                     vdpa_device_fd, i, 2, true, opts->x_svq,
+                                     iova_tree);
         if (!ncs[i])
             goto err;
     }
 
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
-                                 vdpa_device_fd, i, 1, false);
+                                 vdpa_device_fd, i, 1, false, opts->x_svq,
+                                 iova_tree);
         if (!nc)
             goto err;
     }
 
+    iova_tree = NULL;
     return 0;
 
 err:
     if (i) {
         qemu_del_net_client(ncs[0]);
     }
+
+err_svq:
     qemu_close(vdpa_device_fd);
     return -1;
 }

-- 
2.27.0
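For context (not part of the patch): with @x-svq wired into
NetdevVhostVDPAOptions, SVQ could be requested from the command line roughly
as below. The device node path and netdev id are placeholders, and, as the
patch enforces, the combination with a control virtqueue (cvq) is rejected:

    qemu-system-x86_64 ... \
        -netdev type=vhost-vdpa,id=vdpa0,vhostdev=/dev/vhost-vdpa-0,x-svq=on \
        -device virtio-net-pci,netdev=vdpa0

The option defaults to false, so existing setups keep the non-shadowed
datapath unless x-svq=on is passed explicitly.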