On Mon, Sep 21, 2015 at 10:25:18AM +0800, Yuanhan Liu wrote:
> On Sun, Sep 20, 2015 at 12:29:17PM +0300, Michael S. Tsirkin wrote:
> > On Fri, Sep 18, 2015 at 11:10:53PM +0800, Yuanhan Liu wrote:
> > > From: Changchun Ouyang <changchun.ouyang at intel.com>
> > > 
> > > Do not use VIRTIO_RXQ or VIRTIO_TXQ anymore; use the queue_id,
> > > instead, which will be set to a proper value for a specific queue
> > > when we have multiple queue support enabled.
> > > 
> > > For now, queue_id is still set with VIRTIO_RXQ or VIRTIO_TXQ,
> > > so it should not break anything.
> > > 
> > > Signed-off-by: Changchun Ouyang <changchun.ouyang at intel.com>
> > > Signed-off-by: Yuanhan Liu <yuanhan.liu at linux.intel.com>
> > 
> > What I'm missing is the logic that tracks which tx vq
> > did a specific flow use, to send traffic back on the correct
> > rx vq.
> > 
> > Is this something added by one of the follow-up patches?
> 
> Yeah, and also, they are offloaded to the application (examples/
> vhost/vhost-switch): patch 11 does the job.
> 
>       --yliu

Couldn't find anything dealing with flows there ...
Could you be more specific pls?

> > 
> > 
> > > ---
> > >  lib/librte_vhost/vhost_rxtx.c | 46 ++++++++++++++++++++++++++++++-------------
> > >  1 file changed, 32 insertions(+), 14 deletions(-)
> > > 
> > > diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> > > index b2b2bcc..a4ab6ca 100644
> > > --- a/lib/librte_vhost/vhost_rxtx.c
> > > +++ b/lib/librte_vhost/vhost_rxtx.c
> > > @@ -42,6 +42,16 @@
> > >  
> > >  #define MAX_PKT_BURST 32
> > >  
> > > +static inline int __attribute__((always_inline))
> > > +is_valid_virt_queue_idx(uint32_t virtq_idx, int is_tx, uint32_t max_qp_idx)
> > > +{
> > > + if ((is_tx ^ (virtq_idx & 0x1)) ||
> > > +     (virtq_idx >= max_qp_idx * VIRTIO_QNUM))
> > > +         return 0;
> > > +
> > > + return 1;
> > > +}
> > > +
> > >  /**
> > >   * This function adds buffers to the virtio devices RX virtqueue. Buffers can
> > >   * be received from the physical port or from another virtio device. A packet
> > > @@ -68,12 +78,14 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> > >   uint8_t success = 0;
> > >  
> > >   LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
> > > - if (unlikely(queue_id != VIRTIO_RXQ)) {
> > > -         LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
> > > +         RTE_LOG(ERR, VHOST_DATA,
> > > +                 "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > +                 __func__, dev->device_fh, queue_id);
> > >           return 0;
> > >   }
> > >  
> > > - vq = dev->virtqueue[VIRTIO_RXQ];
> > > + vq = dev->virtqueue[queue_id];
> > >   count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
> > >  
> > >   /*
> > > @@ -235,8 +247,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> > >  }
> > >  
> > >  static inline uint32_t __attribute__((always_inline))
> > > -copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
> > > - uint16_t res_end_idx, struct rte_mbuf *pkt)
> > > +copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
> > > +                 uint16_t res_base_idx, uint16_t res_end_idx,
> > > +                 struct rte_mbuf *pkt)
> > >  {
> > >   uint32_t vec_idx = 0;
> > >   uint32_t entry_success = 0;
> > > @@ -264,7 +277,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
> > >    * Convert from gpa to vva
> > >    * (guest physical addr -> vhost virtual addr)
> > >    */
> > > - vq = dev->virtqueue[VIRTIO_RXQ];
> > > + vq = dev->virtqueue[queue_id];
> > >   vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
> > >   vb_hdr_addr = vb_addr;
> > >  
> > > @@ -464,11 +477,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> > >  
> > >   LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
> > >           dev->device_fh);
> > > - if (unlikely(queue_id != VIRTIO_RXQ)) {
> > > -         LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
> > > +         RTE_LOG(ERR, VHOST_DATA,
> > > +                 "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > +                 __func__, dev->device_fh, queue_id);
> > > +         return 0;
> > >   }
> > >  
> > > - vq = dev->virtqueue[VIRTIO_RXQ];
> > > + vq = dev->virtqueue[queue_id];
> > >   count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
> > >  
> > >   if (count == 0)
> > > @@ -509,8 +525,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> > >                                                   res_cur_idx);
> > >           } while (success == 0);
> > >  
> > > -         entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
> > > -                 res_cur_idx, pkts[pkt_idx]);
> > > +         entry_success = copy_from_mbuf_to_vring(dev, queue_id,
> > > +                 res_base_idx, res_cur_idx, pkts[pkt_idx]);
> > >  
> > >           rte_compiler_barrier();
> > >  
> > > @@ -559,12 +575,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
> > >   uint16_t free_entries, entry_success = 0;
> > >   uint16_t avail_idx;
> > >  
> > > - if (unlikely(queue_id != VIRTIO_TXQ)) {
> > > -         LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
> > > +         RTE_LOG(ERR, VHOST_DATA,
> > > +                 "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > +                 __func__, dev->device_fh, queue_id);
> > >           return 0;
> > >   }
> > >  
> > > - vq = dev->virtqueue[VIRTIO_TXQ];
> > > + vq = dev->virtqueue[queue_id];
> > >   avail_idx =  *((volatile uint16_t *)&vq->avail->idx);
> > >  
> > >   /* If there are no available buffers then return. */
> > > -- 
> > > 1.9.0

Reply via email to