Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-29 Thread Jason Wang



On 2018年07月24日 11:28, Tonghao Zhang wrote:

On Tue, Jul 24, 2018 at 10:53 AM Toshiaki Makita
  wrote:

On 2018/07/24 2:31, Tonghao Zhang wrote:

On Mon, Jul 23, 2018 at 10:20 PM Toshiaki Makita
  wrote:

On 18/07/23 (月) 21:43, Tonghao Zhang wrote:

On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
  wrote:

On 2018/07/22 3:04,xiangxia.m@gmail.com  wrote:

From: Tonghao Zhang

Factor out generic busy polling logic and will be
used for in tx path in the next patch. And with the patch,
qemu can set differently the busyloop_timeout for rx queue.

Signed-off-by: Tonghao Zhang
---

...

+static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
+  struct vhost_virtqueue *rvq,
+  struct vhost_virtqueue *tvq,
+  bool rx)
+{
+ struct socket *sock = rvq->private_data;
+
+ if (rx) {
+ if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+ vhost_poll_queue(&tvq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+ vhost_disable_notify(&net->dev, tvq);
+ vhost_poll_queue(&tvq->poll);
+ }
+ } else if ((sock && sk_has_rx_data(sock->sk)) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) {
+ vhost_poll_queue(&rvq->poll);

Now we wait for vq_avail for rx as well, I think you cannot skip
vhost_enable_notify() on tx. Probably you might want to do:

I think vhost_enable_notify is needed.


} else if (sock && sk_has_rx_data(sock->sk)) {
  if (!vhost_vq_avail_empty(&net->dev, rvq)) {
  vhost_poll_queue(&rvq->poll);
  } else if (unlikely(vhost_enable_notify(&net->dev, rvq))) {
  vhost_disable_notify(&net->dev, rvq);
  vhost_poll_queue(&rvq->poll);
  }
}

As Jason review as before, we only want rx kick when packet is pending at
socket but we're out of available buffers. So we just enable notify,
but not poll it ?

  } else if ((sock && sk_has_rx_data(sock->sk)) &&
  !vhost_vq_avail_empty(&net->dev, rvq)) {
  vhost_poll_queue(&rvq->poll);
  else {
  vhost_enable_notify(&net->dev, rvq);
  }

When vhost_enable_notify() returns true the avail becomes non-empty
while we are enabling notify. We may delay the rx process if we don't
check the return value of vhost_enable_notify().

I got it thanks.

Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on tx?

I cant find why it is better, if necessary, we can do it.

The reason is pretty simple... we are busypolling the socket so we don't
need rx wakeups during it?

OK, but one question, how about rx? do we use the
vhost_net_disable_vq/vhost_net_ensable_vq on rx ?

If we are busypolling the sock tx buf? I'm not sure if polling it
improves the performance.

Not the sock tx buff, when we are busypolling in handle_rx, we will
check the tx vring via  vhost_vq_avail_empty.
So, should we disable the tvq, e.g. vhost_net_disable_vq(net, tvq)?


This could be done on top since tx wakeups only happnes when we run out 
of sndbuf.


Thanks

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Toshiaki Makita
On 2018/07/24 12:28, Tonghao Zhang wrote:
> On Tue, Jul 24, 2018 at 10:53 AM Toshiaki Makita
>  wrote:
>>
>> On 2018/07/24 2:31, Tonghao Zhang wrote:
>>> On Mon, Jul 23, 2018 at 10:20 PM Toshiaki Makita
>>>  wrote:

 On 18/07/23 (月) 21:43, Tonghao Zhang wrote:
> On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
>  wrote:
>>
>> On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
>>> From: Tonghao Zhang 
>>>
>>> Factor out generic busy polling logic and will be
>>> used for in tx path in the next patch. And with the patch,
>>> qemu can set differently the busyloop_timeout for rx queue.
>>>
>>> Signed-off-by: Tonghao Zhang 
>>> ---
>> ...
>>> +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
>>> +  struct vhost_virtqueue *rvq,
>>> +  struct vhost_virtqueue *tvq,
>>> +  bool rx)
>>> +{
>>> + struct socket *sock = rvq->private_data;
>>> +
>>> + if (rx) {
>>> + if (!vhost_vq_avail_empty(>dev, tvq)) {
>>> + vhost_poll_queue(>poll);
>>> + } else if (unlikely(vhost_enable_notify(>dev, tvq))) 
>>> {
>>> + vhost_disable_notify(>dev, tvq);
>>> + vhost_poll_queue(>poll);
>>> + }
>>> + } else if ((sock && sk_has_rx_data(sock->sk)) &&
>>> + !vhost_vq_avail_empty(>dev, rvq)) {
>>> + vhost_poll_queue(>poll);
>>
>> Now we wait for vq_avail for rx as well, I think you cannot skip
>> vhost_enable_notify() on tx. Probably you might want to do:
> I think vhost_enable_notify is needed.
>
>> } else if (sock && sk_has_rx_data(sock->sk)) {
>>  if (!vhost_vq_avail_empty(>dev, rvq)) {
>>  vhost_poll_queue(>poll);
>>  } else if (unlikely(vhost_enable_notify(>dev, rvq))) {
>>  vhost_disable_notify(>dev, rvq);
>>  vhost_poll_queue(>poll);
>>  }
>> }
> As Jason review as before, we only want rx kick when packet is pending at
> socket but we're out of available buffers. So we just enable notify,
> but not poll it ?
>
>  } else if ((sock && sk_has_rx_data(sock->sk)) &&
>  !vhost_vq_avail_empty(>dev, rvq)) {
>  vhost_poll_queue(>poll);
>  else {
>  vhost_enable_notify(>dev, rvq);
>  }

 When vhost_enable_notify() returns true the avail becomes non-empty
 while we are enabling notify. We may delay the rx process if we don't
 check the return value of vhost_enable_notify().
>>> I got it thanks.
>> Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on 
>> tx?
> I cant find why it is better, if necessary, we can do it.

 The reason is pretty simple... we are busypolling the socket so we don't
 need rx wakeups during it?
>>> OK, but one question, how about rx? do we use the
>>> vhost_net_disable_vq/vhost_net_ensable_vq on rx ?
>>
>> If we are busypolling the sock tx buf? I'm not sure if polling it
>> improves the performance.
> Not the sock tx buff, when we are busypolling in handle_rx, we will
> check the tx vring via  vhost_vq_avail_empty.
> So, should we the disable tvq, e.g. vhost_net_disable_vq(net, tvq)?> --

When you want to stop vq kicks from the guest you should call
vhost_disable_notify() and when you want to stop vq wakeups from the
socket you should call vhost_net_disable_vq().

You are polling vq_avail so you want to stop vq kicks thus
vhost_disable_notify() is needed and it is already called.

-- 
Toshiaki Makita

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Tonghao Zhang
On Tue, Jul 24, 2018 at 10:53 AM Toshiaki Makita
 wrote:
>
> On 2018/07/24 2:31, Tonghao Zhang wrote:
> > On Mon, Jul 23, 2018 at 10:20 PM Toshiaki Makita
> >  wrote:
> >>
> >> On 18/07/23 (月) 21:43, Tonghao Zhang wrote:
> >>> On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
> >>>  wrote:
> 
>  On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
> > From: Tonghao Zhang 
> >
> > Factor out generic busy polling logic and will be
> > used for in tx path in the next patch. And with the patch,
> > qemu can set differently the busyloop_timeout for rx queue.
> >
> > Signed-off-by: Tonghao Zhang 
> > ---
>  ...
> > +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
> > +  struct vhost_virtqueue *rvq,
> > +  struct vhost_virtqueue *tvq,
> > +  bool rx)
> > +{
> > + struct socket *sock = rvq->private_data;
> > +
> > + if (rx) {
> > + if (!vhost_vq_avail_empty(>dev, tvq)) {
> > + vhost_poll_queue(>poll);
> > + } else if (unlikely(vhost_enable_notify(>dev, tvq))) 
> > {
> > + vhost_disable_notify(>dev, tvq);
> > + vhost_poll_queue(>poll);
> > + }
> > + } else if ((sock && sk_has_rx_data(sock->sk)) &&
> > + !vhost_vq_avail_empty(>dev, rvq)) {
> > + vhost_poll_queue(>poll);
> 
>  Now we wait for vq_avail for rx as well, I think you cannot skip
>  vhost_enable_notify() on tx. Probably you might want to do:
> >>> I think vhost_enable_notify is needed.
> >>>
>  } else if (sock && sk_has_rx_data(sock->sk)) {
>   if (!vhost_vq_avail_empty(>dev, rvq)) {
>   vhost_poll_queue(>poll);
>   } else if (unlikely(vhost_enable_notify(>dev, rvq))) {
>   vhost_disable_notify(>dev, rvq);
>   vhost_poll_queue(>poll);
>   }
>  }
> >>> As Jason review as before, we only want rx kick when packet is pending at
> >>> socket but we're out of available buffers. So we just enable notify,
> >>> but not poll it ?
> >>>
> >>>  } else if ((sock && sk_has_rx_data(sock->sk)) &&
> >>>  !vhost_vq_avail_empty(>dev, rvq)) {
> >>>  vhost_poll_queue(>poll);
> >>>  else {
> >>>  vhost_enable_notify(>dev, rvq);
> >>>  }
> >>
> >> When vhost_enable_notify() returns true the avail becomes non-empty
> >> while we are enabling notify. We may delay the rx process if we don't
> >> check the return value of vhost_enable_notify().
> > I got it thanks.
>  Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on 
>  tx?
> >>> I cant find why it is better, if necessary, we can do it.
> >>
> >> The reason is pretty simple... we are busypolling the socket so we don't
> >> need rx wakeups during it?
> > OK, but one question, how about rx? do we use the
> > vhost_net_disable_vq/vhost_net_ensable_vq on rx ?
>
> If we are busypolling the sock tx buf? I'm not sure if polling it
> improves the performance.
Not the sock tx buff, when we are busypolling in handle_rx, we will
check the tx vring via  vhost_vq_avail_empty.
So, should we the disable tvq, e.g. vhost_net_disable_vq(net, tvq)?> --
> Toshiaki Makita
>
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Toshiaki Makita
On 2018/07/24 2:31, Tonghao Zhang wrote:
> On Mon, Jul 23, 2018 at 10:20 PM Toshiaki Makita
>  wrote:
>>
>> On 18/07/23 (月) 21:43, Tonghao Zhang wrote:
>>> On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
>>>  wrote:

 On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
> From: Tonghao Zhang 
>
> Factor out generic busy polling logic and will be
> used for in tx path in the next patch. And with the patch,
> qemu can set differently the busyloop_timeout for rx queue.
>
> Signed-off-by: Tonghao Zhang 
> ---
 ...
> +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
> +  struct vhost_virtqueue *rvq,
> +  struct vhost_virtqueue *tvq,
> +  bool rx)
> +{
> + struct socket *sock = rvq->private_data;
> +
> + if (rx) {
> + if (!vhost_vq_avail_empty(>dev, tvq)) {
> + vhost_poll_queue(>poll);
> + } else if (unlikely(vhost_enable_notify(>dev, tvq))) {
> + vhost_disable_notify(>dev, tvq);
> + vhost_poll_queue(>poll);
> + }
> + } else if ((sock && sk_has_rx_data(sock->sk)) &&
> + !vhost_vq_avail_empty(>dev, rvq)) {
> + vhost_poll_queue(>poll);

 Now we wait for vq_avail for rx as well, I think you cannot skip
 vhost_enable_notify() on tx. Probably you might want to do:
>>> I think vhost_enable_notify is needed.
>>>
 } else if (sock && sk_has_rx_data(sock->sk)) {
  if (!vhost_vq_avail_empty(>dev, rvq)) {
  vhost_poll_queue(>poll);
  } else if (unlikely(vhost_enable_notify(>dev, rvq))) {
  vhost_disable_notify(>dev, rvq);
  vhost_poll_queue(>poll);
  }
 }
>>> As Jason review as before, we only want rx kick when packet is pending at
>>> socket but we're out of available buffers. So we just enable notify,
>>> but not poll it ?
>>>
>>>  } else if ((sock && sk_has_rx_data(sock->sk)) &&
>>>  !vhost_vq_avail_empty(>dev, rvq)) {
>>>  vhost_poll_queue(>poll);
>>>  else {
>>>  vhost_enable_notify(>dev, rvq);
>>>  }
>>
>> When vhost_enable_notify() returns true the avail becomes non-empty
>> while we are enabling notify. We may delay the rx process if we don't
>> check the return value of vhost_enable_notify().
> I got it thanks.
 Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on 
 tx?
>>> I cant find why it is better, if necessary, we can do it.
>>
>> The reason is pretty simple... we are busypolling the socket so we don't
>> need rx wakeups during it?
> OK, but one question, how about rx? do we use the
> vhost_net_disable_vq/vhost_net_ensable_vq on rx ?

If we are busypolling the sock tx buf? I'm not sure if polling it
improves the performance.

-- 
Toshiaki Makita

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Tonghao Zhang
On Mon, Jul 23, 2018 at 10:20 PM Toshiaki Makita
 wrote:
>
> On 18/07/23 (月) 21:43, Tonghao Zhang wrote:
> > On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
> >  wrote:
> >>
> >> On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
> >>> From: Tonghao Zhang 
> >>>
> >>> Factor out generic busy polling logic and will be
> >>> used for in tx path in the next patch. And with the patch,
> >>> qemu can set differently the busyloop_timeout for rx queue.
> >>>
> >>> Signed-off-by: Tonghao Zhang 
> >>> ---
> >> ...
> >>> +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
> >>> +  struct vhost_virtqueue *rvq,
> >>> +  struct vhost_virtqueue *tvq,
> >>> +  bool rx)
> >>> +{
> >>> + struct socket *sock = rvq->private_data;
> >>> +
> >>> + if (rx) {
> >>> + if (!vhost_vq_avail_empty(>dev, tvq)) {
> >>> + vhost_poll_queue(>poll);
> >>> + } else if (unlikely(vhost_enable_notify(>dev, tvq))) {
> >>> + vhost_disable_notify(>dev, tvq);
> >>> + vhost_poll_queue(>poll);
> >>> + }
> >>> + } else if ((sock && sk_has_rx_data(sock->sk)) &&
> >>> + !vhost_vq_avail_empty(>dev, rvq)) {
> >>> + vhost_poll_queue(>poll);
> >>
> >> Now we wait for vq_avail for rx as well, I think you cannot skip
> >> vhost_enable_notify() on tx. Probably you might want to do:
> > I think vhost_enable_notify is needed.
> >
> >> } else if (sock && sk_has_rx_data(sock->sk)) {
> >>  if (!vhost_vq_avail_empty(>dev, rvq)) {
> >>  vhost_poll_queue(>poll);
> >>  } else if (unlikely(vhost_enable_notify(>dev, rvq))) {
> >>  vhost_disable_notify(>dev, rvq);
> >>  vhost_poll_queue(>poll);
> >>  }
> >> }
> > As Jason review as before, we only want rx kick when packet is pending at
> > socket but we're out of available buffers. So we just enable notify,
> > but not poll it ?
> >
> >  } else if ((sock && sk_has_rx_data(sock->sk)) &&
> >  !vhost_vq_avail_empty(>dev, rvq)) {
> >  vhost_poll_queue(>poll);
> >  else {
> >  vhost_enable_notify(>dev, rvq);
> >  }
>
> When vhost_enable_notify() returns true the avail becomes non-empty
> while we are enabling notify. We may delay the rx process if we don't
> check the return value of vhost_enable_notify().
I got it thanks.
> >> Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on 
> >> tx?
> > I cant find why it is better, if necessary, we can do it.
>
> The reason is pretty simple... we are busypolling the socket so we don't
> need rx wakeups during it?
OK, but one question, how about rx? do we use the
vhost_net_disable_vq/vhost_net_ensable_vq on rx ?
> --
> Toshiaki Makita
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Tonghao Zhang
On Mon, Jul 23, 2018 at 5:58 PM Toshiaki Makita
 wrote:
>
> On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
> > From: Tonghao Zhang 
> >
> > Factor out generic busy polling logic and will be
> > used for in tx path in the next patch. And with the patch,
> > qemu can set differently the busyloop_timeout for rx queue.
> >
> > Signed-off-by: Tonghao Zhang 
> > ---
> ...
> > +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
> > +  struct vhost_virtqueue *rvq,
> > +  struct vhost_virtqueue *tvq,
> > +  bool rx)
> > +{
> > + struct socket *sock = rvq->private_data;
> > +
> > + if (rx) {
> > + if (!vhost_vq_avail_empty(>dev, tvq)) {
> > + vhost_poll_queue(>poll);
> > + } else if (unlikely(vhost_enable_notify(>dev, tvq))) {
> > + vhost_disable_notify(>dev, tvq);
> > + vhost_poll_queue(>poll);
> > + }
> > + } else if ((sock && sk_has_rx_data(sock->sk)) &&
> > + !vhost_vq_avail_empty(>dev, rvq)) {
> > + vhost_poll_queue(>poll);
>
> Now we wait for vq_avail for rx as well, I think you cannot skip
> vhost_enable_notify() on tx. Probably you might want to do:
I think vhost_enable_notify is needed.

> } else if (sock && sk_has_rx_data(sock->sk)) {
> if (!vhost_vq_avail_empty(>dev, rvq)) {
> vhost_poll_queue(>poll);
> } else if (unlikely(vhost_enable_notify(>dev, rvq))) {
> vhost_disable_notify(>dev, rvq);
> vhost_poll_queue(>poll);
> }
> }
As Jason review as before, we only want rx kick when packet is pending at
socket but we're out of available buffers. So we just enable notify,
but not poll it ?

} else if ((sock && sk_has_rx_data(sock->sk)) &&
!vhost_vq_avail_empty(>dev, rvq)) {
vhost_poll_queue(>poll);
else {
vhost_enable_notify(>dev, rvq);
}
> Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on tx?
I cant find why it is better, if necessary, we can do it.
> --
> Toshiaki Makita
>
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization


Re: [PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-23 Thread Toshiaki Makita
On 2018/07/22 3:04, xiangxia.m@gmail.com wrote:
> From: Tonghao Zhang 
> 
> Factor out generic busy polling logic and will be
> used for in tx path in the next patch. And with the patch,
> qemu can set differently the busyloop_timeout for rx queue.
> 
> Signed-off-by: Tonghao Zhang 
> ---
...
> +static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
> +  struct vhost_virtqueue *rvq,
> +  struct vhost_virtqueue *tvq,
> +  bool rx)
> +{
> + struct socket *sock = rvq->private_data;
> +
> + if (rx) {
> + if (!vhost_vq_avail_empty(&net->dev, tvq)) {
> + vhost_poll_queue(&tvq->poll);
> + } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
> + vhost_disable_notify(&net->dev, tvq);
> + vhost_poll_queue(&tvq->poll);
> + }
> + } else if ((sock && sk_has_rx_data(sock->sk)) &&
> + !vhost_vq_avail_empty(&net->dev, rvq)) {
> + vhost_poll_queue(&rvq->poll);

Now we wait for vq_avail for rx as well, I think you cannot skip
vhost_enable_notify() on tx. Probably you might want to do:

} else if (sock && sk_has_rx_data(sock->sk)) {
if (!vhost_vq_avail_empty(&net->dev, rvq)) {
vhost_poll_queue(&rvq->poll);
} else if (unlikely(vhost_enable_notify(&net->dev, rvq))) {
vhost_disable_notify(&net->dev, rvq);
vhost_poll_queue(&rvq->poll);
}
}

Also it's better to care vhost_net_disable_vq()/vhost_net_enable_vq() on tx?

-- 
Toshiaki Makita

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization


[PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()

2018-07-21 Thread xiangxia . m . yue
From: Tonghao Zhang 

Factor out generic busy polling logic and will be
used for in tx path in the next patch. And with the patch,
qemu can set differently the busyloop_timeout for rx queue.

Signed-off-by: Tonghao Zhang 
---
 drivers/vhost/net.c | 114 ++--
 1 file changed, 74 insertions(+), 40 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 321264c..2dc937e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -428,6 +428,78 @@ static int vhost_net_enable_vq(struct vhost_net *n,
return vhost_poll_start(poll, sock->file);
 }
 
+static int sk_has_rx_data(struct sock *sk)
+{
+   struct socket *sock = sk->sk_socket;
+
+   if (sock->ops->peek_len)
+   return sock->ops->peek_len(sock);
+
+   return skb_queue_empty(&sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_vq_check(struct vhost_net *net,
+struct vhost_virtqueue *rvq,
+struct vhost_virtqueue *tvq,
+bool rx)
+{
+   struct socket *sock = rvq->private_data;
+
+   if (rx) {
+   if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+   vhost_poll_queue(&tvq->poll);
+   } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+   vhost_disable_notify(&net->dev, tvq);
+   vhost_poll_queue(&tvq->poll);
+   }
+   } else if ((sock && sk_has_rx_data(sock->sk)) &&
+   !vhost_vq_avail_empty(&net->dev, rvq)) {
+   vhost_poll_queue(&rvq->poll);
+   }
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+   struct vhost_virtqueue *rvq,
+   struct vhost_virtqueue *tvq,
+   bool *busyloop_intr,
+   bool rx)
+{
+   unsigned long busyloop_timeout;
+   unsigned long endtime;
+   struct socket *sock;
+   struct vhost_virtqueue *vq = rx ? tvq : rvq;
+
+   mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+   vhost_disable_notify(&net->dev, vq);
+   sock = rvq->private_data;
+
+   busyloop_timeout = rx ? rvq->busyloop_timeout:
+   tvq->busyloop_timeout;
+
+   preempt_disable();
+   endtime = busy_clock() + busyloop_timeout;
+
+   while (vhost_can_busy_poll(endtime)) {
+   if (vhost_has_work(&net->dev)) {
+   *busyloop_intr = true;
+   break;
+   }
+
+   if ((sock && sk_has_rx_data(sock->sk) &&
+!vhost_vq_avail_empty(&net->dev, rvq)) ||
+   !vhost_vq_avail_empty(&net->dev, tvq))
+   break;
+
+   cpu_relax();
+   }
+
+   preempt_enable();
+
+   vhost_net_busy_poll_vq_check(net, rvq, tvq, rx);
+
+   mutex_unlock(&vq->mutex);
+}
+
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
@@ -631,16 +703,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, 
struct sock *sk)
return len;
 }
 
-static int sk_has_rx_data(struct sock *sk)
-{
-   struct socket *sock = sk->sk_socket;
-
-   if (sock->ops->peek_len)
-   return sock->ops->peek_len(sock);
-
-   return skb_queue_empty(>sk_receive_queue);
-}
-
 static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
 {
struct vhost_virtqueue *vq = &nvq->vq;
@@ -660,41 +722,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net 
*net, struct sock *sk,
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
-   unsigned long uninitialized_var(endtime);
int len = peek_head_len(rnvq, sk);
 
-   if (!len && tvq->busyloop_timeout) {
+   if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
vhost_rx_signal_used(rnvq);
/* Both tx vq and rx socket were polled here */
-   mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
-   vhost_disable_notify(&net->dev, tvq);
-
-   preempt_disable();
-   endtime = busy_clock() + tvq->busyloop_timeout;
-
-   while (vhost_can_busy_poll(endtime)) {
-   if (vhost_has_work(&net->dev)) {
-   *busyloop_intr = true;
-   break;
-   }
-   if ((sk_has_rx_data(sk) &&
-!vhost_vq_avail_empty(&net->dev, rvq)) ||
-   !vhost_vq_avail_empty(&net->dev, tvq))
-   break;
-   cpu_relax();
-   }
-
-   preempt_enable();
-
-