Re: [net-next PATCH v2 5/5] virtio_net: XDP support for adjust_head

2017-02-06 Thread Jason Wang



On 2017年02月07日 03:29, John Fastabend wrote:

On 17-02-05 11:08 PM, Jason Wang wrote:


On 2017年02月03日 11:16, John Fastabend wrote:

Add support for XDP adjust head by allocating a 256B header region
that XDP programs can grow into. This is only enabled when a XDP
program is loaded.

In order to ensure that we do not have to unwind queue headroom push
queue setup below bpf_prog_add. It reads better to do a prog ref
unwind vs another queue setup call.

At the moment this code must do a full reset to ensure old buffers
without headroom on program add or with headroom on program removal
are not used incorrectly in the datapath. Ideally we would only
have to disable/enable the RX queues being updated but there is no
API to do this at the moment in virtio so use the big hammer. In
practice it is likely not that big of a problem as this will only
happen when XDP is enabled/disabled changing programs does not
require the reset. There is some risk that the driver may either
have an allocation failure or for some reason fail to correctly
negotiate with the underlying backend in this case the driver will
be left uninitialized. I have not seen this ever happen on my test
systems and for what it's worth this same failure case can occur
from probe and other contexts in virtio framework.

Signed-off-by: John Fastabend 
---


[...]


@@ -412,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
   struct bpf_prog *xdp_prog;
 len -= vi->hdr_len;
-skb_trim(skb, len);
 rcu_read_lock();
   xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -424,12 +429,16 @@ static struct sk_buff *receive_small(struct net_device
*dev,
   if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
   goto err_xdp;
   -xdp.data = skb->data;
+xdp.data_hard_start = skb->data;
+xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
   xdp.data_end = xdp.data + len;
   act = bpf_prog_run_xdp(xdp_prog, &xdp);
 switch (act) {
   case XDP_PASS:
+/* Recalculate length in case bpf program changed it */
+__skb_pull(skb, xdp.data - xdp.data_hard_start);

But skb->len were trimmed to len below which seems wrong.

I believe this is correct and it passes my basic iperf/ping tests.

When we are using small buffers with XDP, skb->data is pointing to the front
of the buffer. This space includes the XDP headroom. When we pass the skb up
to the stack we need to pull this off and point to the start of the data. But
there still is likely a bunch of room at the end of the buffer assuming the
packet is smaller than the buffer size.


+len = xdp.data_end - xdp.data;
   break;
   case XDP_TX:
   if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
@@ -446,6 +455,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
   }
   rcu_read_unlock();
   +skb_trim(skb, len);

So here we trim the packet to set the length to the actual payload size. The
'len' parameter passed into receive_small does not include the headroom so this
gives us the correct length of the payload.

Make sense?


Yes, you are right.




   return skb;
 err_xdp:

[...]


@@ -569,7 +580,7 @@ static struct sk_buff *receive_mergeable(struct net_device
*dev,
  page, offset, &len);
   if (!xdp_page)
   goto err_xdp;
-offset = 0;
+offset = VIRTIO_XDP_HEADROOM;
   } else {
   xdp_page = page;
   }
@@ -582,19 +593,30 @@ static struct sk_buff *receive_mergeable(struct
net_device *dev,
   if (unlikely(hdr->hdr.gso_type))
   goto err_xdp;
   +/* Allow consuming headroom but reserve enough space to push
+ * the descriptor on if we get an XDP_TX return code.
+ */
   data = page_address(xdp_page) + offset;
+xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;

Should be data - VIRTIO_XDP_HEADROOM I think?


If the XDP program does an adjust_head() and then a XDP_TX I want to ensure
we reserve enough headroom to push the header onto the buffer when the packet
is sent. So the additional hdr_len reserve here is intentional. Otherwise we
would need to detect this and do some type of linearize action.


I get the point.




   xdp.data = data + vi->hdr_len;
   xdp.data_end = xdp.data + (len - vi->hdr_len);
   act = bpf_prog_run_xdp(xdp_prog, &xdp);
 switch (act) {
   case XDP_PASS:
+/* recalculate offset to account for any header
+ * adjustments. Note other cases do not build an
+ * skb and avoid using offset
+ */
+offset = xdp.data -
+page_address(xdp_page) - vi->hdr_len;
+
   /* We can only create skb based on xdp_page. */
   if (unlikely(xdp_page != page)) {
   

Re: [net-next PATCH v2 5/5] virtio_net: XDP support for adjust_head

2017-02-06 Thread John Fastabend
On 17-02-05 11:08 PM, Jason Wang wrote:
> 
> 
> On 2017年02月03日 11:16, John Fastabend wrote:
>> Add support for XDP adjust head by allocating a 256B header region
>> that XDP programs can grow into. This is only enabled when a XDP
>> program is loaded.
>>
>> In order to ensure that we do not have to unwind queue headroom push
>> queue setup below bpf_prog_add. It reads better to do a prog ref
>> unwind vs another queue setup call.
>>
>> At the moment this code must do a full reset to ensure old buffers
>> without headroom on program add or with headroom on program removal
>> are not used incorrectly in the datapath. Ideally we would only
>> have to disable/enable the RX queues being updated but there is no
>> API to do this at the moment in virtio so use the big hammer. In
>> practice it is likely not that big of a problem as this will only
>> happen when XDP is enabled/disabled changing programs does not
>> require the reset. There is some risk that the driver may either
>> have an allocation failure or for some reason fail to correctly
>> negotiate with the underlying backend in this case the driver will
>> be left uninitialized. I have not seen this ever happen on my test
>> systems and for what it's worth this same failure case can occur
>> from probe and other contexts in virtio framework.
>>
>> Signed-off-by: John Fastabend 
>> ---


[...]

>> @@ -412,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device 
>> *dev,
>>   struct bpf_prog *xdp_prog;
>> len -= vi->hdr_len;
>> -skb_trim(skb, len);
>> rcu_read_lock();
>>   xdp_prog = rcu_dereference(rq->xdp_prog);
>> @@ -424,12 +429,16 @@ static struct sk_buff *receive_small(struct net_device
>> *dev,
>>   if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
>>   goto err_xdp;
>>   -xdp.data = skb->data;
>> +xdp.data_hard_start = skb->data;
>> +xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
>>   xdp.data_end = xdp.data + len;
>>   act = bpf_prog_run_xdp(xdp_prog, &xdp);
>> switch (act) {
>>   case XDP_PASS:
>> +/* Recalculate length in case bpf program changed it */
>> +__skb_pull(skb, xdp.data - xdp.data_hard_start);
> 
> But skb->len were trimmed to len below which seems wrong.

I believe this is correct and it passes my basic iperf/ping tests.

When we are using small buffers with XDP, skb->data is pointing to the front
of the buffer. This space includes the XDP headroom. When we pass the skb up
to the stack we need to pull this off and point to the start of the data. But
there still is likely a bunch of room at the end of the buffer assuming the
packet is smaller than the buffer size.

> 
>> +len = xdp.data_end - xdp.data;
>>   break;
>>   case XDP_TX:
>>   if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
>> @@ -446,6 +455,7 @@ static struct sk_buff *receive_small(struct net_device 
>> *dev,
>>   }
>>   rcu_read_unlock();
>>   +skb_trim(skb, len);

So here we trim the packet to set the length to the actual payload size. The
'len' parameter passed into receive_small does not include the headroom so this
gives us the correct length of the payload.

Make sense?

>>   return skb;
>> err_xdp:

[...]

>> @@ -569,7 +580,7 @@ static struct sk_buff *receive_mergeable(struct 
>> net_device
>> *dev,
>> page, offset, &len);
>>   if (!xdp_page)
>>   goto err_xdp;
>> -offset = 0;
>> +offset = VIRTIO_XDP_HEADROOM;
>>   } else {
>>   xdp_page = page;
>>   }
>> @@ -582,19 +593,30 @@ static struct sk_buff *receive_mergeable(struct
>> net_device *dev,
>>   if (unlikely(hdr->hdr.gso_type))
>>   goto err_xdp;
>>   +/* Allow consuming headroom but reserve enough space to push
>> + * the descriptor on if we get an XDP_TX return code.
>> + */
>>   data = page_address(xdp_page) + offset;
>> +xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
> 
> Should be data - VIRTIO_XDP_HEADROOM I think?
> 

If the XDP program does an adjust_head() and then a XDP_TX I want to ensure
we reserve enough headroom to push the header onto the buffer when the packet
is sent. So the additional hdr_len reserve here is intentional. Otherwise we
would need to detect this and do some type of linearize action.

>>   xdp.data = data + vi->hdr_len;
>>   xdp.data_end = xdp.data + (len - vi->hdr_len);
>>   act = bpf_prog_run_xdp(xdp_prog, &xdp);
>> switch (act) {
>>   case XDP_PASS:
>> +/* recalculate offset to account for any header
>> + * adjustments. Note other cases do not build an
>> + * skb and avoid using offset
>> + */
>> +offset = xdp.data -
>> +page_address(xdp_page) - vi->hdr_len;

Re: [net-next PATCH v2 5/5] virtio_net: XDP support for adjust_head

2017-02-05 Thread Jason Wang



On 2017年02月03日 11:16, John Fastabend wrote:

Add support for XDP adjust head by allocating a 256B header region
that XDP programs can grow into. This is only enabled when a XDP
program is loaded.

In order to ensure that we do not have to unwind queue headroom push
queue setup below bpf_prog_add. It reads better to do a prog ref
unwind vs another queue setup call.

At the moment this code must do a full reset to ensure old buffers
without headroom on program add or with headroom on program removal
are not used incorrectly in the datapath. Ideally we would only
have to disable/enable the RX queues being updated but there is no
API to do this at the moment in virtio so use the big hammer. In
practice it is likely not that big of a problem as this will only
happen when XDP is enabled/disabled changing programs does not
require the reset. There is some risk that the driver may either
have an allocation failure or for some reason fail to correctly
negotiate with the underlying backend in this case the driver will
be left uninitialized. I have not seen this ever happen on my test
systems and for what it's worth this same failure case can occur
from probe and other contexts in virtio framework.

Signed-off-by: John Fastabend 
---
  drivers/net/virtio_net.c |  154 +-
  1 file changed, 125 insertions(+), 29 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 07f9076..52a18b8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,6 +42,9 @@
  #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
  #define GOOD_COPY_LEN 128
  
+/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */

+#define VIRTIO_XDP_HEADROOM 256
+
  /* RX packet size EWMA. The average packet size is used to determine the 
packet
   * buffer size when refilling RX rings. As the entire RX ring may be refilled
   * at once, the weight is chosen so that the EWMA will be insensitive to 
short-
@@ -368,6 +371,7 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
}
  
  	if (vi->mergeable_rx_bufs) {

+   xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
/* Zero header and leave csum up to XDP layers */
hdr = xdp->data;
memset(hdr, 0, vi->hdr_len);
@@ -384,7 +388,9 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
num_sg = 2;
sg_init_table(sq->sg, 2);
sg_set_buf(sq->sg, hdr, vi->hdr_len);
-   skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+   skb_to_sgvec(skb, sq->sg + 1,
+xdp->data - xdp->data_hard_start,
+xdp->data_end - xdp->data);
}
err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
   data, GFP_ATOMIC);
@@ -412,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct bpf_prog *xdp_prog;
  
  	len -= vi->hdr_len;

-   skb_trim(skb, len);
  
  	rcu_read_lock();

xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -424,12 +429,16 @@ static struct sk_buff *receive_small(struct net_device 
*dev,
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
  
-		xdp.data = skb->data;

+   xdp.data_hard_start = skb->data;
+   xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
xdp.data_end = xdp.data + len;
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
  
  		switch (act) {

case XDP_PASS:
+   /* Recalculate length in case bpf program changed it */
+   __skb_pull(skb, xdp.data - xdp.data_hard_start);


But skb->len were trimmed to len below which seems wrong.


+   len = xdp.data_end - xdp.data;
break;
case XDP_TX:
	if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
@@ -446,6 +455,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
rcu_read_unlock();
  
+	skb_trim(skb, len);

return skb;
  
  err_xdp:

@@ -494,7 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue 
*rq,
   unsigned int *len)
  {
struct page *page = alloc_page(GFP_ATOMIC);
-   unsigned int page_off = 0;
+   unsigned int page_off = VIRTIO_XDP_HEADROOM;
  
  	if (!page)

return NULL;
@@ -530,7 +540,8 @@ static struct page *xdp_linearize_page(struct receive_queue 
*rq,
put_page(p);
}
  
-	*len = page_off;

+   /* Headroom does not contribute to packet length */
+   *len = page_off - VIRTIO_XDP_HEADROOM;
return page;
  err_buf:
__free_pages(page, 0);
@@ -569,7 +580,7 @@ static struct sk_buff *receive_mergeable(struct net_device 
*dev,
  

Re: [net-next PATCH v2 5/5] virtio_net: XDP support for adjust_head

2017-02-02 Thread Michael S. Tsirkin
On Thu, Feb 02, 2017 at 07:16:29PM -0800, John Fastabend wrote:
> Add support for XDP adjust head by allocating a 256B header region
> that XDP programs can grow into. This is only enabled when a XDP
> program is loaded.
> 
> In order to ensure that we do not have to unwind queue headroom push
> queue setup below bpf_prog_add. It reads better to do a prog ref
> unwind vs another queue setup call.
> 
> At the moment this code must do a full reset to ensure old buffers
> without headroom on program add or with headroom on program removal
> are not used incorrectly in the datapath. Ideally we would only
> have to disable/enable the RX queues being updated but there is no
> API to do this at the moment in virtio so use the big hammer. In
> practice it is likely not that big of a problem as this will only
> happen when XDP is enabled/disabled changing programs does not
> require the reset. There is some risk that the driver may either
> have an allocation failure or for some reason fail to correctly
> negotiate with the underlying backend in this case the driver will
> be left uninitialized. I have not seen this ever happen on my test
> systems and for what it's worth this same failure case can occur
> from probe and other contexts in virtio framework.
> 
> Signed-off-by: John Fastabend 
> ---
>  drivers/net/virtio_net.c |  154 
> +-
>  1 file changed, 125 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 07f9076..52a18b8 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -42,6 +42,9 @@
>  #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
>  #define GOOD_COPY_LEN 128
>  
> +/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
> +#define VIRTIO_XDP_HEADROOM 256
> +
>  /* RX packet size EWMA. The average packet size is used to determine the 
> packet
>   * buffer size when refilling RX rings. As the entire RX ring may be refilled
>   * at once, the weight is chosen so that the EWMA will be insensitive to 
> short-
> @@ -368,6 +371,7 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
>   }
>  
>   if (vi->mergeable_rx_bufs) {
> + xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
>   /* Zero header and leave csum up to XDP layers */
>   hdr = xdp->data;
>   memset(hdr, 0, vi->hdr_len);
> @@ -384,7 +388,9 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
>   num_sg = 2;
>   sg_init_table(sq->sg, 2);
>   sg_set_buf(sq->sg, hdr, vi->hdr_len);
> - skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
> + skb_to_sgvec(skb, sq->sg + 1,
> +  xdp->data - xdp->data_hard_start,
> +  xdp->data_end - xdp->data);
>   }
>   err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
>  data, GFP_ATOMIC);
> @@ -412,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device 
> *dev,
>   struct bpf_prog *xdp_prog;
>  
>   len -= vi->hdr_len;
> - skb_trim(skb, len);
>  
>   rcu_read_lock();
>   xdp_prog = rcu_dereference(rq->xdp_prog);
> @@ -424,12 +429,16 @@ static struct sk_buff *receive_small(struct net_device 
> *dev,
>   if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
>   goto err_xdp;
>  
> - xdp.data = skb->data;
> + xdp.data_hard_start = skb->data;
> + xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
>   xdp.data_end = xdp.data + len;
>   act = bpf_prog_run_xdp(xdp_prog, &xdp);
>  
>   switch (act) {
>   case XDP_PASS:
> + /* Recalculate length in case bpf program changed it */
> + __skb_pull(skb, xdp.data - xdp.data_hard_start);
> + len = xdp.data_end - xdp.data;
>   break;
>   case XDP_TX:
>   if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
> @@ -446,6 +455,7 @@ static struct sk_buff *receive_small(struct net_device 
> *dev,
>   }
>   rcu_read_unlock();
>  
> + skb_trim(skb, len);
>   return skb;
>  
>  err_xdp:
> @@ -494,7 +504,7 @@ static struct page *xdp_linearize_page(struct 
> receive_queue *rq,
>  unsigned int *len)
>  {
>   struct page *page = alloc_page(GFP_ATOMIC);
> - unsigned int page_off = 0;
> + unsigned int page_off = VIRTIO_XDP_HEADROOM;
>  
>   if (!page)
>   return NULL;
> @@ -530,7 +540,8 @@ static struct page *xdp_linearize_page(struct 
> receive_queue *rq,
>   put_page(p);
>   }
>  
> - *len = page_off;
> + /* Headroom does not contribute to packet length */
> + *len = page_off - VIRTIO_XDP_HEADROOM;
>   return page;
>  err_buf:
>   

[net-next PATCH v2 5/5] virtio_net: XDP support for adjust_head

2017-02-02 Thread John Fastabend
Add support for XDP adjust head by allocating a 256B header region
that XDP programs can grow into. This is only enabled when a XDP
program is loaded.

In order to ensure that we do not have to unwind queue headroom push
queue setup below bpf_prog_add. It reads better to do a prog ref
unwind vs another queue setup call.

At the moment this code must do a full reset to ensure old buffers
without headroom on program add or with headroom on program removal
are not used incorrectly in the datapath. Ideally we would only
have to disable/enable the RX queues being updated but there is no
API to do this at the moment in virtio so use the big hammer. In
practice it is likely not that big of a problem as this will only
happen when XDP is enabled/disabled changing programs does not
require the reset. There is some risk that the driver may either
have an allocation failure or for some reason fail to correctly
negotiate with the underlying backend in this case the driver will
be left uninitialized. I have not seen this ever happen on my test
systems and for what it's worth this same failure case can occur
from probe and other contexts in virtio framework.

Signed-off-by: John Fastabend 
---
 drivers/net/virtio_net.c |  154 +-
 1 file changed, 125 insertions(+), 29 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 07f9076..52a18b8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,6 +42,9 @@
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN  128
 
+/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
+#define VIRTIO_XDP_HEADROOM 256
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -368,6 +371,7 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
}
 
if (vi->mergeable_rx_bufs) {
+   xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
/* Zero header and leave csum up to XDP layers */
hdr = xdp->data;
memset(hdr, 0, vi->hdr_len);
@@ -384,7 +388,9 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
num_sg = 2;
sg_init_table(sq->sg, 2);
sg_set_buf(sq->sg, hdr, vi->hdr_len);
-   skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+   skb_to_sgvec(skb, sq->sg + 1,
+xdp->data - xdp->data_hard_start,
+xdp->data_end - xdp->data);
}
err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
   data, GFP_ATOMIC);
@@ -412,7 +418,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct bpf_prog *xdp_prog;
 
len -= vi->hdr_len;
-   skb_trim(skb, len);
 
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -424,12 +429,16 @@ static struct sk_buff *receive_small(struct net_device 
*dev,
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
 
-   xdp.data = skb->data;
+   xdp.data_hard_start = skb->data;
+   xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
xdp.data_end = xdp.data + len;
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
switch (act) {
case XDP_PASS:
+   /* Recalculate length in case bpf program changed it */
+   __skb_pull(skb, xdp.data - xdp.data_hard_start);
+   len = xdp.data_end - xdp.data;
break;
case XDP_TX:
	if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
@@ -446,6 +455,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
rcu_read_unlock();
 
+   skb_trim(skb, len);
return skb;
 
 err_xdp:
@@ -494,7 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue 
*rq,
   unsigned int *len)
 {
struct page *page = alloc_page(GFP_ATOMIC);
-   unsigned int page_off = 0;
+   unsigned int page_off = VIRTIO_XDP_HEADROOM;
 
if (!page)
return NULL;
@@ -530,7 +540,8 @@ static struct page *xdp_linearize_page(struct receive_queue 
*rq,
put_page(p);
}
 
-   *len = page_off;
+   /* Headroom does not contribute to packet length */
+   *len = page_off - VIRTIO_XDP_HEADROOM;
return page;
 err_buf:
__free_pages(page, 0);
@@ -569,7 +580,7 @@ static struct sk_buff *receive_mergeable(struct net_device 
*dev,
  page, offset, &len);
if (!xdp_page)