When combining xdp_adjust_head and xdp_adjust_meta, it is possible to
make data_meta overlap with the area used by xdp_frame.  A subsequent
invocation of xdp_adjust_head can then clear that area (as it clears
the region previously used by xdp_frame to avoid leaking info), wiping
the stored metadata.
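
Seen from the BPF program side, the problematic sequence looks roughly
like the sketch below (illustration only, not part of this patch: the
helpers bpf_xdp_adjust_head/bpf_xdp_adjust_meta are the standard XDP
helpers, while the program name and the adjustment sizes are made up
and depend on the driver headroom and sizeof(struct xdp_frame)):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp")
  int xdp_meta_overlap(struct xdp_md *ctx)
  {
          /* Pull data deep into the headroom, close to data_hard_start */
          if (bpf_xdp_adjust_head(ctx, -224))
                  return XDP_ABORTED;

          /* Grow the metadata area; data_meta can now land inside the
           * region reserved for struct xdp_frame.  A real program would
           * store its metadata via ctx->data_meta at this point.
           */
          if (bpf_xdp_adjust_meta(ctx, -8))
                  return XDP_ABORTED;

          /* A further head adjustment clears the area previously used
           * by xdp_frame (the info-leak fix), which can wipe the
           * metadata stored above.
           */
          if (bpf_xdp_adjust_head(ctx, -8))
                  return XDP_ABORTED;

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";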

The easiest solution I found was to simply not allow
xdp_buff->data_meta to overlap with the area used by xdp_frame.

Fixes: 6dfb970d3dbd ("xdp: avoid leaking info stored in frame data on page reuse")
Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
---
 net/core/filter.c |   11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/net/core/filter.c b/net/core/filter.c
index 15e9b5477360..e3623e741181 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2701,6 +2701,11 @@ BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
                     data > xdp->data_end - ETH_HLEN))
                return -EINVAL;
 
+       /* Disallow data_meta to use xdp_frame area */
+       if (metalen > 0 &&
+           unlikely((data - metalen) < xdp_frame_end))
+               return -EINVAL;
+
        /* Avoid info leak, when reusing area prev used by xdp_frame */
        if (data < xdp_frame_end) {
                unsigned long clearlen = xdp_frame_end - data;
@@ -2734,6 +2739,7 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
 
 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
 {
+       void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
        void *meta = xdp->data_meta + offset;
        unsigned long metalen = xdp->data - meta;
 
@@ -2742,6 +2748,11 @@ BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
        if (unlikely(meta < xdp->data_hard_start ||
                     meta > xdp->data))
                return -EINVAL;
+
+       /* Disallow data_meta to use xdp_frame area */
+       if (unlikely(meta < xdp_frame_end))
+               return -EINVAL;
+
        if (unlikely((metalen & (sizeof(__u32) - 1)) ||
                     (metalen > 32)))
                return -EACCES;
