We always provide a single dst page, so it's unclear why the
io_copy_cache complexity is required.

So let's simplify and get rid of "struct io_copy_cache", working
directly on the single page.

... which immediately allows us to drop one "nth_page" usage, because
it's really just a single page.
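
To spell out the reasoning (illustration only, not part of the diff):
in io_copy_page(), len is clamped to PAGE_SIZE, and the dst offset
starts at 0 and only grows by the number of bytes copied, so it stays
below PAGE_SIZE for as long as the loop runs. The dropped dst-side
calls were therefore no-ops:

	dst_page = nth_page(dst_page, dst_offset / PAGE_SIZE);	/* always == dst_page */
	dst_offset = offset_in_page(dst_offset);		/* always unchanged */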

Cc: Jens Axboe <ax...@kernel.dk>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 io_uring/zcrx.c | 32 +++++++-------------------------
 1 file changed, 7 insertions(+), 25 deletions(-)

diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index e5ff49f3425e0..f29b2a4867516 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -954,29 +954,18 @@ static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
        return niov;
 }
 
-struct io_copy_cache {
-       struct page             *page;
-       unsigned long           offset;
-       size_t                  size;
-};
-
-static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
+static ssize_t io_copy_page(struct page *dst_page, struct page *src_page,
                            unsigned int src_offset, size_t len)
 {
-       size_t copied = 0;
+       size_t dst_offset = 0;
 
-       len = min(len, cc->size);
+       len = min(len, PAGE_SIZE);
 
        while (len) {
                void *src_addr, *dst_addr;
-               struct page *dst_page = cc->page;
-               unsigned dst_offset = cc->offset;
                size_t n = len;
 
-               if (folio_test_partial_kmap(page_folio(dst_page)) ||
-                   folio_test_partial_kmap(page_folio(src_page))) {
-                       dst_page = nth_page(dst_page, dst_offset / PAGE_SIZE);
-                       dst_offset = offset_in_page(dst_offset);
+               if (folio_test_partial_kmap(page_folio(src_page))) {
                        src_page = nth_page(src_page, src_offset / PAGE_SIZE);
                        src_offset = offset_in_page(src_offset);
                        n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
@@ -991,12 +980,10 @@ static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
                kunmap_local(src_addr);
                kunmap_local(dst_addr);
 
-               cc->size -= n;
-               cc->offset += n;
+               dst_offset += n;
                len -= n;
-               copied += n;
        }
-       return copied;
+       return dst_offset;
 }
 
 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
@@ -1011,7 +998,6 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
                return -EFAULT;
 
        while (len) {
-               struct io_copy_cache cc;
                struct net_iov *niov;
                size_t n;
 
@@ -1021,11 +1007,7 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
                        break;
                }
 
-               cc.page = io_zcrx_iov_page(niov);
-               cc.offset = 0;
-               cc.size = PAGE_SIZE;
-
-               n = io_copy_page(&cc, src_page, src_offset, len);
+               n = io_copy_page(io_zcrx_iov_page(niov), src_page, src_offset, len);
 
                if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
                        io_zcrx_return_niov(niov);
-- 
2.50.1

