From: Omar Sandoval <osan...@fb.com>

For encoded writes, we need the raw pages so that compressed data can
be read directly into the send buffer via a bio. So, replace kvmalloc()
with vmap() to get access to the raw pages. At 144K, the buffer is
large enough that kvmalloc() would usually fall back to vmalloc()
anyway.

Signed-off-by: Omar Sandoval <osan...@fb.com>
---
 fs/btrfs/send.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)
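
For reference, a minimal sketch of the pattern this patch moves to:
back a virtually contiguous buffer with an explicit page array so the
same memory can later be attached to a bio page by page. The helper
names (buf_vmap_pages(), buf_attach_to_bio()) are made up for
illustration only and are not part of this series; the actual code
open-codes the allocation in btrfs_ioctl_send(), as in the diff below.

#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper: allocate @num_pages pages into @pages and map
 * them into one virtually contiguous kernel buffer.  Unlike
 * kvmalloc(), the caller keeps the struct page array.
 */
static void *buf_vmap_pages(struct page **pages, unsigned int num_pages)
{
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}
	/* Same kind of mapping vmalloc() would give us internally. */
	return vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);

free_pages:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}

/*
 * Hypothetical use in an encoded-write style path: because the raw
 * pages are known, they can be added to a bio directly, with no
 * bounce copy through a separate buffer.
 */
static int buf_attach_to_bio(struct bio *bio, struct page **pages,
			     unsigned int num_pages)
{
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		/* bio_add_page() returns 0 if the bio is already full. */
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			return -EIO;
	}
	return 0;
}
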

diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 98948568017c..25b1a60a568c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -81,6 +81,7 @@ struct send_ctx {
        char *send_buf;
        u32 send_size;
        u32 send_max_size;
+       struct page **send_buf_pages;
        u64 total_send_size;
        u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
        u64 flags;      /* 'flags' member of btrfs_ioctl_send_args is u64 */
@@ -7203,6 +7204,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
        struct btrfs_root *clone_root;
        struct send_ctx *sctx = NULL;
        u32 i;
+       u32 send_buf_num_pages = 0;
        u64 *clone_sources_tmp = NULL;
        int clone_sources_to_rollback = 0;
        size_t alloc_size;
@@ -7283,10 +7285,28 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
        if (sctx->flags & BTRFS_SEND_FLAG_STREAM_V2) {
                sctx->send_max_size = ALIGN(SZ_16K + BTRFS_MAX_COMPRESSED,
                                            PAGE_SIZE);
+               send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
+               sctx->send_buf_pages = kcalloc(send_buf_num_pages,
+                                              sizeof(*sctx->send_buf_pages),
+                                              GFP_KERNEL);
+               if (!sctx->send_buf_pages) {
+                       send_buf_num_pages = 0;
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               for (i = 0; i < send_buf_num_pages; i++) {
+                       sctx->send_buf_pages[i] = alloc_page(GFP_KERNEL);
+                       if (!sctx->send_buf_pages[i]) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+               }
+               sctx->send_buf = vmap(sctx->send_buf_pages, send_buf_num_pages,
+                                     VM_MAP, PAGE_KERNEL);
        } else {
                sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
+               sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
        }
-       sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
        if (!sctx->send_buf) {
                ret = -ENOMEM;
                goto out;
@@ -7495,7 +7515,16 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
                        fput(sctx->send_filp);
 
                kvfree(sctx->clone_roots);
-               kvfree(sctx->send_buf);
+               if (sctx->flags & BTRFS_SEND_FLAG_STREAM_V2) {
+                       vunmap(sctx->send_buf);
+                       for (i = 0; i < send_buf_num_pages; i++) {
+                               if (sctx->send_buf_pages[i])
+                                       __free_page(sctx->send_buf_pages[i]);
+                       }
+                       kfree(sctx->send_buf_pages);
+               } else {
+                       kvfree(sctx->send_buf);
+               }
 
                name_cache_free(sctx);
 
-- 
2.30.0