Add a parameter for the minimum block size to the netfs_inode struct.  This
can be used, for instance, to force I/O alignment for content encryption.
It also means that an RMW cycle is required if a write doesn't meet the
block alignment requirements.
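
For example, a filesystem that encrypts content in 4KiB blocks might set
the parameter when initialising its netfs context.  This is an illustrative
sketch only: the function name and ops pointer are invented, and it assumes
the two-argument netfs_inode_init(ctx, ops) form used in this tree:

    static void example_set_netfs_context(struct inode *inode)
    {
            struct netfs_inode *ctx = netfs_inode(inode);

            netfs_inode_init(ctx, &example_netfs_req_ops);
            ctx->min_bshift = 12;   /* 4KiB content-crypto blocks */
    }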

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
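As an illustration of the RMW window that the minimum block size enforces,
the standalone userspace sketch below (not part of the patch; ROUND_UP and
ROUND_DOWN stand in for the kernel's round_up()/round_down(), and the write
offset/length are made up) shows a 1000-byte write at offset 5000 with
min_bshift = 12 pulling in the surrounding [4096, 8192) range first:

    #include <stdio.h>

    #define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
    #define ROUND_DOWN(x, a) (((x) / (a)) * (a))

    int main(void)
    {
            unsigned long long start = 5000, len = 1000; /* hypothetical write */
            unsigned int min_bshift = 12;                /* 4KiB minimum block */
            unsigned long long bsize = 1ULL << min_bshift;

            unsigned long long rstart = ROUND_DOWN(start, bsize);
            unsigned long long rend   = ROUND_UP(start + len, bsize);

            /* Prints: RMW reads [4096, 8192) to cover write [5000, 6000) */
            printf("RMW reads [%llu, %llu) to cover write [%llu, %llu)\n",
                   rstart, rend, start, start + len);
            return 0;
    }
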
 fs/netfs/buffered_read.c  | 26 ++++++++++++++++++++++----
 fs/netfs/buffered_write.c |  3 ++-
 fs/netfs/direct_read.c    |  3 ++-
 include/linux/netfs.h     |  2 ++
 4 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index ab9f8e123245..e06461ef0bfa 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -527,14 +527,26 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
        struct address_space *mapping = folio_file_mapping(folio);
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        unsigned long long start = folio_pos(folio);
-       size_t flen = folio_size(folio);
+       unsigned long long i_size, rstart, end;
+       size_t rlen;
        int ret;
 
-       _enter("%zx @%llx", flen, start);
+       DEFINE_READAHEAD(ractl, file, NULL, mapping, folio_index(folio));
+
+       _enter("%zx @%llx", len, start);
 
        ret = -ENOMEM;
 
-       rreq = netfs_alloc_request(mapping, file, start, flen,
+       i_size = i_size_read(mapping->host);
+       end = round_up(start + len, 1U << ctx->min_bshift);
+       if (end > i_size) {
+               unsigned long long limit = round_up(start + len, PAGE_SIZE);
+               end = max(limit, round_up(i_size, PAGE_SIZE));
+       }
+       rstart = round_down(start, 1U << ctx->min_bshift);
+       rlen   = end - rstart;
+
+       rreq = netfs_alloc_request(mapping, file, rstart, rlen,
                                   NETFS_READ_FOR_WRITE);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
@@ -548,7 +560,13 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                goto error_put;
 
        netfs_stat(&netfs_n_rh_write_begin);
-       trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+       trace_netfs_read(rreq, rstart, rlen, netfs_read_trace_prefetch_for_write);
+
+       /* Expand the request to meet caching requirements and download
+        * preferences.
+        */
+       ractl._nr_pages = folio_nr_pages(folio);
+       netfs_rreq_expand(rreq, &ractl);
 
        /* Set up the output buffer */
        iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index d5a5a315fbd3..7163fcc05206 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -80,7 +80,8 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
        if (file->f_mode & FMODE_READ)
                return NETFS_JUST_PREFETCH;
 
-       if (netfs_is_cache_enabled(ctx))
+       if (netfs_is_cache_enabled(ctx) ||
+           ctx->min_bshift > 0)
                return NETFS_JUST_PREFETCH;
 
        if (!finfo)
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 1d26468aafd9..52ad8fa66dd5 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -185,7 +185,8 @@ static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_
         * will then need to pad the request out to the minimum block size.
         */
        if (test_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags)) {
-               start = rreq->start;
+               min_bsize = 1ULL << ctx->min_bshift;
+               start = round_down(rreq->start, min_bsize);
                end = min_t(unsigned long long,
                            round_up(rreq->start + rreq->len, min_bsize),
                            ctx->remote_i_size);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index fb4f4f826b93..6244f7a9a44a 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -141,6 +141,7 @@ struct netfs_inode {
        unsigned long           flags;
 #define NETFS_ICTX_ODIRECT     0               /* The file has DIO in progress */
 #define NETFS_ICTX_UNBUFFERED  1               /* I/O should not use the pagecache */
+       unsigned char           min_bshift;     /* log2 min block size for bounding box or 0 */
 };
 
 /*
@@ -462,6 +463,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
        ctx->remote_i_size = i_size_read(&ctx->inode);
        ctx->zero_point = ctx->remote_i_size;
        ctx->flags = 0;
+       ctx->min_bshift = 0;
 #if IS_ENABLED(CONFIG_FSCACHE)
        ctx->cache = NULL;
 #endif