Introduce the `F2FS_GET_BLOCK_IOMAP` flag for `f2fs_map_blocks`.

    With this flag, holes encountered during the iterative mapping done
    for buffered I/O can now be merged in `map_is_mergeable`. Furthermore,
    when this flag is passed, `f2fs_map_blocks` stores the mapped block
    information (from the `f2fs_map_blocks` structure) into the extent
    cache, provided the resulting extent is longer than the minimum
    extent length allowed by the f2fs extent cache. Notably, holes and
    `NEW_ADDR` extents are also cached under this flag. This improves
    buffered write performance for sparse files.
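
    For illustration only, below is a minimal sketch of how a buffered
    I/O iomap_begin handler could translate the result of an
    `F2FS_GET_BLOCK_IOMAP` mapping into a single iomap extent. The
    function name `f2fs_buffered_iomap_begin` and the exact field
    translation are assumptions made for this sketch and are not part
    of this patch:

        /*
         * Hypothetical sketch (not part of this patch); assumes f2fs.h
         * and <linux/iomap.h> are available. With F2FS_GET_BLOCK_IOMAP,
         * a sparse region is returned as one merged unmapped run, so
         * the whole hole becomes a single IOMAP_HOLE extent instead of
         * one block per iteration.
         */
        static int f2fs_buffered_iomap_begin(struct inode *inode, loff_t pos,
                        loff_t length, unsigned int flags,
                        struct iomap *iomap, struct iomap *srcmap)
        {
                pgoff_t start_blk = pos >> inode->i_blkbits;
                pgoff_t last_blk = (pos + length - 1) >> inode->i_blkbits;
                struct f2fs_map_blocks map = {};
                int err;

                err = f2fs_map_blocks_iomap(inode, start_blk,
                                last_blk - start_blk + 1, &map);
                if (err)
                        return err;

                iomap->offset = (loff_t)map.m_lblk << inode->i_blkbits;
                iomap->length = (loff_t)map.m_len << inode->i_blkbits;
                iomap->bdev = inode->i_sb->s_bdev;

                if (map.m_flags & F2FS_MAP_MAPPED) {
                        iomap->type = IOMAP_MAPPED;
                        iomap->addr = (u64)map.m_pblk << inode->i_blkbits;
                } else {
                        /* merged hole: one extent covers the whole gap */
                        iomap->type = IOMAP_HOLE;
                        iomap->addr = IOMAP_NULL_ADDR;
                }
                return 0;
        }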

    Additionally, two helper functions are introduced (an illustrative
    usage sketch follows the list below):
    - `f2fs_map_blocks_iomap`: a thin wrapper around `f2fs_map_blocks`
      that enables the `F2FS_GET_BLOCK_IOMAP` flag.
    - `f2fs_map_blocks_preallocate`: a thin wrapper that uses
      `f2fs_map_blocks` to preallocate blocks.
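
    As a usage illustration for the preallocation helper, the sketch
    below reserves blocks covering the byte range of a pending buffered
    write. The caller name `f2fs_reserve_write_range` and the rounding
    logic are assumptions for this example, not code added by this
    patch:

        /*
         * Illustrative only: preallocate blocks covering 'count' bytes
         * at 'pos' before a buffered write, via the PRE_AIO path that
         * f2fs_map_blocks_preallocate wraps (m_may_create is set inside
         * the helper).
         */
        static int f2fs_reserve_write_range(struct inode *inode, loff_t pos,
                                            size_t count)
        {
                struct f2fs_map_blocks map = {};
                block_t start = pos >> inode->i_blkbits;
                block_t end = (pos + count + (1 << inode->i_blkbits) - 1) >>
                                        inode->i_blkbits;

                return f2fs_map_blocks_preallocate(inode, start,
                                                   end - start, &map);
        }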

Signed-off-by: Nanzhe Zhao <nzz...@126.com>
---
 fs/f2fs/data.c | 49 +++++++++++++++++++++++++++++++++++++++++++------
 fs/f2fs/f2fs.h |  5 +++++
 2 files changed, 48 insertions(+), 6 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5ecd08a3dd0b..37eaf431ab42 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1537,8 +1537,11 @@ static bool map_is_mergeable(struct f2fs_sb_info *sbi,
                return true;
        if (flag == F2FS_GET_BLOCK_PRE_DIO)
                return true;
-       if (flag == F2FS_GET_BLOCK_DIO &&
-               map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
+       if (flag == F2FS_GET_BLOCK_DIO && map->m_pblk == NULL_ADDR &&
+           blkaddr == NULL_ADDR)
+               return true;
+       if (flag == F2FS_GET_BLOCK_IOMAP && map->m_pblk == NULL_ADDR &&
+           blkaddr == NULL_ADDR)
                return true;
        return false;
 }
@@ -1676,6 +1679,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
                        if (map->m_next_pgofs)
                                *map->m_next_pgofs = pgofs + 1;
                        break;
+               case F2FS_GET_BLOCK_IOMAP:
+                       if (map->m_next_pgofs)
+                               *map->m_next_pgofs = pgofs + 1;
+                       break;
                default:
                        /* for defragment case */
                        if (map->m_next_pgofs)
@@ -1741,8 +1748,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
        else if (dn.ofs_in_node < end_offset)
                goto next_block;
 
-       if (flag == F2FS_GET_BLOCK_PRECACHE) {
-               if (map->m_flags & F2FS_MAP_MAPPED) {
+       if (flag == F2FS_GET_BLOCK_PRECACHE || flag == F2FS_GET_BLOCK_IOMAP) {
+               if (map->m_flags & F2FS_MAP_MAPPED &&
+                   map->m_len > F2FS_MIN_EXTENT_LEN) {
                        unsigned int ofs = start_pgofs - map->m_lblk;
 
                        f2fs_update_read_extent_cache_range(&dn,
@@ -1786,8 +1794,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
                }
        }
 
-       if (flag == F2FS_GET_BLOCK_PRECACHE) {
-               if (map->m_flags & F2FS_MAP_MAPPED) {
+       if (flag == F2FS_GET_BLOCK_PRECACHE || flag == F2FS_GET_BLOCK_IOMAP) {
+               if (map->m_flags & F2FS_MAP_MAPPED &&
+                   map->m_len > F2FS_MIN_EXTENT_LEN) {
                        unsigned int ofs = start_pgofs - map->m_lblk;
 
                        f2fs_update_read_extent_cache_range(&dn,
@@ -1808,6 +1817,34 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
        return err;
 }
 
+int f2fs_map_blocks_iomap(struct inode *inode, block_t start, block_t len,
+                         struct f2fs_map_blocks *map)
+{
+       int err = 0;
+
+       map->m_lblk = start;    /* logical block number of the start pos */
+       map->m_len = len;       /* length in blocks */
+       map->m_may_create = false;
+       map->m_seg_type =
+               f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), inode->i_write_hint);
+       err = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_IOMAP);
+       return err;
+}
+
+int f2fs_map_blocks_preallocate(struct inode *inode, block_t start, block_t len,
+                               struct f2fs_map_blocks *map)
+{
+       int err = 0;
+
+       map->m_lblk = start;
+       map->m_len = len;       /* length in blocks */
+       map->m_may_create = true;
+       map->m_seg_type =
+               f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), inode->i_write_hint);
+       err = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_PRE_AIO);
+       return err;
+}
+
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
 {
        struct f2fs_map_blocks map;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c6b23fa63588..ac9a6ac13e1f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -788,6 +788,7 @@ enum {
        F2FS_GET_BLOCK_PRE_DIO,
        F2FS_GET_BLOCK_PRE_AIO,
        F2FS_GET_BLOCK_PRECACHE,
+       F2FS_GET_BLOCK_IOMAP,
 };
 
 /*
@@ -4232,6 +4233,10 @@ struct folio *f2fs_get_new_data_folio(struct inode *inode,
                        struct folio *ifolio, pgoff_t index, bool new_i_size);
 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
+int f2fs_map_blocks_iomap(struct inode *inode, block_t start, block_t len,
+                         struct f2fs_map_blocks *map);
+int f2fs_map_blocks_preallocate(struct inode *inode, block_t start, block_t len,
+                               struct f2fs_map_blocks *map);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        u64 start, u64 len);
 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
-- 
2.34.1


