Integrate f2fs_iomap_folio_state into the f2fs page private helper functions.
In these functions, we adopt a two-stage strategy to handle the folio->private field, now supporting both direct bit flags and the new f2fs_iomap_folio_state pointer.

Note that my implementation does not rely on checking the folio's order to distinguish whether the private field stores a flag or an f2fs_iomap_folio_state pointer. This is because the folio_set_f2fs_xxx functions forcibly allocate an f2fs_iomap_folio_state struct even for order-0 folios under certain conditions. The reason is that if an order-0 folio's private field were set to an f2fs private flag by a thread such as GC, the generic iomap_folio_state helpers used by iomap buffered write would mistakenly interpret it as an iomap_folio_state pointer. We cannot, or rather should not, modify fs/iomap to make it recognize f2fs's private flags. Therefore, for now, I have to uniformly allocate an f2fs_iomap_folio_state for every folio that needs to store an f2fs private flag, to ensure correctness. I am also considering other ways to eliminate the extra memory overhead this approach introduces; suggestions would be greatly appreciated. (A minimal sketch of the two-stage lookup is appended after the diff for illustration.)

Signed-off-by: Nanzhe Zhao <nzz...@126.com>
---
 fs/f2fs/f2fs.h | 278 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 225 insertions(+), 53 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 8df0443dd189..a14bef4dc394 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -27,7 +27,10 @@
 #include <linux/fscrypt.h>
 #include <linux/fsverity.h>
 
-
+#include <linux/iomap.h>
+#ifdef CONFIG_F2FS_IOMAP_FOLIO_STATE
+#include "f2fs_ifs.h"
+#endif
 struct pagevec;
 
 #ifdef CONFIG_F2FS_CHECK_FS
@@ -2509,58 +2512,227 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 	return -ENOSPC;
 }
 
-#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
-static inline bool folio_test_f2fs_##name(const struct folio *folio) \
-{ \
-	unsigned long priv = (unsigned long)folio->private; \
-	unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
-			(1UL << PAGE_PRIVATE_##flagname); \
-	return (priv & v) == v; \
-} \
-static inline bool page_private_##name(struct page *page) \
-{ \
-	return PagePrivate(page) && \
-		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
-		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
-}
-
-#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
-static inline void folio_set_f2fs_##name(struct folio *folio) \
-{ \
-	unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
-			(1UL << PAGE_PRIVATE_##flagname); \
-	if (!folio->private) \
-		folio_attach_private(folio, (void *)v); \
-	else { \
-		v |= (unsigned long)folio->private; \
-		folio->private = (void *)v; \
-	} \
-} \
-static inline void set_page_private_##name(struct page *page) \
-{ \
-	if (!PagePrivate(page)) \
-		attach_page_private(page, (void *)0); \
-	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
-	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
-}
-
-#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
-static inline void folio_clear_f2fs_##name(struct folio *folio) \
-{ \
-	unsigned long v = (unsigned long)folio->private; \
-	\
-	v &= ~(1UL << PAGE_PRIVATE_##flagname); \
-	if (v == (1UL << PAGE_PRIVATE_NOT_POINTER)) \
-		folio_detach_private(folio); \
-	else \
-		folio->private = (void *)v; \
-} \
-static inline void clear_page_private_##name(struct page *page) \
-{ \
-	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
-	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
-		detach_page_private(page); \
-}
+extern bool f2fs_should_use_buffered_iomap(struct inode *inode);
+#ifdef CONFIG_F2FS_IOMAP_FOLIO_STATE
+#define F2FS_FOLIO_PRIVATE_GET_FUNC(name, flagname) \
+	static inline bool folio_test_f2fs_##name(const struct folio *folio) \
+	{ \
+		/* First try direct folio->private access for meta folio */ \
+		if (folio_test_private(folio) && \
+		    test_bit(PAGE_PRIVATE_NOT_POINTER, \
+			     (unsigned long *)&folio->private)) { \
+			return test_bit(PAGE_PRIVATE_##flagname, \
+					(unsigned long *)&folio->private); \
+		} \
+		/* For higher-order folios, use iomap folio state */ \
+		struct f2fs_iomap_folio_state *fifs = \
+			(struct f2fs_iomap_folio_state *)folio->private; \
+		unsigned long *private_p; \
+		if (unlikely(!fifs || !folio->mapping)) \
+			return false; \
+		/* Check magic number before accessing private data */ \
+		if (READ_ONCE(fifs->read_bytes_pending) != F2FS_IFS_MAGIC) \
+			return false; \
+		private_p = f2fs_ifs_private_flags_ptr(fifs, folio); \
+		if (!private_p) \
+			return false; \
+		/* Test bits directly on the 'private' slot */ \
+		return test_bit(PAGE_PRIVATE_##flagname, private_p); \
+	} \
+	static inline bool page_private_##name(struct page *page) \
+	{ \
+		return PagePrivate(page) && \
+		       test_bit(PAGE_PRIVATE_NOT_POINTER, \
+				&page_private(page)) && \
+		       test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+	}
+#define F2FS_FOLIO_PRIVATE_SET_FUNC(name, flagname) \
+	static inline int folio_set_f2fs_##name(struct folio *folio) \
+	{ \
+		/* For higher-order folios, use iomap folio state */ \
+		if (unlikely(!folio->mapping)) \
+			return -ENOENT; \
+		bool force_alloc = \
+			f2fs_should_use_buffered_iomap(folio_inode(folio)); \
+		if (!force_alloc && !folio_test_private(folio)) { \
+			folio_attach_private(folio, (void *)0); \
+			set_bit(PAGE_PRIVATE_NOT_POINTER, \
+				(unsigned long *)&folio->private); \
+			set_bit(PAGE_PRIVATE_##flagname, \
+				(unsigned long *)&folio->private); \
+			return 0; \
+		} \
+		struct f2fs_iomap_folio_state *fifs = \
+			f2fs_ifs_alloc(folio, GFP_NOFS, true); \
+		if (unlikely(!fifs)) \
+			return -ENOMEM; \
+		unsigned long *private_p; \
+		WRITE_ONCE(fifs->read_bytes_pending, F2FS_IFS_MAGIC); \
+		private_p = f2fs_ifs_private_flags_ptr(fifs, folio); \
+		if (!private_p) \
+			return -EINVAL; \
+		/* Set the bit atomically */ \
+		set_bit(PAGE_PRIVATE_##flagname, private_p); \
+		/* Ensure NOT_POINTER bit is also set if any F2FS flag is set */ \
+		if (PAGE_PRIVATE_##flagname != PAGE_PRIVATE_NOT_POINTER) \
+			set_bit(PAGE_PRIVATE_NOT_POINTER, private_p); \
+		return 0; \
+	} \
+	static inline void set_page_private_##name(struct page *page) \
+	{ \
+		if (!PagePrivate(page)) \
+			attach_page_private(page, (void *)0); \
+		set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
+		set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+	}
+
+#define F2FS_FOLIO_PRIVATE_CLEAR_FUNC(name, flagname) \
+	static inline void folio_clear_f2fs_##name(struct folio *folio) \
+	{ \
+		/* First try direct folio->private access */ \
+		if (folio_test_private(folio) && \
+		    test_bit(PAGE_PRIVATE_NOT_POINTER, \
+			     (unsigned long *)&folio->private)) { \
+			clear_bit(PAGE_PRIVATE_##flagname, \
+				  (unsigned long *)&folio->private); \
+			folio_detach_private(folio); \
+			return; \
+		} \
+		/* For higher-order folios, use iomap folio state */ \
+		struct f2fs_iomap_folio_state *fifs = \
+			(struct f2fs_iomap_folio_state *)folio->private; \
+		unsigned long *private_p; \
+		if (unlikely(!fifs || !folio->mapping)) \
+			return; \
+		/* Check magic number before clearing */ \
+		if (READ_ONCE(fifs->read_bytes_pending) != F2FS_IFS_MAGIC) \
+			return; /* Not ours or state unclear */ \
+		private_p = f2fs_ifs_private_flags_ptr(fifs, folio); \
+		if (!private_p) \
+			return; \
+		clear_bit(PAGE_PRIVATE_##flagname, private_p); \
+	} \
+	static inline void clear_page_private_##name(struct page *page) \
+	{ \
+		clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+		if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
+			detach_page_private(page); \
+	}
+// Generate the accessor functions using the macros
+F2FS_FOLIO_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
+F2FS_FOLIO_PRIVATE_GET_FUNC(inline, INLINE_INODE);
+F2FS_FOLIO_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+F2FS_FOLIO_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
+F2FS_FOLIO_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
+
+F2FS_FOLIO_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
+F2FS_FOLIO_PRIVATE_SET_FUNC(inline, INLINE_INODE);
+F2FS_FOLIO_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+F2FS_FOLIO_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
+
+F2FS_FOLIO_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
+F2FS_FOLIO_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
+F2FS_FOLIO_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+F2FS_FOLIO_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
+static inline int folio_set_f2fs_data(struct folio *folio, unsigned long data)
+{
+	if (unlikely(!folio->mapping))
+		return -ENOENT;
+
+	struct f2fs_iomap_folio_state *fifs =
+		f2fs_ifs_alloc(folio, GFP_NOFS, true);
+	if (unlikely(!fifs))
+		return -ENOMEM;
+
+	unsigned long *private_p;
+
+	private_p = f2fs_ifs_private_flags_ptr(fifs, folio);
+	if (!private_p)
+		return -EINVAL;
+
+	*private_p &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
+	*private_p |= (data << PAGE_PRIVATE_MAX);
+	set_bit(PAGE_PRIVATE_NOT_POINTER, private_p);
+
+	return 0;
+}
+static inline unsigned long folio_get_f2fs_data(struct folio *folio)
+{
+	struct f2fs_iomap_folio_state *fifs =
+		(struct f2fs_iomap_folio_state *)folio->private;
+	unsigned long *private_p;
+	unsigned long data_val;
+
+	if (!folio->mapping)
+		return 0;
+	f2fs_bug_on(F2FS_I_SB(folio_inode(folio)), !fifs);
+	if (READ_ONCE(fifs->read_bytes_pending) != F2FS_IFS_MAGIC)
+		return 0;
+
+	private_p = f2fs_ifs_private_flags_ptr(fifs, folio);
+	if (!private_p)
+		return 0;
+
+	data_val = READ_ONCE(*private_p);
+
+	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data_val))
+		return 0;
+
+	return data_val >> PAGE_PRIVATE_MAX;
+}
+#else
+#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
+	static inline bool folio_test_f2fs_##name(const struct folio *folio) \
+	{ \
+		unsigned long priv = (unsigned long)folio->private; \
+		unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+				  (1UL << PAGE_PRIVATE_##flagname); \
+		return (priv & v) == v; \
+	} \
+	static inline bool page_private_##name(struct page *page) \
+	{ \
+		return PagePrivate(page) && \
+		       test_bit(PAGE_PRIVATE_NOT_POINTER, \
+				&page_private(page)) && \
+		       test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+	}
+
+#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
+	static inline void folio_set_f2fs_##name(struct folio *folio) \
+	{ \
+		unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+				  (1UL << PAGE_PRIVATE_##flagname); \
+		if (!folio->private) \
+			folio_attach_private(folio, (void *)v); \
+		else { \
+			v |= (unsigned long)folio->private; \
+			folio->private = (void *)v; \
+		} \
+	} \
+	static inline void set_page_private_##name(struct page *page) \
+	{ \
+		if (!PagePrivate(page)) \
+			attach_page_private(page, (void *)0); \
+		set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
+		set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+	}
+
+#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
+	static inline void folio_clear_f2fs_##name(struct folio *folio) \
+	{ \
+		unsigned long v = (unsigned long)folio->private; \
+		v &= ~(1UL << PAGE_PRIVATE_##flagname); \
+		if (v == (1UL << PAGE_PRIVATE_NOT_POINTER)) \
+			folio_detach_private(folio); \
+		else \
+			folio->private = (void *)v; \
+	} \
+	static inline void clear_page_private_##name(struct page *page) \
+	{ \
+		clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+		if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
+			detach_page_private(page); \
+	}
 
 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
@@ -2595,7 +2767,7 @@ static inline void folio_set_f2fs_data(struct folio *folio, unsigned long data)
 	else
 		folio->private = (void *)((unsigned long)folio->private | data);
 }
-
+#endif
 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 					struct inode *inode, block_t count)
 {
-- 
2.34.1
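
For illustration, below is a minimal sketch of the two-stage folio->private lookup described in the commit message. It is not part of the diff above; the helper name f2fs_folio_private_flags() is hypothetical, and it assumes the struct f2fs_iomap_folio_state layout, F2FS_IFS_MAGIC, and f2fs_ifs_private_flags_ptr() from the f2fs_ifs.h header that this patch includes.

static inline unsigned long *f2fs_folio_private_flags(struct folio *folio)
{
	struct f2fs_iomap_folio_state *fifs;

	/*
	 * Stage 1: folio->private carries the f2fs flag bits directly,
	 * marked by PAGE_PRIVATE_NOT_POINTER.
	 */
	if (folio_test_private(folio) &&
	    test_bit(PAGE_PRIVATE_NOT_POINTER, (unsigned long *)&folio->private))
		return (unsigned long *)&folio->private;

	/*
	 * Stage 2: folio->private, if set, is expected to point to an
	 * f2fs_iomap_folio_state; the magic value stored in
	 * read_bytes_pending distinguishes it from a plain
	 * iomap_folio_state attached by fs/iomap.
	 */
	fifs = folio->private;
	if (!fifs || READ_ONCE(fifs->read_bytes_pending) != F2FS_IFS_MAGIC)
		return NULL;

	return f2fs_ifs_private_flags_ptr(fifs, folio);
}

The F2FS_FOLIO_PRIVATE_GET/SET/CLEAR macros in the diff open-code this lookup, each adding the error handling appropriate to its operation.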