bcachefs currently populates fiemap data from the extents btree. This
works correctly when the fiemap sync flag is provided, but otherwise it
skips all delalloc extents that have not yet been flushed. This is
because delalloc extents from buffered writes are first stored as a
reservation in the pagecache, and only become resident in the extents
btree after writeback completes.
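
For illustration only (not part of this patch): a minimal userspace
sketch of the behavior described above, using the FIEMAP ioctl from
<linux/fiemap.h>. The file path is hypothetical. Without
FIEMAP_FLAG_SYNC, the dirty range was previously skipped; with this
change it is reported with FIEMAP_EXTENT_DELALLOC set.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	/* Hypothetical file on a bcachefs mount. */
	int fd = open("/mnt/bcachefs/file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	char buf[65536];
	struct fiemap *fm;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Buffered write: data sits in pagecache as a delalloc reservation. */
	memset(buf, 0xa5, sizeof(buf));
	if (write(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("write");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) + 8 * sizeof(struct fiemap_extent));
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = 8;
	/* fm_flags deliberately lacks FIEMAP_FLAG_SYNC: no writeback forced. */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (unsigned i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu len %llu delalloc %d\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       !!(fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_DELALLOC));

	free(fm);
	close(fd);
	return 0;
}
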
Update the fiemap implementation to scan the pagecache for data in file
ranges that are not present in the extents btree. This uses the
preexisting seek data/hole mechanism to identify data ranges, and then
formats them as delayed allocation extents in the fiemap info. This is
done by faking up an extent key and passing that along to the fiemap
fill handler. We also tweak bch2_fiemap() to save the fiemap flags for
the previous key in order to track that it is delalloc.

One caveat worth noting with respect to fiemap and COW is that extent
btree data is reported even when dirty pagecache exists over the
associated range of the file. This means the range will be reallocated
on the next writeback and thus the fiemap data is technically out of
date. This is not necessarily a serious issue given that fiemap is racy
by definition, the final location of the unflushed data is unknown, and
the caller should probably use the sync flag for the most up-to-date
information. FWIW, btrfs exhibits this same behavior wrt dirty
pagecache over COW extents, so this patch brings bcachefs to functional
parity with btrfs.

Signed-off-by: Brian Foster <[email protected]>
---
 fs/bcachefs/fs.c | 60 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 56 insertions(+), 4 deletions(-)

diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index bc280a0a957d..0b3b35092818 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -868,6 +868,41 @@ static int bch2_fill_extent(struct bch_fs *c,
 	}
 }
 
+/*
+ * Scan a gap in the extent btree for delayed allocation in pagecache. If found,
+ * fake up an extent key so it looks like an extent to the rest of the fiemap
+ * processing code.
+ */
+static bool
+bch2_fiemap_scan_pagecache(struct inode *vinode,
+			   u64 start,
+			   u64 end,
+			   struct bkey_buf *cur)
+{
+	struct bch_fs *c = vinode->i_sb->s_fs_info;
+	struct bch_inode_info *ei = to_bch_ei(vinode);
+	struct bkey_i_extent *delextent;
+	struct bch_extent_ptr ptr = {};
+
+	start = bch2_seek_pagecache_data(vinode, start, end, 0, false);
+	if (start >= end)
+		return false;
+	end = bch2_seek_pagecache_hole(vinode, start, end, 0, false);
+
+	/*
+	 * Create a fake extent key in the buffer. We have to add a dummy extent
+	 * pointer for the fill code to add an extent entry. It's explicitly
+	 * zeroed to reflect delayed allocation (i.e. phys offset 0).
+	 */
+	bch2_bkey_buf_realloc(cur, c, sizeof(*delextent) / sizeof(u64));
+	delextent = bkey_extent_init(cur->k);
+	delextent->k.p = POS(ei->v.i_ino, start >> 9);
+	bch2_key_resize(&delextent->k, (end - start) >> 9);
+	bch2_bkey_append_ptr(&delextent->k_i, ptr);
+
+	return true;
+}
+
 static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 		       u64 start, u64 len)
 {
@@ -879,6 +914,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	struct bkey_buf cur, prev;
 	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
 	unsigned offset_into_extent, sectors;
+	unsigned cflags, pflags;
 	bool have_extent = false;
 	u32 snapshot;
 	int ret = 0;
@@ -916,6 +952,19 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 			continue;
 		}
 
+		/*
+		 * Outstanding buffered writes aren't tracked in the extent
+		 * btree until dirty folios are written back. Check holes in the
+		 * extent tree for data in pagecache and report it as delalloc.
+		 */
+		if (iter.pos.offset > start &&
+		    bch2_fiemap_scan_pagecache(vinode, start << 9,
+					       iter.pos.offset << 9, &cur)) {
+			cflags = FIEMAP_EXTENT_DELALLOC;
+			start = bkey_start_offset(&cur.k->k) + cur.k->k.size;
+			goto fill;
+		}
+
 		offset_into_extent	= iter.pos.offset -
 			bkey_start_offset(k.k);
 		sectors			= k.k->size - offset_into_extent;
@@ -940,19 +989,22 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 		cur.k->k.p      = iter.pos;
 		cur.k->k.p.offset += cur.k->k.size;
 
+		cflags = 0;
+		start = iter.pos.offset + sectors;
+fill:
 		if (have_extent) {
 			bch2_trans_unlock(trans);
 			ret = bch2_fill_extent(c, info,
-					bkey_i_to_s_c(prev.k), 0);
+					bkey_i_to_s_c(prev.k), pflags);
 			if (ret)
 				break;
 		}
 
 		bkey_copy(prev.k, cur.k);
+		pflags = cflags;
 		have_extent = true;
 
-		bch2_btree_iter_set_pos(&iter,
-			POS(iter.pos.inode, iter.pos.offset + sectors));
+		bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, start));
 	}
 	start = iter.pos.offset;
 	bch2_trans_iter_exit(trans, &iter);
@@ -963,7 +1015,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	if (!ret && have_extent) {
 		bch2_trans_unlock(trans);
 		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
-				FIEMAP_EXTENT_LAST);
+				pflags|FIEMAP_EXTENT_LAST);
 	}
 
 	bch2_trans_put(trans);
-- 
2.42.0
