Re: [PATCH v11 24/40] btrfs: cache if block-group is on a sequential zone

2021-01-12 Thread Josef Bacik

On 12/21/20 10:49 PM, Naohiro Aota wrote:

From: Johannes Thumshirn 

In zoned mode, cache whether a block group is located on a sequential
write only zone. On sequential write only zones we can use
REQ_OP_ZONE_APPEND for writing data, so provide btrfs_use_zone_append()
to determine whether an I/O targets a sequential write only zone and
can therefore be submitted with REQ_OP_ZONE_APPEND.

Signed-off-by: Johannes Thumshirn 


Reviewed-by: Josef Bacik 

Thanks,

Josef


[PATCH v11 24/40] btrfs: cache if block-group is on a sequential zone

2020-12-21 Thread Naohiro Aota
From: Johannes Thumshirn 

In zoned mode, cache whether a block group is located on a sequential
write only zone. On sequential write only zones we can use
REQ_OP_ZONE_APPEND for writing data, so provide btrfs_use_zone_append()
to determine whether an I/O targets a sequential write only zone and
can therefore be submitted with REQ_OP_ZONE_APPEND.

Signed-off-by: Johannes Thumshirn 
---
 fs/btrfs/block-group.h |  2 ++
 fs/btrfs/zoned.c       | 29 +++++++++++++++++++++++++++++
 fs/btrfs/zoned.h       |  5 +++++
 3 files changed, 36 insertions(+)
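
As a usage sketch (not part of this patch; 'em', 'bio' and
'write_flags' are assumed names for illustration only), a data write
path could consult the new helper when picking the bio operation:

	/*
	 * Sketch only: choose the bio op for a data write depending on
	 * whether the target block group sits on a sequential zone.
	 */
	if (btrfs_use_zone_append(inode, em))
		bio->bi_opf = REQ_OP_ZONE_APPEND | write_flags;
	else
		bio->bi_opf = REQ_OP_WRITE | write_flags;

A zone append write reports the actual write location back in the bio
on completion, so such bios can be issued without serializing on the
zone's write pointer.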

diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 9df00ada09f9..a1d96c4cfa3b 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -184,6 +184,8 @@ struct btrfs_block_group {
/* Record locked full stripes for RAID5/6 block group */
struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
 
+   /* Flag indicating this block-group is placed on a sequential zone */
+   bool seq_zone;
/*
 * Allocation offset for the block group to implement sequential
 * allocation. This is used only with ZONED mode enabled.
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 73e083a86213..72735e948b6e 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1068,6 +1068,9 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
}
 
+   if (num_sequential > 0)
+   cache->seq_zone = true;
+
if (num_conventional > 0) {
/*
 * Avoid calling calculate_alloc_pointer() for new BG. It
@@ -1188,3 +1191,29 @@ void btrfs_free_redirty_list(struct btrfs_transaction *trans)
}
spin_unlock(&trans->releasing_ebs_lock);
 }
+
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+{
+   struct btrfs_fs_info *fs_info = inode->root->fs_info;
+   struct btrfs_block_group *cache;
+   bool ret = false;
+
+   if (!btrfs_is_zoned(fs_info))
+   return false;
+
+   if (!fs_info->max_zone_append_size)
+   return false;
+
+   if (!is_data_inode(&inode->vfs_inode))
+   return false;
+
+   cache = btrfs_lookup_block_group(fs_info, em->block_start);
+   ASSERT(cache);
+   if (!cache)
+   return false;
+
+   ret = cache->seq_zone;
+   btrfs_put_block_group(cache);
+
+   return ret;
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 331951978487..92888eb86055 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -46,6 +46,7 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
 void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb);
 void btrfs_free_redirty_list(struct btrfs_transaction *trans);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 struct blk_zone *zone)
@@ -134,6 +135,10 @@ static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
  struct extent_buffer *eb) { }
 static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
 
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+{
+   return false;
+}
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
-- 
2.27.0