commit: 8de265d8c68cf991be31317d45ea657275319868 Author: Georgy Yakovlev <gyakovlev <AT> gentoo <DOT> org> AuthorDate: Tue Nov 16 19:43:07 2021 +0000 Commit: Georgy Yakovlev <gyakovlev <AT> gentoo <DOT> org> CommitDate: Tue Nov 16 19:44:40 2021 +0000 URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=8de265d8
sys-fs/btrfs-progs: revbump 5.15, add (meta)data calculation patches Signed-off-by: Georgy Yakovlev <gyakovlev <AT> gentoo.org> ...rogs-5.15.ebuild => btrfs-progs-5.15-r1.ebuild} | 6 ++ .../files/5.15-filesystem-usage-chunks.patch | 94 ++++++++++++++++++++++ .../files/5.15-filesystem-usage-data.patch | 37 +++++++++ 3 files changed, 137 insertions(+) diff --git a/sys-fs/btrfs-progs/btrfs-progs-5.15.ebuild b/sys-fs/btrfs-progs/btrfs-progs-5.15-r1.ebuild similarity index 94% rename from sys-fs/btrfs-progs/btrfs-progs-5.15.ebuild rename to sys-fs/btrfs-progs/btrfs-progs-5.15-r1.ebuild index 36548f98db8a..ec22d07e7854 100644 --- a/sys-fs/btrfs-progs/btrfs-progs-5.15.ebuild +++ b/sys-fs/btrfs-progs/btrfs-progs-5.15-r1.ebuild @@ -79,6 +79,12 @@ fi REQUIRED_USE="python? ( ${PYTHON_REQUIRED_USE} )" +# https://github.com/kdave/btrfs-progs/issues/422 +PATCHES=( + "${FILESDIR}/${PV}-filesystem-usage-data.patch" + "${FILESDIR}/${PV}-filesystem-usage-chunks.patch" +) + pkg_setup() { use python && python-single-r1_pkg_setup } diff --git a/sys-fs/btrfs-progs/files/5.15-filesystem-usage-chunks.patch b/sys-fs/btrfs-progs/files/5.15-filesystem-usage-chunks.patch new file mode 100644 index 000000000000..be58afc8712c --- /dev/null +++ b/sys-fs/btrfs-progs/files/5.15-filesystem-usage-chunks.patch @@ -0,0 +1,94 @@ +From: Nikolay Borisov <nbori...@suse.com> +To: linux-bt...@vger.kernel.org +Cc: Nikolay Borisov <nbori...@suse.com> +Subject: [PATCH] Fix calculation of chunk size for RAID1/DUP profiles +Date: Tue, 16 Nov 2021 16:02:06 +0200 +Message-Id: <20211116140206.291252-1-nbori...@suse.com> +List-ID: <linux-btrfs.vger.kernel.org> + +Current formula calculates the stripe size, however that's not what we want +in the case of RAID1/DUP profiles. In those cases, since chunks are mirrored +across devices, we want the full size of the chunk. 
Without this patch the +'btrfs fi usage' output from an fs which is using RAID1 is: + + <snip> + + Data,RAID1: Size:2.00GiB, Used:1.00GiB (50.03%) + /dev/vdc 1.00GiB + /dev/vdf 1.00GiB + + Metadata,RAID1: Size:256.00MiB, Used:1.34MiB (0.52%) + /dev/vdc 128.00MiB + /dev/vdf 128.00MiB + + System,RAID1: Size:8.00MiB, Used:16.00KiB (0.20%) + /dev/vdc 4.00MiB + /dev/vdf 4.00MiB + + Unallocated: + /dev/vdc 8.87GiB + /dev/vdf 8.87GiB + + +So a 2 gigabyte RAID1 chunk actually will take up 4 gigabytes on the actual disks +2 each. In this case this is being miscalculated as taking up 1gb on each device. + +This also leads to erroneously calculated unallocated space. The correct output +in this case is: + + <snip> + + Data,RAID1: Size:2.00GiB, Used:1.00GiB (50.03%) + /dev/vdc 2.00GiB + /dev/vdf 2.00GiB + + Metadata,RAID1: Size:256.00MiB, Used:1.34MiB (0.52%) + /dev/vdc 256.00MiB + /dev/vdf 256.00MiB + + System,RAID1: Size:8.00MiB, Used:16.00KiB (0.20%) + /dev/vdc 8.00MiB + /dev/vdf 8.00MiB + + Unallocated: + /dev/vdc 7.74GiB + /dev/vdf 7.74GiB + + +Fix it by only utilising the chunk formula for profiles which are not RAID1/DUP. 
+ +Signed-off-by: Nikolay Borisov <nbori...@suse.com> +--- + cmds/filesystem-usage.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/cmds/filesystem-usage.c b/cmds/filesystem-usage.c +index 6195f633da44..5f2289a9b40d 100644 +--- a/cmds/filesystem-usage.c ++++ b/cmds/filesystem-usage.c +@@ -805,11 +805,17 @@ int load_chunk_and_device_info(int fd, struct chunk_info **chunkinfo, + */ + static u64 calc_chunk_size(struct chunk_info *ci) + { +- u32 div; ++ u32 div = 1; + +- /* No parity + sub_stripes, so order of "-" and "/" does not matter */ +- div = (ci->num_stripes - btrfs_bg_type_to_nparity(ci->type)) / +- btrfs_bg_type_to_sub_stripes(ci->type); ++ /* ++ * The formula doesn't work for RAID1/DUP types, we should just return the ++ * chunk size ++ */ ++ if (!(ci->type & (BTRFS_BLOCK_GROUP_RAID1_MASK|BTRFS_BLOCK_GROUP_DUP))) { ++ /* No parity + sub_stripes, so order of "-" and "/" does not matter */ ++ div = (ci->num_stripes - btrfs_bg_type_to_nparity(ci->type)) / ++ btrfs_bg_type_to_sub_stripes(ci->type); ++ } + + return ci->size / div; + } +-- +2.17.1 + + diff --git a/sys-fs/btrfs-progs/files/5.15-filesystem-usage-data.patch b/sys-fs/btrfs-progs/files/5.15-filesystem-usage-data.patch new file mode 100644 index 000000000000..f3b43d5c594d --- /dev/null +++ b/sys-fs/btrfs-progs/files/5.15-filesystem-usage-data.patch @@ -0,0 +1,37 @@ +From 2f3950c8304fec2aed9bf11b52d073683b137330 Mon Sep 17 00:00:00 2001 +From: Nikolay Borisov <nbori...@suse.com> +Date: Mon, 15 Nov 2021 11:15:42 +0200 +Subject: [PATCH] btrfs-progs: fi usage: don't reset ratio to 1 if we don't + have RAID56 profile + +Commit 80714610f36e ("btrfs-progs: use raid table for ncopies") +slightly broke how raid ratio are being calculated since the resulting +code would always reset ratio to be 1 in case we didn't have RAID56 +profile. 
The correct behavior is to simply set it to 0 if we have RAID56 +as the calculation is different in this case and leave it intact +otherwise. + +This bug manifests by doing all size-related calculations for the 'btrfs +filesystem usage' command as if all block groups are of type SINGLE. Fix +this by only resetting ratio to 0 in case of RAID56. + +Issue: #422 +Signed-off-by: Nikolay Borisov <nbori...@suse.com> +Signed-off-by: David Sterba <dste...@suse.com> +--- + cmds/filesystem-usage.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/cmds/filesystem-usage.c b/cmds/filesystem-usage.c +index e22efe3a4..bac0f0fd5 100644 +--- a/cmds/filesystem-usage.c ++++ b/cmds/filesystem-usage.c +@@ -508,8 +508,6 @@ static int print_filesystem_usage_overall(int fd, struct chunk_info *chunkinfo, + */ + if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) + ratio = 0; +- else +- ratio = 1; + + if (ratio > max_data_ratio) + max_data_ratio = ratio;