The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=d8f3e8781b2ce316120e9a5a42bb1174e9a53187

commit d8f3e8781b2ce316120e9a5a42bb1174e9a53187
Author:     Mark Johnston <ma...@freebsd.org>
AuthorDate: 2025-07-21 13:35:12 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2025-07-21 14:57:58 +0000

    makefs/zfs: Make it possible to set a compression property
    
    makefs currently does not implement compression for ZFS datasets, as
    doing so seems somewhat fraught with compatibility issues.  As a result,
    the root dataset has compression disabled, and all others inherit from
    that.
    
    However, it may be useful to enable compression for new files once the
    generated pool is actually imported.  Thus, implement a per-dataset
    compression option.  By default, compression is inherited from the
    parent dataset and disabled on the root dataset.
    
    Add a regression test.
    
    PR:             288241
    MFC after:      1 month
---
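A quick usage sketch (not part of the commit; the pool name, image path, and
input directory below are placeholders): the new property is passed through
the existing -o fs= syntax documented in makefs(8), with the semicolon quoted
so the shell does not interpret it.

    # Enable zstd on the root dataset; zroot/var inherits it, zroot/tmp opts out.
    # makefs still stores the staged files uncompressed regardless; the setting
    # only affects data written after the generated pool is imported.
    makefs -t zfs -s 1g -o poolname=zroot -o rootpath=/ \
        -o fs='zroot;compression=zstd' \
        -o fs='zroot/var' \
        -o fs='zroot/tmp;compression=off' \
        zfs.img ./input-tree

After zpool import, zfs get -r compression zroot should report the configured
algorithms, which is essentially what the new regression test checks.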
 usr.sbin/makefs/makefs.8                  | 10 +++-
 usr.sbin/makefs/tests/makefs_zfs_tests.sh | 90 +++++++++++++++++++++++++++++++
 usr.sbin/makefs/zfs/dsl.c                 | 44 +++++++++++++--
 3 files changed, 137 insertions(+), 7 deletions(-)

diff --git a/usr.sbin/makefs/makefs.8 b/usr.sbin/makefs/makefs.8
index a11eaf8206e9..c46f283f90a8 100644
--- a/usr.sbin/makefs/makefs.8
+++ b/usr.sbin/makefs/makefs.8
@@ -33,8 +33,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd January 19, 2024
-
+.Dd July 19, 2025
 .Dt MAKEFS 8
 .Os
 .Sh NAME
@@ -596,10 +595,17 @@ The following properties may be set for a dataset:
 .Bl -tag -compact -offset indent
 .It atime
 .It canmount
+.It compression
 .It exec
 .It mountpoint
 .It setuid
 .El
+Note that
+.Nm
+does not implement compression of files included in the image,
+regardless of the value of the
+.Dv compression
+property.
 .El
 .Sh SEE ALSO
 .Xr mtree 5 ,
diff --git a/usr.sbin/makefs/tests/makefs_zfs_tests.sh b/usr.sbin/makefs/tests/makefs_zfs_tests.sh
index 520d1f211ac3..d73da083a8c2 100644
--- a/usr.sbin/makefs/tests/makefs_zfs_tests.sh
+++ b/usr.sbin/makefs/tests/makefs_zfs_tests.sh
@@ -124,6 +124,95 @@ basic_cleanup()
        common_cleanup
 }
 
+#
+# Try configuring various compression algorithms.
+#
+atf_test_case compression cleanup
+compression_body()
+{
+       create_test_inputs
+
+       cd $TEST_INPUTS_DIR
+       mkdir dir
+       mkdir dir2
+       cd -
+
+       for alg in off on lzjb gzip gzip-1 gzip-2 gzip-3 gzip-4 \
+           gzip-5 gzip-6 gzip-7 gzip-8 gzip-9 zle lz4 zstd; do
+               atf_check $MAKEFS -s 1g -o rootpath=/ \
+                   -o poolname=$ZFS_POOL_NAME \
+                   -o fs=${ZFS_POOL_NAME}\;compression=$alg \
+                   -o fs=${ZFS_POOL_NAME}/dir \
+                   -o fs=${ZFS_POOL_NAME}/dir2\;compression=off \
+                   $TEST_IMAGE $TEST_INPUTS_DIR
+
+               import_image
+
+               check_image_contents
+
+               if [ $alg = gzip-6 ]; then
+                       # ZFS reports gzip-6 as just gzip since it uses
+                       # a default compression level of 6.
+                       alg=gzip
+               fi
+               # The "dir" dataset's compression algorithm should be
+               # inherited from the root dataset.
+               atf_check -o inline:$alg\\n -e empty -s exit:0 \
+                   zfs get -H -o value compression ${ZFS_POOL_NAME}
+               atf_check -o inline:$alg\\n -e empty -s exit:0 \
+                   zfs get -H -o value compression ${ZFS_POOL_NAME}/dir
+               atf_check -o inline:off\\n -e empty -s exit:0 \
+                   zfs get -H -o value compression ${ZFS_POOL_NAME}/dir2
+
+               atf_check -e ignore dd if=/dev/random \
+                   of=${TEST_MOUNT_DIR}/dir/random bs=1M count=10
+               atf_check -e ignore dd if=/dev/zero \
+                   of=${TEST_MOUNT_DIR}/dir/zero bs=1M count=10
+               atf_check -e ignore dd if=/dev/zero \
+                   of=${TEST_MOUNT_DIR}/dir2/zero bs=1M count=10
+
+               # Export and reimport to ensure that everything is
+               # flushed to disk.
+               atf_check zpool export ${ZFS_POOL_NAME}
+               atf_check -o ignore -e empty -s exit:0 \
+                   zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd \
+                   $ZFS_POOL_NAME
+               atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
+
+               if [ $alg = off ]; then
+                       # If compression is off, the files should be the
+                       # same size as the input.
+                       atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
+                           du -m ${TEST_MOUNT_DIR}/dir/random
+                       atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
+                           du -m ${TEST_MOUNT_DIR}/dir/zero
+                       atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
+                           du -m ${TEST_MOUNT_DIR}/dir2/zero
+               else
+                       # If compression is on, the dir/zero file ought
+                       # to be smaller.
+                       atf_check -o match:"^1[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
+                           du -m ${TEST_MOUNT_DIR}/dir/zero
+                       atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
+                           du -m ${TEST_MOUNT_DIR}/dir/random
+                       atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
+                           du -m ${TEST_MOUNT_DIR}/dir2/zero
+               fi
+
+               atf_check zpool destroy ${ZFS_POOL_NAME}
+               atf_check rm -f ${TEST_ZFS_POOL_NAME}
+               atf_check mdconfig -d -u $(cat ${TEST_MD_DEVICE_FILE})
+               atf_check rm -f ${TEST_MD_DEVICE_FILE}
+       done
+}
+compression_cleanup()
+{
+       common_cleanup
+}
+
+#
+# Try destroying a dataset that was created by makefs.
+#
 atf_test_case dataset_removal cleanup
 dataset_removal_body()
 {
@@ -939,6 +1028,7 @@ atf_init_test_cases()
 {
        atf_add_test_case autoexpand
        atf_add_test_case basic
+       atf_add_test_case compression
        atf_add_test_case dataset_removal
        atf_add_test_case devfs
        atf_add_test_case empty_dir
diff --git a/usr.sbin/makefs/zfs/dsl.c b/usr.sbin/makefs/zfs/dsl.c
index f7264b9d2ca7..8a8cee7c82b2 100644
--- a/usr.sbin/makefs/zfs/dsl.c
+++ b/usr.sbin/makefs/zfs/dsl.c
@@ -193,6 +193,39 @@ dsl_dir_set_prop(zfs_opt_t *zfs, zfs_dsl_dir_t *dir, const char *key,
                        nvlist_add_uint64(nvl, key, 0);
                else
                        errx(1, "invalid value `%s' for %s", val, key);
+       } else if (strcmp(key, "compression") == 0) {
+               size_t i;
+
+               const struct zfs_compression_algorithm {
+                       const char *name;
+                       enum zio_compress alg;
+               } compression_algorithms[] = {
+                       { "off", ZIO_COMPRESS_OFF },
+                       { "on", ZIO_COMPRESS_ON },
+                       { "lzjb", ZIO_COMPRESS_LZJB },
+                       { "gzip", ZIO_COMPRESS_GZIP_6 },
+                       { "gzip-1", ZIO_COMPRESS_GZIP_1 },
+                       { "gzip-2", ZIO_COMPRESS_GZIP_2 },
+                       { "gzip-3", ZIO_COMPRESS_GZIP_3 },
+                       { "gzip-4", ZIO_COMPRESS_GZIP_4 },
+                       { "gzip-5", ZIO_COMPRESS_GZIP_5 },
+                       { "gzip-6", ZIO_COMPRESS_GZIP_6 },
+                       { "gzip-7", ZIO_COMPRESS_GZIP_7 },
+                       { "gzip-8", ZIO_COMPRESS_GZIP_8 },
+                       { "gzip-9", ZIO_COMPRESS_GZIP_9 },
+                       { "zle", ZIO_COMPRESS_ZLE },
+                       { "lz4", ZIO_COMPRESS_LZ4 },
+                       { "zstd", ZIO_COMPRESS_ZSTD },
+               };
+               for (i = 0; i < nitems(compression_algorithms); i++) {
+                       if (strcmp(val, compression_algorithms[i].name) == 0) {
+                               nvlist_add_uint64(nvl, key,
+                                   compression_algorithms[i].alg);
+                               break;
+                       }
+               }
+               if (i == nitems(compression_algorithms))
+                       errx(1, "invalid compression algorithm `%s'", val);
        } else {
                errx(1, "unknown property `%s'", key);
        }
@@ -236,9 +269,6 @@ dsl_init(zfs_opt_t *zfs)
 
        zfs->rootdsldir = dsl_dir_alloc(zfs, NULL);
 
-       nvlist_add_uint64(zfs->rootdsldir->propsnv, "compression",
-           ZIO_COMPRESS_OFF);
-
        zfs->rootds = dsl_dataset_alloc(zfs, zfs->rootdsldir);
        zfs->rootdsldir->headds = zfs->rootds;
 
@@ -288,9 +318,13 @@ dsl_init(zfs_opt_t *zfs)
        }
 
        /*
-        * Set the root dataset's mount point if the user didn't override the
-        * default.
+        * Set the root dataset's mount point and compression strategy if the
+        * user didn't override the defaults.
         */
+       if (nvpair_find(zfs->rootdsldir->propsnv, "compression") == NULL) {
+               nvlist_add_uint64(zfs->rootdsldir->propsnv, "compression",
+                   ZIO_COMPRESS_OFF);
+       }
        if (nvpair_find(zfs->rootdsldir->propsnv, "mountpoint") == NULL) {
                nvlist_add_string(zfs->rootdsldir->propsnv, "mountpoint",
                    zfs->rootpath);
