- Get rid of `sbi->opt.max_sync_decompress_pages` since it's fixed at 3 all the time;
- Add Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES in bytes instead of in pages, since for non-4K pages, 3-page limitation makes no sense; - Move `sync_decompress` to sbi to avoid unexpected remount impact; - Fold z_erofs_is_sync_decompress() into its caller; - Better description of sysfs entry `sync_decompress`. Signed-off-by: Gao Xiang <[email protected]> --- Documentation/ABI/testing/sysfs-fs-erofs | 14 ++++++---- fs/erofs/internal.h | 5 +--- fs/erofs/super.c | 3 +- fs/erofs/sysfs.c | 2 +- fs/erofs/zdata.c | 35 +++++++++--------------- 5 files changed, 25 insertions(+), 34 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-fs-erofs b/Documentation/ABI/testing/sysfs-fs-erofs index 76d9808ed581..b9243c7f28d7 100644 --- a/Documentation/ABI/testing/sysfs-fs-erofs +++ b/Documentation/ABI/testing/sysfs-fs-erofs @@ -10,12 +10,16 @@ Description: Shows all enabled kernel features. What: /sys/fs/erofs/<disk>/sync_decompress Date: November 2021 Contact: "Huang Jianan" <[email protected]> -Description: Control strategy of sync decompression: +Description: Control strategy of synchronous decompression. Synchronous + decompression tries to decompress in the reader thread for + synchronous reads and small asynchronous reads (<= 12 KiB): - - 0 (default, auto): enable for readpage, and enable for - readahead on atomic contexts only. - - 1 (force on): enable for readpage and readahead. - - 2 (force off): disable for all situations. + - 0 (auto, default): apply to synchronous reads only, but will + switch to 1 (force on) if any decompression + request is detected in atomic contexts; + - 1 (force on): apply to synchronous reads and small + asynchronous reads; + - 2 (force off): disable synchronous decompression completely. 
What: /sys/fs/erofs/<disk>/drop_caches Date: November 2024 diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index f7f622836198..87edbb4366d1 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -59,10 +59,6 @@ enum { struct erofs_mount_opts { /* current strategy of how to use managed cache */ unsigned char cache_strategy; - /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */ - unsigned int sync_decompress; - /* threshold for decompression synchronously */ - unsigned int max_sync_decompress_pages; unsigned int mount_opt; }; @@ -116,6 +112,7 @@ struct erofs_sb_info { /* managed XArray arranged in physical block number */ struct xarray managed_pslots; + unsigned int sync_decompress; /* strategy for sync decompression */ unsigned int shrinker_run_no; u16 available_compr_algs; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 937a215f626c..9f90d410ab94 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -372,8 +372,7 @@ static void erofs_default_options(struct erofs_sb_info *sbi) { #ifdef CONFIG_EROFS_FS_ZIP sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; - sbi->opt.max_sync_decompress_pages = 3; - sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; + sbi->sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; #endif #ifdef CONFIG_EROFS_FS_XATTR set_opt(&sbi->opt, XATTR_USER); diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c index 1e0658a1d95b..86b22b9f0c19 100644 --- a/fs/erofs/sysfs.c +++ b/fs/erofs/sysfs.c @@ -59,7 +59,7 @@ static struct erofs_attr erofs_attr_##_name = { \ #define ATTR_LIST(name) (&erofs_attr_##name.attr) #ifdef CONFIG_EROFS_FS_ZIP -EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts); +EROFS_ATTR_RW_UI(sync_decompress, erofs_sb_info); EROFS_ATTR_FUNC(drop_caches, 0200); #endif #ifdef CONFIG_EROFS_FS_ZIP_ACCEL diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 3d31f7840ca0..f07f9db3b019 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -9,6 +9,7 @@ #include <linux/cpuhotplug.h> #include 
<trace/events/erofs.h> +#define Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES 12288 #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_INLINE_BVECS 2 @@ -1095,21 +1096,6 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f, return err; } -static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi, - unsigned int readahead_pages) -{ - /* auto: enable for read_folio, disable for readahead */ - if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) && - !readahead_pages) - return true; - - if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) && - (readahead_pages <= sbi->opt.max_sync_decompress_pages)) - return true; - - return false; -} - static bool z_erofs_page_is_invalidated(struct page *page) { return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); @@ -1483,9 +1469,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, #else queue_work(z_erofs_workqueue, &io->u.work); #endif - /* enable sync decompression for readahead */ - if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) - sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; + /* See `sync_decompress` in sysfs-fs-erofs for more details */ + if (sbi->sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) + sbi->sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; return; } z_erofs_decompressqueue_work(&io->u.work); @@ -1802,16 +1788,21 @@ static void z_erofs_submit_queue(struct z_erofs_frontend *f, z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); } -static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages) +static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rabytes) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct erofs_sb_info *sbi = EROFS_I_SB(f->inode); - bool force_fg = z_erofs_is_sync_decompress(sbi, rapages); + int syncmode = sbi->sync_decompress; + bool force_fg; int err; + force_fg = (syncmode == EROFS_SYNC_DECOMPRESS_AUTO && !rabytes) || + (syncmode 
== EROFS_SYNC_DECOMPRESS_FORCE_ON && + (rabytes <= Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES)); + if (f->head == Z_EROFS_PCLUSTER_TAIL) return 0; - z_erofs_submit_queue(f, io, &force_fg, !!rapages); + z_erofs_submit_queue(f, io, &force_fg, !!rabytes); /* handle bypass queue (no i/o pclusters) immediately */ err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); @@ -1932,7 +1923,7 @@ static void z_erofs_readahead(struct readahead_control *rac) z_erofs_pcluster_readmore(&f, rac, false); z_erofs_pcluster_end(&f); - (void)z_erofs_runqueue(&f, nrpages); + (void)z_erofs_runqueue(&f, nrpages << PAGE_SHIFT); erofs_put_metabuf(&f.map.buf); erofs_release_pages(&f.pagepool); } -- 2.43.5
