If userspace issues an fstrim whose range does not cover the prefree
segments, those segments are reused without being discarded:
clear_prefree_segments() skips the discard for every prefree segment
whenever fstrim is in progress, regardless of the trim range. This patch
fixes it by skipping only the segments inside [trim_start, trim_end]
(the trim path discards those itself), and by restoring cpc->trim_start
after the discard loop in flush_sit_entries() so the later range check
sees the original value.
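
For reference, cpc->trim_start and cpc->trim_end come from userspace
through the FITRIM ioctl. Below is a minimal sketch of a ranged trim
that could trigger the problem; the mount point and range values are
illustrative assumptions, not taken from a real report:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* Trim only part of the filesystem: before this patch, prefree
	 * segments outside [start, start + len) could be reused without
	 * a discard ever being issued for them. */
	struct fstrim_range range = {
		.start	= 0,
		.len	= 1ULL << 30,	/* illustrative 1GB range */
		.minlen	= 0,
	};
	int fd = open("/mnt/f2fs", O_RDONLY);	/* hypothetical mount */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	close(fd);
	return 0;
}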

Signed-off-by: Yunlei He <[email protected]>
---
 fs/f2fs/segment.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0738f48..30cd445 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -916,9 +916,13 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
                dirty_i->nr_dirty[PRE] -= end - start;
 
-               if (force || !test_opt(sbi, DISCARD))
+               if (!test_opt(sbi, DISCARD))
                        continue;
 
+               if (force && start >= cpc->trim_start &&
+                                       (end - 1) <= cpc->trim_end)
+                               continue;
+
                if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
                        f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                                (end - start) << sbi->log_blocks_per_seg);
@@ -2263,8 +2267,11 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        f2fs_bug_on(sbi, sit_i->dirty_sentries);
 out:
        if (cpc->reason == CP_DISCARD) {
+               __u64 trim_start = cpc->trim_start;
                for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
                        add_discard_addrs(sbi, cpc);
+
+               cpc->trim_start = trim_start;
        }
        mutex_unlock(&sit_i->sentry_lock);
 
-- 
2.10.1

