zswap writeback is asynchronous, but test_zswap.c checks writeback
counters immediately after reclaim/trigger paths. On some platforms
(e.g. ppc64le), this can race with background writeback and cause
spurious failures even when behavior is correct.

Add wait_for_writeback() to poll get_cg_wb_count() with a bounded
timeout, and use it in:

  test_zswap_writeback_one() when writeback is expected
  test_no_invasive_cgroup_shrink() for the wb_group check

This keeps the original before/after assertion style while making the
tests robust against writeback completion latency.

No test behavior change, selftest stability improvement only.

Signed-off-by: Li Wang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Michal Koutný <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Yosry Ahmed <[email protected]>
---
 tools/testing/selftests/cgroup/test_zswap.c | 28 +++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index e96e316f7c47..e7d5dbc1ef7f 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -118,6 +118,27 @@ static char *setup_test_group_1M(const char *root, const char *name)
        return NULL;
 }
 
+/*
+ * Writeback is asynchronous; poll until at least one writeback has
+ * been recorded for @cg, or until @timeout_ms has elapsed.
+ */
+static long wait_for_writeback(const char *cg, int timeout_ms)
+{
+       int elapsed, count;
+       for (elapsed = 0; elapsed < timeout_ms; elapsed += 100) {
+               count = get_cg_wb_count(cg);
+
+               if (count < 0)
+                       return -1;
+               if (count > 0)
+                       return count;
+
+               usleep(100000);
+       }
+
+       return 0;
+}
+
 /*
  * Sanity test to check that pages are written into zswap.
  */
@@ -343,7 +364,10 @@ static int test_zswap_writeback_one(const char *cgroup, bool wb)
                return -1;
 
        /* Verify that zswap writeback occurred only if writeback was enabled */
-       zswpwb_after = get_cg_wb_count(cgroup);
+       if (wb)
+               zswpwb_after = wait_for_writeback(cgroup, 5000);
+       else
+               zswpwb_after = get_cg_wb_count(cgroup);
        if (zswpwb_after < 0)
                return -1;
 
@@ -473,7 +497,7 @@ static int test_no_invasive_cgroup_shrink(const char *root)
        }
 
        /* Verify that only zswapped memory from gwb_group has been written back */
-       if (get_cg_wb_count(wb_group) > 0 && get_cg_wb_count(zw_group) == 0)
+       if (wait_for_writeback(wb_group, 5000) > 0 && get_cg_wb_count(zw_group) 
== 0)
                ret = KSFT_PASS;
 out:
        cg_enter_current(root);
-- 
2.53.0


Reply via email to