On 10/31/23 07:11, Brian Foster wrote:
On Mon, Oct 30, 2023 at 08:07:17PM -0700, Guenter Roeck wrote:
Hi,
when trying the new bcachefs in the mainline kernel, I see the
attached ODEBUG and lockdep splats.
This is simply mounting an empty bcachefs file system and
copying some files into it.
Hi Guenter,
I reproduce fairly quickly with DEBUG_OBJECTS_WORK enabled. It looks to
me that the lockdep splat is essentially fallout from the subsequent
warning, which doesn't like the INIT_WORK() of the onstack rhashtable.
Ideally this could use INIT_WORK_ONSTACK(), but I don't see support for
that plumbed through the rhashtable interface. Given this is the main copygc
task fn, it seems fairly reasonable to me to just slab alloc the whole
bucket tracking structure (as below). This addresses the splat for me on
a quick test.
The patch below fixes the problem for me.
Tested-by: Guenter Roeck <[email protected]>
Tested with arm64 qemu, both little and big endian.
Thanks,
Guenter
Kent,
Let me know if you're good with this and I'll post a proper patch.
Brian
--- 8< ---
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 4017120baeee..b835b85ef8b9 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -304,14 +304,16 @@ static int bch2_copygc_thread(void *arg)
struct moving_context ctxt;
struct bch_move_stats move_stats;
struct io_clock *clock = &c->io_clock[WRITE];
- struct buckets_in_flight buckets;
+ struct buckets_in_flight *buckets;
u64 last, wait;
int ret = 0;
- memset(&buckets, 0, sizeof(buckets));
-
- ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+ buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
+ if (!buckets)
+ return -ENOMEM;
+ ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
if (ret) {
+ kfree(buckets);
bch_err_msg(c, ret, "allocating copygc buckets in flight");
return ret;
}
@@ -329,12 +331,12 @@ static int bch2_copygc_thread(void *arg)
cond_resched();
if (!c->copy_gc_enabled) {
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(trans, &ctxt, buckets, true);
kthread_wait_freezable(c->copy_gc_enabled);
}
if (unlikely(freezing(current))) {
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(trans, &ctxt, buckets, true);
__refrigerator(false);
continue;
}
@@ -345,7 +347,7 @@ static int bch2_copygc_thread(void *arg)
if (wait > clock->max_slop) {
c->copygc_wait_at = last;
c->copygc_wait = last + wait;
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(trans, &ctxt, buckets, true);
trace_and_count(c, copygc_wait, c, wait, last + wait);
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
@@ -355,14 +357,16 @@ static int bch2_copygc_thread(void *arg)
c->copygc_wait = 0;
c->copygc_running = true;
- ret = bch2_copygc(trans, &ctxt, &buckets);
+ ret = bch2_copygc(trans, &ctxt, buckets);
c->copygc_running = false;
wake_up(&c->copygc_running_wq);
}
- move_buckets_wait(trans, &ctxt, &buckets, true);
- rhashtable_destroy(&buckets.table);
+ move_buckets_wait(trans, &ctxt, buckets, true);
+
+ rhashtable_destroy(&buckets->table);
+ kfree(buckets);
bch2_trans_put(trans);
bch2_moving_ctxt_exit(&ctxt);