This is a small optimization, reducing the number of cachelines we touch in the fast path; it's also necessary for the next patch, which increases JOURNAL_BUF_NR.
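The new arithmetic relies on journal buffers being filled round-robin, so a buffer's slot index is just its sequence number masked with JOURNAL_BUF_MASK; given the current sequence and a slot index, the slot's sequence can be recovered without dereferencing the buffer. A minimal standalone sketch of that mapping (the JOURNAL_BUF_NR value of 4 and the seq_for_idx() helper are illustrative assumptions, not part of the patch):

#include <stdint.h>
#include <assert.h>

/* Illustrative only: the real JOURNAL_BUF_NR lives in the bcachefs source. */
#define JOURNAL_BUF_NR		4
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)

/*
 * Buffers are used round-robin, so (seq & JOURNAL_BUF_MASK) == idx.
 * Walking back from the current sequence by the index distance
 * recovers the sequence number of the entry in slot idx.
 */
static uint64_t seq_for_idx(uint64_t cur_seq, unsigned idx)
{
	return cur_seq - ((cur_seq - idx) & JOURNAL_BUF_MASK);
}

int main(void)
{
	/* cur_seq 10 sits in slot 2; slot 1 then holds seq 9, slot 3 seq 7. */
	assert(seq_for_idx(10, 2) == 10);
	assert(seq_for_idx(10, 1) == 9);
	assert(seq_for_idx(10, 3) == 7);
	return 0;
}

The cacheline saving comes from journal_cur_seq() reading state already held in struct journal, where the old expression dereferenced j->buf[old.idx].data and pulled an extra cacheline into the fast path; the EBUG_ON() below keeps the old load around as a debug-only cross-check.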
Signed-off-by: Kent Overstreet <[email protected]>
---
 fs/bcachefs/journal.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 1e5fcfe3624a..e514d664b8ae 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -364,7 +364,10 @@ static inline int journal_res_get_fast(struct journal *j,
 	res->ref	= true;
 	res->offset	= old.cur_entry_offset;
-	res->seq	= le64_to_cpu(j->buf[old.idx].data->seq);
+	res->seq	= journal_cur_seq(j);
+	res->seq -= (res->seq - old.idx) & JOURNAL_BUF_MASK;
+
+	EBUG_ON(res->seq != le64_to_cpu(j->buf[old.idx].data->seq));
 
 	return 1;
 }
-- 
2.45.2
