This is an automated email from the ASF dual-hosted git repository.
vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git
The following commit(s) were added to refs/heads/main by this push:
new bbfc9d25f Expect the update sequence in header epochs list to regress
bbfc9d25f is described below
commit bbfc9d25f07a2ac3d8426b4d3675bb6f09cd0f0c
Author: Nick Vatamaniuc <[email protected]>
AuthorDate: Mon Aug 5 11:53:56 2024 -0400
Expect the update sequence in header epochs list to regress
It can happen during compaction, as we figured out in #5163. In that commit
we removed the assertion for the case when we stay on the same node. However,
it may still be possible for someone to resume compaction on another node by
copying their files over, and then the assertion could trigger.
Here we remove the assert and add a clause handling the regression. As it's
an expected condition during compaction, we simply refuse to update the
epochs list.
---
src/couch/src/couch_bt_engine_header.erl | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
index 642ae73ce..3581b1e39 100644
--- a/src/couch/src/couch_bt_engine_header.erl
+++ b/src/couch/src/couch_bt_engine_header.erl
@@ -282,8 +282,13 @@ upgrade_epochs(#db_header{} = Header) ->
% compaction proceeds. In that case the epochs sequence could
% be regressing.
Epochs0;
- [{_OtherNode, S} | _] = Epochs1 ->
- assert_monotonic_update_seq(Header, S),
+ [{_OtherNode, S} | _] = Epochs0 when Header#db_header.update_seq < S ->
+ % When compacting the new header starts at update_seq = 0 and
+ % continues incrementing as the compaction proceeds. In that
+ % case the epochs sequence could be regressing. So if we detect
+ % a regression we don't update the epochs list.
+ Epochs0;
+ [{_OtherNode, _S} | _] = Epochs1 ->
% This node is taking over ownership of this db
% and marking the update sequence where it happened.
[{Node, Header#db_header.update_seq} | Epochs1]
@@ -295,15 +300,6 @@ upgrade_epochs(#db_header{} = Header) ->
DedupedEpochs = remove_dup_epochs(NewEpochs),
Header#db_header{epochs = DedupedEpochs}.
-assert_monotonic_update_seq(#db_header{update_seq = CurrentUpdateSeq}, LatestEpochSeq) ->
- ?assert(
- LatestEpochSeq =< CurrentUpdateSeq,
- "Latest epoch seq " ++
- integer_to_list(LatestEpochSeq) ++
- " should not be higher than current update seq " ++
- integer_to_list(CurrentUpdateSeq)
- ).
-
% This is slightly relying on the udpate_seq's being sorted
% in epochs due to how we only ever push things onto the
% front. Although if we ever had a case where the update_seq
@@ -460,12 +456,13 @@ upgrade_epochs_test() ->
CompactionHeader = NowOwnedHeader#db_header{update_seq = 0},
?assertEqual(CompactionHeader, upgrade(CompactionHeader)),
- % When nodes are different we do assert sequence regression
+ % When nodes are different, the sequence regression is not recorded as
+ % that's what happens during compaction
NotOwnedHighSeq = set(NewHeader, [
{update_seq, 4},
{epochs, [{'someothernode@someotherhost', 5}]}
]),
- ?assertError({assert, _}, upgrade(NotOwnedHighSeq)),
+ ?assertEqual(NotOwnedHighSeq, upgrade(NotOwnedHighSeq)),
% Getting a reset header maintains the epoch data
ResetHeader = from(NewNewHeader),