This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch auto-delete-3-plus-shard-move
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 2c07d72a6a2b370d6eec3f7e255ad67ac069ea61
Author: Robert Newson <rnew...@apache.org>
AuthorDate: Tue Jun 24 18:07:54 2025 +0100

    introduce read-only _calculate_drop_seq
---
 src/chttpd/src/chttpd_db.erl             | 16 ++++++++++++++-
 src/chttpd/src/chttpd_httpd_handlers.erl |  1 +
 src/fabric/src/fabric.erl                |  8 +++++++-
 src/fabric/src/fabric_drop_seq.erl       | 35 +++++++++++++++++++++-----------
 4 files changed, 46 insertions(+), 14 deletions(-)
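
For reviewers: a minimal sketch (not part of the commit) of how the new fabric
call could be exercised from a remsh, assuming a placeholder database name of
<<"db1">>. The encoding step mirrors what the chttpd handler below does before
returning the drop seq to the client; term_to_binary/2 stands in for the
?term_to_bin macro used in the diff.

    %% hedged sketch, not part of the commit; <<"db1">> is a placeholder name
    {ok, Shards, DropSeqs} = fabric:calculate_drop_seq(<<"db1">>),
    %% the handler returns the drop seq as an opaque value: the compressed
    %% external term format, base64url-encoded
    Opaque = couch_util:encodeBase64Url(term_to_binary(DropSeqs, [compressed])).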

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 2c1e903c0..2b8466307 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -31,7 +31,8 @@
     update_doc/4,
     http_code_from_status/1,
     handle_partition_req/2,
-    handle_update_drop_seq_req/2
+    handle_update_drop_seq_req/2,
+    handle_calculate_drop_seq_req/2
 ]).
 
 -import(
@@ -404,6 +405,19 @@ handle_update_drop_seq_req(
 handle_update_drop_seq_req(Req, _Db) ->
     send_method_not_allowed(Req, "POST").
 
+handle_calculate_drop_seq_req(
+    #httpd{method = 'GET', path_parts = [_DbName, <<"_calculate_drop_seq">>]} = Req, Db
+) ->
+    case fabric:calculate_drop_seq(Db) of
+        {ok, _Shards, DropSeq} ->
+            Opaque = couch_util:encodeBase64Url(?term_to_bin(DropSeq, [compressed])),
+            send_json(Req, 200, {[{ok, true}, {drop_seq, Opaque}]});
+        {error, Reason} ->
+            chttpd:send_error(Req, Reason)
+    end;
+handle_calculate_drop_seq_req(Req, _Db) ->
+    send_method_not_allowed(Req, "GET").
+
 handle_design_req(
     #httpd{
         path_parts = [_DbName, _Design, Name, <<"_", _/binary>> = Action | _Rest]
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 306885e83..8e0f0640e 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -36,6 +36,7 @@ db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
 db_handler(<<"_temp_view">>) -> fun chttpd_view:handle_temp_view_req/2;
 db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
 db_handler(<<"_update_drop_seq">>) -> fun chttpd_db:handle_update_drop_seq_req/2;
+db_handler(<<"_calculate_drop_seq">>) -> fun chttpd_db:handle_calculate_drop_seq_req/2;
 db_handler(_) -> no_match.
 
 design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 1136d647b..2002bff91 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -34,7 +34,8 @@
     get_purged_infos/1,
     compact/1, compact/2,
     get_partition_info/2,
-    update_drop_seq/1
+    update_drop_seq/1,
+    calculate_drop_seq/1
 ]).
 
 % Documents
@@ -125,6 +126,11 @@ get_partition_info(DbName, Partition) ->
 update_drop_seq(DbName) ->
     fabric_drop_seq:go(dbname(DbName)).
 
+-spec calculate_drop_seq(dbname()) ->
+    {ok, [{node(), binary(), non_neg_integer()}]} | {error, term()}.
+calculate_drop_seq(DbName) ->
+    fabric_drop_seq:calculate_drop_seq(dbname(DbName)).
+
 %% @doc the number of docs in a database
 %% @equiv get_doc_count(DbName, <<"_all_docs">>)
 get_doc_count(DbName) ->
diff --git a/src/fabric/src/fabric_drop_seq.erl b/src/fabric/src/fabric_drop_seq.erl
index 7b35cc3b1..f9781cc4f 100644
--- a/src/fabric/src/fabric_drop_seq.erl
+++ b/src/fabric/src/fabric_drop_seq.erl
@@ -5,7 +5,10 @@
 -include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("stdlib/include/assert.hrl").
 
--export([go/1]).
+-export([
+    go/1,
+    calculate_drop_seq/1
+]).
 
 -export([
     create_peer_checkpoint_doc_if_missing/5,
@@ -40,19 +43,10 @@
 -define(END_KEY(SubType), <<?LOCAL_DOC_PREFIX, "peer-checkpoint-", SubType/binary, ".">>).
 
 go(DbName) ->
-    Shards0 = mem3:shards(DbName),
-    case gather_drop_seq_info(Shards0) of
+    case calculate_drop_seq(DbName) of
         {error, Reason} ->
             {error, Reason};
-        {ok, #{
-            uuid_map := UuidMap,
-            peer_checkpoints := PeerCheckpoints,
-            shard_sync_history := ShardSyncHistory
-        }} ->
-            Shards1 = fully_replicated_shards_only(Shards0, ShardSyncHistory),
-            DropSeqs = calculate_drop_seqs(
-                Shards0, UuidMap, PeerCheckpoints, ShardSyncHistory
-            ),
+        {ok, Shards1, DropSeqs} ->
             Workers = lists:filtermap(
                 fun(Shard) ->
                     #shard{range = Range, node = Node, name = ShardName} = Shard,
@@ -107,6 +101,23 @@ go(DbName) ->
             end
     end.
 
+calculate_drop_seq(DbName) ->
+    Shards0 = mem3:shards(DbName),
+    case gather_drop_seq_info(Shards0) of
+        {error, Reason} ->
+            {error, Reason};
+        {ok, #{
+            uuid_map := UuidMap,
+            peer_checkpoints := PeerCheckpoints,
+            shard_sync_history := ShardSyncHistory
+        }} ->
+            Shards1 = fully_replicated_shards_only(Shards0, ShardSyncHistory),
+            DropSeqs = calculate_drop_seqs(
+                Shards0, UuidMap, PeerCheckpoints, ShardSyncHistory
+            ),
+            {ok, Shards1, DropSeqs}
+    end.
+
 -spec calculate_drop_seqs([#shard{}], uuid_map(), peer_checkpoints(), shard_sync_history()) ->
     peer_checkpoints().
 calculate_drop_seqs(Shards, UuidMap, PeerCheckpoints0, ShardSyncHistory) ->

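A note on the opaque drop_seq value returned by the new endpoint: it is the
compressed external term format of the calculated drop seq, base64url-encoded.
A minimal sketch of turning it back into the original term, assuming
couch_util:decodeBase64Url/1 as the inverse of the encoding used in the
handler; decode_drop_seq/1 is a hypothetical helper, not part of this commit:

    %% hedged sketch; Opaque is the "drop_seq" value from the GET response
    decode_drop_seq(Opaque) when is_binary(Opaque) ->
        %% binary_to_term/1 reverses term_to_binary/2, including the
        %% [compressed] option used on the encoding side
        binary_to_term(couch_util:decodeBase64Url(Opaque)).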