This is an automated email from the ASF dual-hosted git repository. rnewson pushed a commit to branch auto-delete-3 in repository https://gitbox.apache.org/repos/asf/couchdb.git
commit 8c3a855ac861e8c729949db936adda63cd13f2a5 Author: Robert Newson <[email protected]> AuthorDate: Wed May 28 14:24:02 2025 +0100 PropCheck test for drop_seq against 3 node cluster --- .gitignore | 2 + Makefile | 7 + mix.exs | 5 +- test/elixir/lib/utils.ex | 2 +- test/elixir/test/drop_seq_statem_test.exs | 233 ++++++++++++++++++++++++++++++ 5 files changed, 246 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 080a7dd6f..4d4918a73 100644 --- a/.gitignore +++ b/.gitignore @@ -78,6 +78,8 @@ src/unicode_util_compat/ src/file_system/ src/rebar3/ src/erlfmt/ +src/libgraph/ +src/propcheck/ src/*.erl tmp/ diff --git a/Makefile b/Makefile index df2163d54..317188fe0 100644 --- a/Makefile +++ b/Makefile @@ -254,6 +254,13 @@ elixir-cluster-with-quorum: elixir-init devclean --degrade-cluster 1 \ --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)' +.PHONY: elixir-cluster +elixir-cluster: export MIX_ENV=integration +elixir-cluster: elixir-init devclean +# @rm -f _build/propcheck.ctex + @dev/run -n 3 -q -a adm:pass \ + --no-eval 'mix test --trace --only with_cluster $(EXUNIT_OPTS)' + .PHONY: elixir # target: elixir - Run Elixir-based integration tests elixir: export MIX_ENV=integration diff --git a/mix.exs b/mix.exs index 4f0cb4ba3..86fff4227 100644 --- a/mix.exs +++ b/mix.exs @@ -71,7 +71,7 @@ defmodule CouchDBTest.Mixfile do end # Run "mix help compile.app" to learn about applications. - def application, do: [applications: [:logger, :httpotion]] + def application, do: [applications: [:logger, :httpotion, :propcheck]] # Specifies which paths to compile per environment. 
defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"] @@ -85,7 +85,8 @@ defmodule CouchDBTest.Mixfile do {:httpotion, ">= 3.2.0", only: [:dev, :test, :integration], runtime: false}, {:excoveralls, "~> 0.18.3", only: :test}, {:ibrowse, path: path("ibrowse"), override: true}, - {:credo, "~> 1.7.11", only: [:dev, :test, :integration], runtime: false} + {:credo, "~> 1.7.11", only: [:dev, :test, :integration], runtime: false}, + {:propcheck, ">= 1.5.0", only: [:dev, :test, :integration]} ] extra_deps = [:b64url, :jiffy, :jwtf, :meck, :mochiweb] diff --git a/test/elixir/lib/utils.ex b/test/elixir/lib/utils.ex index 3ecf878e7..15e321240 100644 --- a/test/elixir/lib/utils.ex +++ b/test/elixir/lib/utils.ex @@ -58,4 +58,4 @@ defmodule Couch.Test.Utils do end) end -end \ No newline at end of file +end diff --git a/test/elixir/test/drop_seq_statem_test.exs b/test/elixir/test/drop_seq_statem_test.exs new file mode 100644 index 000000000..63abcd549 --- /dev/null +++ b/test/elixir/test/drop_seq_statem_test.exs @@ -0,0 +1,233 @@ +defmodule DropSeqStateM do + use PropCheck, default_opts: &PropCheck.TestHelpers.config/0 + use PropCheck.StateM + use CouchTestCase + + # alias Couch.Test.Utils + # import Utils + + @moduletag capture_log: true + + # expected to pass in all three cluster scenarios + @moduletag :with_cluster + @moduletag :without_quorum_test + @moduletag :with_quorum_test + + property "drop_seq works fine", start_size: 5, max_size: 100, numtests: 2000 do + forall cmds <- more_commands(100, commands(__MODULE__)) do + trap_exit do + db_name = random_db_name() + {:ok, _} = create_db(db_name, query: %{n: 3, q: 4}) + r = run_commands(__MODULE__, cmds, [{:dbname, db_name}]) + {history, state, result} = r + delete_db(db_name) + + (result == :ok) + |> when_fail( + IO.puts(""" + Commands: #{inspect(cmds, pretty: true)} + History: #{inspect(history, pretty: true)} + State: #{inspect(state, pretty: true)} + Result: #{inspect(result, pretty: true)} + """) + 
) + end + end + end + + require Record + + Record.defrecord(:state, + docs: [], + deleted_docs: [], + current_seq: 0, + peer_checkpoint_seq: nil, + drop_seq: nil, + drop_count: 0 + ) + + def initial_state() do + state() + end + + @max_doc_ids 10 + @doc_ids 1..@max_doc_ids |> Enum.map(&"doc-#{&1}") + + def doc_id, do: oneof(@doc_ids) + + def command(_state) do + oneof([ + {:call, __MODULE__, :update_document, [{:var, :dbname}, doc_id()]}, + {:call, __MODULE__, :delete_document, [{:var, :dbname}, doc_id()]}, + {:call, __MODULE__, :update_peer_checkpoint, [{:var, :dbname}]}, + {:call, __MODULE__, :update_drop_seq, [{:var, :dbname}]}, + {:call, __MODULE__, :compact_db, [{:var, :dbname}]}, + {:call, __MODULE__, :changes, [{:var, :dbname}]} + ]) + end + + def get_document(db_name, doc_id) do + resp = Couch.get("/#{db_name}/#{doc_id}") + + case resp.status_code do + 200 -> + {:ok, resp.body} + + 404 -> + {:not_found, resp.body["reason"]} + end + end + + def update_document(db_name, doc_id) do + case get_document(db_name, doc_id) do + {:ok, doc} -> + resp = Couch.put("/#{db_name}/#{doc_id}", body: doc) + assert resp.status_code == 201 + + {:not_found, _} -> + resp = Couch.put("/#{db_name}/#{doc_id}", body: %{}) + assert resp.status_code == 201 + end + sync_shards(db_name) + end + + def delete_document(db_name, doc_id) do + case get_document(db_name, doc_id) do + {:ok, doc} -> + rev = doc["_rev"] + resp = Couch.delete("/#{db_name}/#{doc_id}?rev=#{rev}") + assert resp.status_code == 200 + + {:not_found, _} -> + :ok + end + sync_shards(db_name) + end + + def update_peer_checkpoint(db_name) do + resp = Couch.get("/#{db_name}") + assert resp.status_code == 200 + update_seq = resp.body["update_seq"] + + resp = + Couch.put("/#{db_name}/_local/peer-checkpoint-foo", + body: %{ + update_seq: update_seq + } + ) + + assert resp.status_code == 201 + update_seq + end + + def update_drop_seq(db_name) do + resp = Couch.post("/#{db_name}/_update_drop_seq") + assert resp.status_code == 201 
+ resp.body + end + + def compact_db(db_name) do + compact(db_name) + # try to avoid seeing pre-compact state of shards immediately after + # compactor pids exit + :timer.sleep(1000) + end + + def changes(db_name) do + resp = Couch.get("/#{db_name}/_changes") + assert resp.status_code == 200 + + List.foldl(resp.body["results"], {[], []}, fn change, {doc_ids, del_doc_ids} -> + if change["deleted"] do + {doc_ids, Enum.sort([change["id"] | del_doc_ids])} + else + {Enum.sort([change["id"] | doc_ids]), del_doc_ids} + end + end) + end + + def sync_shards(db_name) do + resp = Couch.post("/#{db_name}/_sync_shards") + assert resp.status_code == 202 + :timer.sleep(1000) + end + + def precondition(s, {:call, _, :update_document, [_db_name, doc_id]}) do + not doc_exists(s, doc_id) + end + + def precondition(s, {:call, _, :delete_document, [_db_name, doc_id]}) do + doc_exists(s, doc_id) + end + + def precondition(_, _) do + true + end + + def next_state(s, _v, {:call, _, :update_document, [_db_name, doc_id]}) do + state(s, + current_seq: state(s, :current_seq) + 1, + docs: Enum.sort([doc_id | state(s, :docs)]), + deleted_docs: List.keydelete(state(s, :deleted_docs), doc_id, 0) + ) + end + + def next_state(s, _v, {:call, _, :delete_document, [_db_name, doc_id]}) do + state(s, + current_seq: state(s, :current_seq) + 1, + docs: List.delete(state(s, :docs), doc_id), + deleted_docs: + Enum.sort([{doc_id, state(s, :current_seq) + 1} | state(s, :deleted_docs)]) + ) + end + + def next_state(s, _v, {:call, _, :update_peer_checkpoint, [_db_name]}) do + state(s, peer_checkpoint_seq: state(s, :current_seq)) + end + + def next_state(s, _v, {:call, _, :update_drop_seq, [_db_name]}) do + # we'll drop all tombstones if _update_drop_seq is called when there + # are no peer checkpoint docs as the only peers are the shard syncs + # which update automatically + drop_seq = + if state(s, :peer_checkpoint_seq) == nil, + do: state(s, :current_seq), + else: state(s, :peer_checkpoint_seq) + + state(s, 
drop_seq: drop_seq) + end + + def next_state(s, _v, {:call, _, :compact_db, [_db_name]}) do + {keep_docs, drop_docs} = + Enum.split_with(state(s, :deleted_docs), fn {_, seq} -> + state(s, :drop_seq) == nil or seq > state(s, :drop_seq) + end) + + state(s, + deleted_docs: keep_docs, + drop_count: state(s, :drop_count) + length(drop_docs) + ) + end + + def next_state(s, _v, {:call, _, :changes, [_db_name]}) do + s + end + + def postcondition(s, {:call, _, :changes, [_db_name]}, {doc_ids, del_doc_ids}) do + doc_ids == doc_ids(s) and del_doc_ids == deleted_doc_ids(s) + end + + def postcondition(_, _, _), do: true + + def doc_exists(s, doc_id), do: doc_id in state(s, :docs) + + def deleted_doc_exists(s, doc_id) do + List.keymember?(state(s, :deleted_docs), doc_id, 0) + end + + def doc_ids(s), do: state(s, :docs) + + def deleted_doc_ids(s) do + Enum.map(state(s, :deleted_docs), fn {doc_id, _} -> doc_id end) + end +end
