This is an automated email from the ASF dual-hosted git repository.

jiangphcn pushed a commit to branch COUCHDB-3326-clustered-purge-davisp-refactor
in repository https://gitbox.apache.org/repos/asf/couchdb.git


The following commit(s) were added to 
refs/heads/COUCHDB-3326-clustered-purge-davisp-refactor by this push:
     new c87286c  Add purge upgrade test case
c87286c is described below

commit c87286cab8364522cb96f40d9c0b50e11c9366bd
Author: jiangphcn <jian...@cn.ibm.com>
AuthorDate: Tue May 8 17:36:01 2018 +0800

    Add purge upgrade test case
    
    COUCHDB-3326
---
 src/couch/src/couch_bt_engine.erl                  |  11 +-
 src/couch/test/couch_db_purge_upgrade_tests.erl    | 176 ++++++++++++++++++---
 src/couch/test/fixtures/db_with_1_purge_req.couch  | Bin 8354 -> 8383 bytes
 src/couch/test/fixtures/db_with_2_purge_req.couch  | Bin 0 -> 16566 bytes
 src/couch/test/fixtures/db_without_purge_req.couch | Bin 0 -> 61644 bytes
 5 files changed, 159 insertions(+), 28 deletions(-)
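
For readers skimming the diff: the new tests drive the clustered purge
endpoint over HTTP. A condensed sketch of the request shape, lifted from
the test code below (the doc id and rev variable are illustrative):

    %% The body maps each doc id to the list of revs to purge.
    IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]},
    IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
    %% POST to <db>/_purge; a 201 response means the purge was applied.
    {ok, 201, _, _} = test_request:post(DbUrl ++ "/_purge",
        [{"Content-Type", "application/json"}], IdsRevs).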

diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
index f4d37aa..11c48d0 100644
--- a/src/couch/src/couch_bt_engine.erl
+++ b/src/couch/src/couch_bt_engine.erl
@@ -815,7 +815,12 @@ init_state(FilePath, Fd, Header0, Options) ->
         ]),
 
     PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
-    {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
+    PurgeTreeState2 = case PurgeTreeState of
+        0 -> nil;
+        _ -> PurgeTreeState
+    end,
+
+    {ok, PurgeTree} = couch_btree:open(PurgeTreeState2, Fd, [
         {split, fun ?MODULE:purge_tree_split/1},
         {join, fun ?MODULE:purge_tree_join/2},
         {reduce, fun ?MODULE:purge_tree_reduce/2}
@@ -914,7 +919,7 @@ upgrade_purge_info(Fd, Header) ->
                     {reduce, fun ?MODULE:purge_tree_reduce/2}
                 ]),
                 {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
-                {ok, PurgeTreeSt} = couch_btree:get_state(PurgeTree2),
+                PurgeTreeSt = couch_btree:get_state(PurgeTree2),
 
                 {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
                     {split, fun ?MODULE:purge_seq_tree_split/1},
@@ -922,7 +927,7 @@ upgrade_purge_info(Fd, Header) ->
                     {reduce, fun ?MODULE:purge_tree_reduce/2}
                 ]),
                 {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
-                {ok, PurgeSeqTreeSt} = couch_btree:get_state(PurgeSeqTree2),
+                PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
 
                 couch_bt_engine_header:set(Header, [
                     {purge_tree_state, PurgeTreeSt},
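
Two notes on the engine hunks above: a legacy header may record the
purge tree state as 0, which is normalized to nil before being handed to
couch_btree:open/3; and couch_btree:get_state/1 returns the btree state
term directly rather than {ok, State}, so the previous pattern match
would have failed during an upgrade. A minimal sketch mirroring the diff:

    %% 0 is the legacy encoding of an empty purge tree; use nil instead.
    PurgeTreeState2 = case PurgeTreeState of
        0 -> nil;
        _ -> PurgeTreeState
    end,
    %% get_state/1 returns the state itself, not an {ok, ...} tuple.
    PurgeTreeSt = couch_btree:get_state(PurgeTree2).
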
diff --git a/src/couch/test/couch_db_purge_upgrade_tests.erl b/src/couch/test/couch_db_purge_upgrade_tests.erl
index 0149b62..3602f74 100644
--- a/src/couch/test/couch_db_purge_upgrade_tests.erl
+++ b/src/couch/test/couch_db_purge_upgrade_tests.erl
@@ -15,25 +15,34 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 
+-define(USER, "couch_db_purge_upgrade_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
 -define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(TIMEOUT, 1000).
 
 
 setup() ->
-    DbName = <<"db_with_1_purge_req">>,
-    DbFileName = "db_with_1_purge_req.couch",
-    OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
     DbDir = config:get("couchdb", "database_dir"),
-    NewDbFilePath = filename:join([DbDir, DbFileName]),
-    Files = [NewDbFilePath],
+    DbFileNames = [
+        "db_without_purge_req",
+        "db_with_1_purge_req",
+        "db_with_2_purge_req"
+    ],
+    lists:map(fun(DbFileName) ->
+        write_db_doc(list_to_binary(DbFileName)),
+        OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName ++ ".couch"]),
+        NewDbFileName = DbFileName ++ ".1525663363.couch",
+        NewDbFilePath = filename:join(
+            [DbDir, "shards/00000000-ffffffff/", NewDbFileName]
+        ),
+        ok = filelib:ensure_dir(NewDbFilePath),
+        file:delete(NewDbFilePath),
+        file:copy(OldDbFilePath, NewDbFilePath),
+        NewDbFilePath
+    end, DbFileNames).
 
-    %% make sure there is no left over
-    lists:foreach(fun(File) -> file:delete(File) end, Files),
-    file:copy(OldDbFilePath, NewDbFilePath),
-    {DbName, Files}.
 
-
-teardown({_DbName, Files}) ->
+teardown(Files) ->
     lists:foreach(fun(File) -> file:delete(File) end, Files).
 
 
@@ -42,33 +51,150 @@ purge_upgrade_test_() ->
         "Purge Upgrade tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun chttpd_test_util:start_couch/0,
+            fun chttpd_test_util:stop_couch/1,
             {
                 foreach,
                 fun setup/0, fun teardown/1,
                 [
-                    %fun should_upgrade_legacy_db_with_0_purge_req/1,
-                    %fun should_upgrade_legacy_db_with_1_purge_req/1
-                    %fun should_upgrade_legacy_db_with_N_purge_req/1
+                    fun should_upgrade_legacy_db_without_purge_req/1,
+                    fun should_upgrade_legacy_db_with_1_purge_req/1,
+                    fun should_upgrade_legacy_db_with_N_purge_req/1
                 ]
             }
         }
     }.
 
 
-should_upgrade_legacy_db_with_1_purge_req({DbName, Files}) ->
+should_upgrade_legacy_db_without_purge_req(_Files) ->
     ?_test(begin
-        [_NewDbFilePath] = Files,
-        ok = config:set("query_server_config", "commit_freq", "0", false),
-        % add doc to trigger update
-        DocUrl = db_url(DbName) ++ "/boo",
-        {ok, Status, _Resp, _Body}  = test_request:put(
-            DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":3}">>),
-        ?assert(Status =:= 201 orelse Status =:= 202)
+        config:set("cluster", "q", "1"),
+        DbName = <<"db_without_purge_req">>,
+        DbUrl = db_url(DbName),
+
+        % 3 docs existed in the legacy database before the upgrade;
+        % add 2 new docs to the database
+        {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"),
+        {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"),
+        {Json1} = ?JSON_DECODE(ResultBody1),
+        {ok, _, _, ResultBody2} = test_request:get(DbUrl),
+        {Json2} = ?JSON_DECODE(ResultBody2),
+        Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined),
+        IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]},
+        ?assert(5 =:= couch_util:get_value(<<"doc_count">>, Json2)),
+
+        IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
+        {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/",
+            [?CONTENT_JSON], IdsRevs),
+        ?assert(Code =:= 201),
+
+        {ok, _, _, ResultBody4} = test_request:get(DbUrl),
+        {Json4} = ?JSON_DECODE(ResultBody4),
+        ?assert(4 =:= couch_util:get_value(<<"doc_count">>, Json4))
+    end).
+
+
+should_upgrade_legacy_db_with_1_purge_req(_Files) ->
+    ?_test(begin
+        config:set("cluster", "q", "1"),
+        DbName = <<"db_with_1_purge_req">>,
+        DbUrl = db_url(DbName),
+
+        % 3 docs existed in the legacy database and 1 of them was purged
+        % before the upgrade; add 2 new docs to the database
+        {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"),
+        {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"),
+        {Json1} = ?JSON_DECODE(ResultBody1),
+        {ok, _, _, ResultBody2} = test_request:get(DbUrl),
+        {Json2} = ?JSON_DECODE(ResultBody2),
+        Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined),
+        IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]},
+        ?assert(4 =:= couch_util:get_value(<<"doc_count">>, Json2)),
+
+        IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
+        {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/",
+            [?CONTENT_JSON], IdsRevs),
+        ?assert(Code =:= 201),
+
+        {ok, _, _, ResultBody4} = test_request:get(DbUrl),
+        {Json4} = ?JSON_DECODE(ResultBody4),
+        ?assert(3 =:= couch_util:get_value(<<"doc_count">>, Json4)),
+        PurgeSeq = couch_util:get_value(<<"purge_seq">>, Json4),
+        [SeqNumber | _Rest] = binary:split(PurgeSeq, <<"-">>, [global]),
+        ?assert(<<"2">> =:= SeqNumber)
+    end).
+
+
+should_upgrade_legacy_db_with_N_purge_req(_Files) ->
+    ?_test(begin
+        config:set("cluster", "q", "1"),
+        DbName = <<"db_with_2_purge_req">>,
+        DbUrl = db_url(DbName),
+
+        % 3 docs existed in the legacy database and 2 of them were purged
+        % before the upgrade; add 2 new docs to the database
+        {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"),
+        {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"),
+        {Json1} = ?JSON_DECODE(ResultBody1),
+        {ok, _, _, ResultBody2} = test_request:get(DbUrl),
+        {Json2} = ?JSON_DECODE(ResultBody2),
+        Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined),
+        IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]},
+        ?assert(3 =:= couch_util:get_value(<<"doc_count">>, Json2)),
+
+        IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
+        {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/",
+            [?CONTENT_JSON], IdsRevs),
+        ?assert(Code =:= 201),
+
+        {ok, _, _, ResultBody4} = test_request:get(DbUrl),
+        {Json4} = ?JSON_DECODE(ResultBody4),
+        ?assert(2 =:= couch_util:get_value(<<"doc_count">>, Json4)),
+        PurgeSeq = couch_util:get_value(<<"purge_seq">>, Json4),
+        [SeqNumber | _Rest] = binary:split(PurgeSeq, <<"-">>, [global]),
+        ?assert(<<"3">> =:= SeqNumber)
     end).
 
 
 db_url(DbName) ->
     Addr = config:get("httpd", "bind_address", "127.0.0.1"),
-    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+    Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
     "http://"; ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
+
+create_doc(Url, Id) ->
+    test_request:put(Url ++ "/" ++ Id,
+        [?CONTENT_JSON], "{\"mr\": \"rockoartischocko\"}").
+
+write_db_doc(Id) ->
+    DbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
+    Doc = couch_doc:from_json_obj({[
+        {<<"_id">>, Id},
+        {<<"shard_suffix">>, ".1525663363"},
+        {<<"changelog">>,
+            [[<<"add">>, <<"00000000-ffffffff">>, <<"nonode@nohost">>]]
+        },
+        {<<"by_node">>, {[{<<"nonode@nohost">>, [<<"00000000-ffffffff">>]}]}},
+        {<<"by_range">>, {[{<<"00000000-ffffffff">>, [<<"nonode@nohost">>]}]}}
+    ]}),
+    write_db_doc(DbName, Doc, true).
+
+write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
+    {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
+    try couch_db:open_doc(Db, Id, [ejson_body]) of
+        {ok, #doc{body = Body}} ->
+            % the doc is already in the desired state, we're done here
+            ok;
+        {not_found, _} when ShouldMutate ->
+            try couch_db:update_doc(Db, Doc, []) of
+                {ok, _} ->
+                    ok
+            catch conflict ->
+                % check to see if this was a replication race or a different edit
+                write_db_doc(DbName, Doc, false)
+            end;
+        _ ->
+            % the doc already exists in a different state
+            conflict
+    after
+        couch_db:close(Db)
+    end.
diff --git a/src/couch/test/fixtures/db_with_1_purge_req.couch b/src/couch/test/fixtures/db_with_1_purge_req.couch
index fc01907..996e064 100644
Binary files a/src/couch/test/fixtures/db_with_1_purge_req.couch and b/src/couch/test/fixtures/db_with_1_purge_req.couch differ
diff --git a/src/couch/test/fixtures/db_with_2_purge_req.couch b/src/couch/test/fixtures/db_with_2_purge_req.couch
new file mode 100644
index 0000000..ee4e11b
Binary files /dev/null and b/src/couch/test/fixtures/db_with_2_purge_req.couch differ
diff --git a/src/couch/test/fixtures/db_without_purge_req.couch b/src/couch/test/fixtures/db_without_purge_req.couch
new file mode 100644
index 0000000..814feb8
Binary files /dev/null and b/src/couch/test/fixtures/db_without_purge_req.couch differ
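
To run just this suite against a dev checkout, the usual CouchDB eunit
target should pick it up (assuming the standard Makefile; the suite name
matches the test module):

    make eunit apps=couch suites=couch_db_purge_upgrade_tests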

-- 
To stop receiving notification emails like this one, please contact
jiangp...@apache.org.
