[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126275438
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_lru.erl
 ##
 @@ -0,0 +1,309 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+start_link/0,
+open/1,
+insert/2,
+refresh/2
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+pids, % pid -> key
+dbs, % dbname -> docid -> key -> pid
+evictor
+}).
+
+
+start_link() ->
+gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+open(Key) ->
+try ets:lookup(?CACHE, Key) of
+[] ->
+lru_start(Key, true);
+[#entry{pid = undefined}] ->
+lru_start(Key, false);
+[#entry{val = undefined, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, miss]),
+ddoc_cache_entry:open(Pid, Key);
+[#entry{val = Val, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, hit]),
+ddoc_cache_entry:accessed(Pid),
+{ok, Val}
+catch _:_ ->
+couch_stats:increment_counter([ddoc_cache, recovery]),
+ddoc_cache_entry:recover(Key)
+end.
+
+
+insert(Key, Value) ->
+case ets:lookup(?CACHE, Key) of
+[] ->
+gen_server:call(?MODULE, {start, Key, Value}, infinity);
+[#entry{}] ->
+ok
+end.
+
+
+refresh(DbName, DDocIds) ->
+gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
+
+
+init(_) ->
+process_flag(trap_exit, true),
+{ok, Pids} = khash:new(),
+{ok, Dbs} = khash:new(),
+{ok, Evictor} = couch_event:link_listener(
+?MODULE, handle_db_event, nil, [all_dbs]
+),
+{ok, #st{
+pids = Pids,
+dbs = Dbs,
+evictor = Evictor
+}}.
+
+
+terminate(_Reason, St) ->
+case is_pid(St#st.evictor) of
+true -> catch exit(St#st.evictor, kill);
+false -> ok
+end,
+ok.
+
+
+handle_call({start, Key, Default}, _From, St) ->
+#st{
+pids = Pids,
+dbs = Dbs
+} = St,
+case ets:lookup(?CACHE, Key) of
+[] ->
+MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+CurSize = ets:info(?CACHE, size),
+case trim(St, CurSize, max(0, MaxSize)) of
+ok ->
+true = ets:insert_new(?CACHE, #entry{key = Key}),
+{ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
+true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
+ok = khash:put(Pids, Pid, Key),
+store_key(Dbs, Key, Pid),
+{reply, {ok, Pid}, St};
+full ->
+?EVENT(full, Key),
+{reply, full, St}
+end;
+[#entry{pid = Pid}] ->
+{reply, {ok, Pid}, St}
+end;
+
+handle_call(Msg, _From, St) ->
+{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_cast({evict, DbName}, St) ->
+gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+{noreply, St};
+
+handle_cast({refresh, DbName, DDocIds}, St) ->
+gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
+{noreply, St};
+
+handle_cast({do_evict, DbName}, St) ->
+#st{
+dbs = Dbs
+} = St,
+ToRem = case khash:lookup(Dbs, DbName) of
+{value, DDocIds} ->
+AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
+khash:to_list(Keys) ++ Acc1
+end, []),
+?EVENT(evicted, DbName),
+AccOut;
+not_found ->
+?EVENT(evict_noop, DbName),
+[]
+end,
+lists:foreach(fun({Key, Pid}) ->
+remove_entry(St, Key, Pid)
+end, ToRem),
+khash:del(Dbs, DbName),
+{noreply, St};
+
+handle_cast({do_refresh, DbName, DDocIdList}, St) ->
+#st{
+dbs = Dbs
+} = St,
+% We prepend no_ddocid to the DDocIdList below
+% so that we refresh all custom and validation
+% function entries which load data from all
+% design documents.
+case khash:lookup(Dbs, DbName) of
+{value, DDocIds} ->
+

[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126275389
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_lru.erl
 ##
 @@ -0,0 +1,309 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+start_link/0,
+open/1,
+insert/2,
+refresh/2
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+pids, % pid -> key
+dbs, % dbname -> docid -> key -> pid
+evictor
+}).
+
+
+start_link() ->
+gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+open(Key) ->
+try ets:lookup(?CACHE, Key) of
+[] ->
+lru_start(Key, true);
+[#entry{pid = undefined}] ->
+lru_start(Key, false);
+[#entry{val = undefined, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, miss]),
+ddoc_cache_entry:open(Pid, Key);
+[#entry{val = Val, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, hit]),
+ddoc_cache_entry:accessed(Pid),
+{ok, Val}
+catch _:_ ->
+couch_stats:increment_counter([ddoc_cache, recovery]),
+ddoc_cache_entry:recover(Key)
+end.
+
+
+insert(Key, Value) ->
+case ets:lookup(?CACHE, Key) of
+[] ->
+gen_server:call(?MODULE, {start, Key, Value}, infinity);
+[#entry{}] ->
+ok
+end.
+
+
+refresh(DbName, DDocIds) ->
+gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
+
+
+init(_) ->
+process_flag(trap_exit, true),
+{ok, Pids} = khash:new(),
+{ok, Dbs} = khash:new(),
+{ok, Evictor} = couch_event:link_listener(
+?MODULE, handle_db_event, nil, [all_dbs]
+),
+{ok, #st{
+pids = Pids,
+dbs = Dbs,
+evictor = Evictor
+}}.
+
+
+terminate(_Reason, St) ->
+case is_pid(St#st.evictor) of
+true -> catch exit(St#st.evictor, kill);
+false -> ok
+end,
+ok.
+
+
+handle_call({start, Key, Default}, _From, St) ->
+#st{
+pids = Pids,
+dbs = Dbs
+} = St,
+case ets:lookup(?CACHE, Key) of
+[] ->
+MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+CurSize = ets:info(?CACHE, size),
+case trim(St, CurSize, max(0, MaxSize)) of
+ok ->
+true = ets:insert_new(?CACHE, #entry{key = Key}),
+{ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
+true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
+ok = khash:put(Pids, Pid, Key),
+store_key(Dbs, Key, Pid),
+{reply, {ok, Pid}, St};
+full ->
+?EVENT(full, Key),
+{reply, full, St}
+end;
+[#entry{pid = Pid}] ->
+{reply, {ok, Pid}, St}
+end;
+
+handle_call(Msg, _From, St) ->
+{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_cast({evict, DbName}, St) ->
+gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+{noreply, St};
+
+handle_cast({refresh, DbName, DDocIds}, St) ->
+gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
+{noreply, St};
+
+handle_cast({do_evict, DbName}, St) ->
+#st{
+dbs = Dbs
+} = St,
+ToRem = case khash:lookup(Dbs, DbName) of
+{value, DDocIds} ->
+AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
+khash:to_list(Keys) ++ Acc1
+end, []),
+?EVENT(evicted, DbName),
+AccOut;
+not_found ->
+?EVENT(evict_noop, DbName),
+[]
+end,
+lists:foreach(fun({Key, Pid}) ->
 
 Review comment:
   Tiny nit: you could use a one-liner list comprehension here:
   ```
   [remove_entry(St, Key, Pid) || {Key, Pid} <- ToRem]
   ```
   Up to you; keep the foreach if you like it better.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126275327
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_lru.erl
 ##
 @@ -0,0 +1,309 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+start_link/0,
+open/1,
+insert/2,
+refresh/2
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+pids, % pid -> key
+dbs, % dbname -> docid -> key -> pid
+evictor
+}).
+
+
+start_link() ->
+gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+open(Key) ->
+try ets:lookup(?CACHE, Key) of
+[] ->
+lru_start(Key, true);
+[#entry{pid = undefined}] ->
+lru_start(Key, false);
+[#entry{val = undefined, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, miss]),
+ddoc_cache_entry:open(Pid, Key);
+[#entry{val = Val, pid = Pid}] ->
+couch_stats:increment_counter([ddoc_cache, hit]),
+ddoc_cache_entry:accessed(Pid),
+{ok, Val}
+catch _:_ ->
+couch_stats:increment_counter([ddoc_cache, recovery]),
+ddoc_cache_entry:recover(Key)
+end.
+
+
+insert(Key, Value) ->
+case ets:lookup(?CACHE, Key) of
+[] ->
+gen_server:call(?MODULE, {start, Key, Value}, infinity);
+[#entry{}] ->
+ok
+end.
+
+
+refresh(DbName, DDocIds) ->
+gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
+
+
+init(_) ->
+process_flag(trap_exit, true),
+{ok, Pids} = khash:new(),
+{ok, Dbs} = khash:new(),
+{ok, Evictor} = couch_event:link_listener(
+?MODULE, handle_db_event, nil, [all_dbs]
+),
+{ok, #st{
+pids = Pids,
+dbs = Dbs,
+evictor = Evictor
+}}.
+
+
+terminate(_Reason, St) ->
+case is_pid(St#st.evictor) of
+true -> catch exit(St#st.evictor, kill);
 
 Review comment:
   Minor thing: is the `catch` needed? `exit/2` shouldn't crash and always 
returns `true` here, since we already checked that the evictor is a pid.
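   
   For illustration, a minimal sketch of the simplified clause the comment is 
suggesting (keeping the existing `is_pid/1` check):
   ```
   terminate(_Reason, St) ->
       case is_pid(St#st.evictor) of
           true -> exit(St#st.evictor, kill);
           false -> ok
       end,
       ok.
   ```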
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali opened a new pull request #657: Remove deprecated OAuth 1.0 implementation

2017-07-07 Thread git
wohali opened a new pull request #657: Remove deprecated OAuth 1.0 
implementation
URL: https://github.com/apache/couchdb/pull/657
 
 
   Helps resolve issue #656. The implementation has been broken since the BigCouch merge.
   
   Replicator oauth hooks are left in place for future work towards
   adding cookie-based authentication support.
   
   ## Related Pull Requests
   
   https://github.com/apache/couchdb-documentation/pull/142
   
   ## Checklist
   
   - [X] Code is written and works correctly;
   - [X] Changes are covered by tests;
   - [X] Documentation reflects the changes;
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126275240
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_entry.erl
 ##
 @@ -0,0 +1,332 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+-behaviour(gen_server).
+
+
+-export([
+dbname/1,
+ddocid/1,
+recover/1,
+insert/2,
+
+start_link/2,
+shutdown/1,
+open/2,
+accessed/1,
+refresh/1
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+key,
+val,
+opener,
+waiters,
+ts,
+accessed
+}).
+
+
+dbname({Mod, Arg}) ->
+Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+Mod:recover(Arg).
+
+
+insert({Mod, Arg}, Value) ->
+Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
+{ok, Pid}.
+
+
+shutdown(Pid) ->
+ok = gen_server:call(Pid, shutdown).
+
+
+open(Pid, Key) ->
+try
+Resp = gen_server:call(Pid, open),
+case Resp of
+{open_ok, Val} ->
+Val;
+{open_error, {T, R, S}} ->
+erlang:raise(T, R, S)
+end
+catch exit:_ ->
+% Its possible that this process was evicted just
+% before we tried talking to it. Just fallback
+% to a standard recovery
+recover(Key)
+end.
+
+
+accessed(Pid) ->
+gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+gen_server:cast(Pid, force_refresh).
+
+
+init({Key, undefined}) ->
+true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+St = #st{
+key = Key,
+opener = spawn_opener(Key),
+waiters = [],
+accessed = 1
+},
+?EVENT(started, Key),
+gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Default}) ->
+Updates = [
+{#entry.val, Default},
+{#entry.pid, self()}
+],
+NewTs = os:timestamp(),
+true = ets:update_element(?CACHE, Key, Updates),
+true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+St = #st{
+key = Key,
+val = {open_ok, {ok, Default}},
+opener = start_timer(),
+waiters = undefined,
+ts = NewTs,
+accessed = 1
+},
+?EVENT(default_started, Key),
+gen_server:enter_loop(?MODULE, [], St).
+
+
+terminate(_Reason, St) ->
+#st{
+key = Key,
+opener = Pid,
+ts = Ts
+} = St,
+% We may have already deleted our cache entry
+% during shutdown
+Pattern = #entry{key = Key, pid = self(), _ = '_'},
+CacheMSpec = [{Pattern, [], [true]}],
+true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+% We may have already deleted our LRU entry
+% during shutdown
+if Ts == undefined -> ok; true ->
+LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+true = ets:select_delete(?LRU, LruMSpec) < 2
+end,
+% Blow away any current opener if it exists
+if not is_pid(Pid) -> ok; true ->
+catch exit(Pid, kill)
+end,
+ok.
+
+
+handle_call(open, From, #st{val = undefined} = St) ->
+NewSt = St#st{
+waiters = [From | St#st.waiters]
+},
+{noreply, NewSt};
+
+handle_call(open, _From, St) ->
+{reply, St#st.val, St};
+
+handle_call(shutdown, _From, St) ->
+remove_from_cache(St),
+{stop, normal, ok, St};
+
+handle_call(Msg, _From, St) ->
+{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
 
 Review comment:
   Running the `ddoc_cache_speed` test with 1500 to 2000 workers, I saw that cache 
entry processes had `accessed` messages backed up in their queues (300-1000 
messages). They seemed to be in a steady state, so the backlog didn't appear to 
grow much.
   
   If that's something we'd want to mitigate, we could avoid calling `update_lru` 
on every `accessed` message. For example, on each `update_lru` call store a 
last-update timestamp in the process dictionary or state, and only update again 
if the last update was more than one second ago. That way, in most cases the 
`accessed` cast handler just drains the messages and goes back to waiting, which 
keeps the queue from growing.
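   
   A rough sketch of that throttle (the `maybe_update_lru/1` helper and the 
`last_lru_update` key are illustrative names, not part of the PR):
   ```
   %% Called from the `accessed` cast clause in place of update_lru/1. It only
   %% touches the LRU table if the previous update was more than one second
   %% ago; otherwise it returns the state unchanged.
   maybe_update_lru(St) ->
       Now = os:timestamp(),
       case get(last_lru_update) of
           undefined ->
               put(last_lru_update, Now),
               update_lru(St);
           Last ->
               case timer:now_diff(Now, Last) > 1000000 of
                   true ->
                       put(last_lru_update, Now),
                       update_lru(St);
                   false ->
                       St
               end
       end.
   ```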
 

[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126275106
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_entry.erl
 ##
 @@ -0,0 +1,332 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+-behaviour(gen_server).
+
+
+-export([
+dbname/1,
+ddocid/1,
+recover/1,
+insert/2,
+
+start_link/2,
+shutdown/1,
+open/2,
+accessed/1,
+refresh/1
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+key,
+val,
+opener,
+waiters,
+ts,
+accessed
+}).
+
+
+dbname({Mod, Arg}) ->
+Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+Mod:recover(Arg).
+
+
+insert({Mod, Arg}, Value) ->
+Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
+{ok, Pid}.
+
+
+shutdown(Pid) ->
+ok = gen_server:call(Pid, shutdown).
+
+
+open(Pid, Key) ->
+try
+Resp = gen_server:call(Pid, open),
+case Resp of
+{open_ok, Val} ->
+Val;
+{open_error, {T, R, S}} ->
+erlang:raise(T, R, S)
+end
+catch exit:_ ->
+% Its possible that this process was evicted just
+% before we tried talking to it. Just fallback
+% to a standard recovery
+recover(Key)
+end.
+
+
+accessed(Pid) ->
+gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+gen_server:cast(Pid, force_refresh).
+
+
+init({Key, undefined}) ->
+true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+St = #st{
+key = Key,
+opener = spawn_opener(Key),
+waiters = [],
+accessed = 1
+},
+?EVENT(started, Key),
+gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Default}) ->
+Updates = [
+{#entry.val, Default},
+{#entry.pid, self()}
+],
+NewTs = os:timestamp(),
+true = ets:update_element(?CACHE, Key, Updates),
+true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+St = #st{
+key = Key,
+val = {open_ok, {ok, Default}},
+opener = start_timer(),
+waiters = undefined,
+ts = NewTs,
+accessed = 1
+},
+?EVENT(default_started, Key),
+gen_server:enter_loop(?MODULE, [], St).
+
+
+terminate(_Reason, St) ->
+#st{
+key = Key,
+opener = Pid,
+ts = Ts
+} = St,
+% We may have already deleted our cache entry
+% during shutdown
+Pattern = #entry{key = Key, pid = self(), _ = '_'},
+CacheMSpec = [{Pattern, [], [true]}],
+true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+% We may have already deleted our LRU entry
+% during shutdown
+if Ts == undefined -> ok; true ->
+LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+true = ets:select_delete(?LRU, LruMSpec) < 2
+end,
+% Blow away any current opener if it exists
+if not is_pid(Pid) -> ok; true ->
+catch exit(Pid, kill)
+end,
+ok.
+
+
+handle_call(open, From, #st{val = undefined} = St) ->
+NewSt = St#st{
+waiters = [From | St#st.waiters]
+},
+{noreply, NewSt};
+
+handle_call(open, _From, St) ->
+{reply, St#st.val, St};
+
+handle_call(shutdown, _From, St) ->
+remove_from_cache(St),
+{stop, normal, ok, St};
+
+handle_call(Msg, _From, St) ->
+{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
+?EVENT(accessed, St#st.key),
+drain_accessed(),
+NewSt = St#st{
+accessed = St#st.accessed + 1
+},
+{noreply, update_lru(NewSt)};
+
+handle_cast(force_refresh, St) ->
+% If we had frequent design document updates
+% they could end up racing accessed events and
+% end up prematurely evicting this entry from
+% cache. To prevent this we just make sure that
+% accessed is set to at least 1 before we
+% execute a refresh.
+NewSt = if St#st.accessed > 0 -> St; true ->
+St#st{accessed = 1}
+end,
+handle_cast(refresh, NewSt);
+
+handle_cast(refresh, #st{accessed = 0} = St) ->
+{stop, normal, St};
+
+handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
+#st{
+key = Key
+   

[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126274920
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_entry.erl
 ##
 @@ -0,0 +1,332 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+-behaviour(gen_server).
+
+
+-export([
+dbname/1,
+ddocid/1,
+recover/1,
+insert/2,
+
+start_link/2,
+shutdown/1,
+open/2,
+accessed/1,
+refresh/1
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+key,
+val,
+opener,
+waiters,
+ts,
+accessed
+}).
+
+
+dbname({Mod, Arg}) ->
+Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+Mod:recover(Arg).
+
+
+insert({Mod, Arg}, Value) ->
+Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
+{ok, Pid}.
+
+
+shutdown(Pid) ->
+ok = gen_server:call(Pid, shutdown).
+
+
+open(Pid, Key) ->
+try
+Resp = gen_server:call(Pid, open),
+case Resp of
+{open_ok, Val} ->
+Val;
+{open_error, {T, R, S}} ->
+erlang:raise(T, R, S)
+end
+catch exit:_ ->
+% Its possible that this process was evicted just
+% before we tried talking to it. Just fallback
+% to a standard recovery
+recover(Key)
+end.
+
+
+accessed(Pid) ->
+gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+gen_server:cast(Pid, force_refresh).
+
+
+init({Key, undefined}) ->
+true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+St = #st{
+key = Key,
+opener = spawn_opener(Key),
+waiters = [],
+accessed = 1
+},
+?EVENT(started, Key),
+gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Default}) ->
+Updates = [
+{#entry.val, Default},
+{#entry.pid, self()}
+],
+NewTs = os:timestamp(),
+true = ets:update_element(?CACHE, Key, Updates),
+true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+St = #st{
+key = Key,
+val = {open_ok, {ok, Default}},
+opener = start_timer(),
+waiters = undefined,
+ts = NewTs,
+accessed = 1
+},
+?EVENT(default_started, Key),
+gen_server:enter_loop(?MODULE, [], St).
+
+
+terminate(_Reason, St) ->
+#st{
+key = Key,
+opener = Pid,
+ts = Ts
+} = St,
+% We may have already deleted our cache entry
+% during shutdown
+Pattern = #entry{key = Key, pid = self(), _ = '_'},
+CacheMSpec = [{Pattern, [], [true]}],
+true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+% We may have already deleted our LRU entry
+% during shutdown
+if Ts == undefined -> ok; true ->
+LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+true = ets:select_delete(?LRU, LruMSpec) < 2
+end,
+% Blow away any current opener if it exists
+if not is_pid(Pid) -> ok; true ->
+catch exit(Pid, kill)
+end,
+ok.
+
+
+handle_call(open, From, #st{val = undefined} = St) ->
+NewSt = St#st{
+waiters = [From | St#st.waiters]
+},
+{noreply, NewSt};
+
+handle_call(open, _From, St) ->
+{reply, St#st.val, St};
+
+handle_call(shutdown, _From, St) ->
+remove_from_cache(St),
+{stop, normal, ok, St};
+
+handle_call(Msg, _From, St) ->
+{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
+?EVENT(accessed, St#st.key),
+drain_accessed(),
+NewSt = St#st{
+accessed = St#st.accessed + 1
+},
+{noreply, update_lru(NewSt)};
+
+handle_cast(force_refresh, St) ->
+% If we had frequent design document updates
+% they could end up racing accessed events and
+% end up prematurely evicting this entry from
+% cache. To prevent this we just make sure that
+% accessed is set to at least 1 before we
+% execute a refresh.
+NewSt = if St#st.accessed > 0 -> St; true ->
+St#st{accessed = 1}
+end,
+handle_cast(refresh, NewSt);
+
+handle_cast(refresh, #st{accessed = 0} = St) ->
+{stop, normal, St};
+
+handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
+#st{
+key = Key
+   

[GitHub] nickva commented on a change in pull request #610: Optimize ddoc cache

2017-07-07 Thread git
nickva commented on a change in pull request #610: Optimize ddoc cache
URL: https://github.com/apache/couchdb/pull/610#discussion_r126274885
 
 

 ##
 File path: src/ddoc_cache/src/ddoc_cache_entry.erl
 ##
 @@ -0,0 +1,332 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+-behaviour(gen_server).
+
+
+-export([
+dbname/1,
+ddocid/1,
+recover/1,
+insert/2,
+
+start_link/2,
+shutdown/1,
+open/2,
+accessed/1,
+refresh/1
+]).
+
+-export([
+init/1,
+terminate/2,
+handle_call/3,
+handle_cast/2,
+handle_info/2,
+code_change/3
+]).
+
+-export([
+do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+key,
+val,
+opener,
+waiters,
+ts,
+accessed
+}).
+
+
+dbname({Mod, Arg}) ->
+Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+Mod:recover(Arg).
+
+
+insert({Mod, Arg}, Value) ->
+Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
+{ok, Pid}.
+
+
+shutdown(Pid) ->
+ok = gen_server:call(Pid, shutdown).
+
+
+open(Pid, Key) ->
+try
+Resp = gen_server:call(Pid, open),
+case Resp of
+{open_ok, Val} ->
+Val;
+{open_error, {T, R, S}} ->
+erlang:raise(T, R, S)
+end
+catch exit:_ ->
+% Its possible that this process was evicted just
+% before we tried talking to it. Just fallback
+% to a standard recovery
+recover(Key)
+end.
+
+
+accessed(Pid) ->
+gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+gen_server:cast(Pid, force_refresh).
+
+
+init({Key, undefined}) ->
+true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+St = #st{
+key = Key,
+opener = spawn_opener(Key),
+waiters = [],
+accessed = 1
+},
+?EVENT(started, Key),
+gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Default}) ->
+Updates = [
+{#entry.val, Default},
+{#entry.pid, self()}
+],
+NewTs = os:timestamp(),
+true = ets:update_element(?CACHE, Key, Updates),
+true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+St = #st{
+key = Key,
+val = {open_ok, {ok, Default}},
+opener = start_timer(),
+waiters = undefined,
+ts = NewTs,
+accessed = 1
+},
+?EVENT(default_started, Key),
+gen_server:enter_loop(?MODULE, [], St).
+
+
+terminate(_Reason, St) ->
+#st{
+key = Key,
+opener = Pid,
+ts = Ts
+} = St,
+% We may have already deleted our cache entry
+% during shutdown
+Pattern = #entry{key = Key, pid = self(), _ = '_'},
+CacheMSpec = [{Pattern, [], [true]}],
+true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+% We may have already deleted our LRU entry
+% during shutdown
+if Ts == undefined -> ok; true ->
+LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+true = ets:select_delete(?LRU, LruMSpec) < 2
+end,
+% Blow away any current opener if it exists
+if not is_pid(Pid) -> ok; true ->
+catch exit(Pid, kill)
+end,
+ok.
+
+
+handle_call(open, From, #st{val = undefined} = St) ->
+NewSt = St#st{
+waiters = [From | St#st.waiters]
+},
+{noreply, NewSt};
+
+handle_call(open, _From, St) ->
+{reply, St#st.val, St};
+
+handle_call(shutdown, _From, St) ->
+remove_from_cache(St),
+{stop, normal, ok, St};
+
+handle_call(Msg, _From, St) ->
+{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
+?EVENT(accessed, St#st.key),
+drain_accessed(),
+NewSt = St#st{
+accessed = St#st.accessed + 1
+},
+{noreply, update_lru(NewSt)};
+
+handle_cast(force_refresh, St) ->
+% If we had frequent design document updates
+% they could end up racing accessed events and
+% end up prematurely evicting this entry from
+% cache. To prevent this we just make sure that
+% accessed is set to at least 1 before we
+% execute a refresh.
+NewSt = if St#st.accessed > 0 -> St; true ->
+St#st{accessed = 1}
+end,
+handle_cast(refresh, NewSt);
+
+handle_cast(refresh, #st{accessed = 0} = St) ->
+{stop, normal, St};
+
+handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
+#st{
+key = Key
+   

[GitHub] wohali opened a new pull request #142: Remove OAuth documentation

2017-07-07 Thread git
wohali opened a new pull request #142: Remove OAuth documentation
URL: https://github.com/apache/couchdb-documentation/pull/142
 
 
   Helps resolve issue #656
   
   Do not merge until a similar PR on apache/couchdb is completed!
   
   ## Checklist
   
   - [ ] Documentation is written and is accurate;
   - [ ] `make check` passes with no errors
   - [ ] Update 
[rebar.config.script](https://github.com/apache/couchdb/blob/master/rebar.config.script)
 with the commit hash once this PR is rebased and merged
   
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali opened a new issue #656: Remove OAuth implementation

2017-07-07 Thread git
wohali opened a new issue #656: Remove OAuth implementation
URL: https://github.com/apache/couchdb/issues/656
 
 
   CouchDB's OAuth implementation is OAuth 1.0 (not 2.0), supports 2-legged 
authentication only, and does not work with the CouchDB 2.x clustered interface.
   
   As the implementation needs to be completely reworked, I am recommending we 
remove the current OAuth functionality for the 2.1 release.
   
   @rnewson gave this a +1 today; if you have any strong objections, please say 
so before I complete the work to remove the (broken) functionality.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[jira] [Closed] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread Joan Touzet (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joan Touzet closed COUCHDB-3431.

Resolution: Fixed

> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)


[jira] [Commented] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread Joan Touzet (JIRA)

[ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16078936#comment-16078936
 ] 

Joan Touzet commented on COUCHDB-3431:
--

Reopening just to note that we even state this in the response:

$ curl -X PUT localhost:15984/foo/bar -d '[{}]'
{"error":"bad_request","reason":"Document must be a JSON object"}

Even though what you are posting is valid JSON, it is not a valid CouchDB 
document. Sorry.

> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)


[jira] [Reopened] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread Joan Touzet (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joan Touzet reopened COUCHDB-3431:
--

> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)


[GitHub] wohali commented on issue #655: Failure waiting for db shutdown in couchdb_1283 test

2017-07-07 Thread git
wohali commented on issue #655: Failure waiting for db shutdown in couchdb_1283 
test
URL: https://github.com/apache/couchdb/issues/655#issuecomment-313831089
 
 
   FYI, a reminder that 
[COUCHDB-1283](https://issues.apache.org/jira/browse/COUCHDB-1283) is: 
"Impossible to compact view groups when number of active databases > 
max_dbs_open". Is a fix for this issue still valid for 2.0, @rnewson ?
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali opened a new issue #655: Failure waiting for db shutdown in couchdb_1283 test

2017-07-07 Thread git
wohali opened a new issue #655: Failure waiting for db shutdown in couchdb_1283 
test
URL: https://github.com/apache/couchdb/issues/655
 
 
   ## Failure scenario
   * 
https://builds.apache.org/blue/organizations/jenkins/CouchDB/detail/master/62/pipeline/43
 * 
   ```
   couchdb_views_tests:266: couchdb_1309...[0.188 s] ok
   [os_mon] memory supervisor port (memsup): Erlang has closed
   [os_mon] cpu supervisor port (cpu_sup): Erlang has closed
   [done in 1.286 s]
 View group shutdown
   couchdb_views_tests:315: couchdb_1283...*failed*
   in function couchdb_views_tests:wait_for_process_shutdown/3 
(test/couchdb_views_tests.erl, line 413)
   in call from couchdb_views_tests:'-couchdb_1283/0-fun-21-'/0 
(test/couchdb_views_tests.erl, line 358)
   **error:{assertion_failed,[{module,couchdb_views_tests},
  {line,415},
  {reason,"Failure waiting for db shutdown"}]}
 output:<<"">>
   ```
   
   couch.log from a normal run:
   ```
   [notice] 2017-07-07T22:56:49.895207Z nonode@nohost <0.28311.0>  
config: [couchdb] max_dbs_open set to 3 for reason nil
   [notice] 2017-07-07T22:56:49.895759Z nonode@nohost <0.28311.0>  
config: [couchdb] delayed_commits set to false for reason nil
   [info] 2017-07-07T22:56:50.079629Z nonode@nohost <0.28540.0>  
Opening index for db: eunit-test-db-1499468209895967 idx: _design/foo sig: 
"0963a19eb3ef007218f1e11f0aefa2d9"
   [info] 2017-07-07T22:56:50.079947Z nonode@nohost <0.28543.0>  
Starting index update for db: eunit-test-db-1499468209895967 idx: _design/foo
   [notice] 2017-07-07T22:56:50.140386Z nonode@nohost <0.28480.0>  
127.0.0.1 - - GET /eunit-test-db-1499468209895967/_design/foo/_view/foo 200
   [info] 2017-07-07T22:56:50.142498Z nonode@nohost <0.28543.0>  Index 
update finished for db: eunit-test-db-1499468209895967 idx: _design/foo
   [info] 2017-07-07T22:56:50.183784Z nonode@nohost <0.28523.0>  Index 
shutdown by monitor notice for db: eunit-test-db-1499468025801812 idx: 
_design/_auth
   [info] 2017-07-07T22:56:50.184687Z nonode@nohost <0.28523.0>  
Closing index for db: eunit-test-db-1499468025801812 idx: _design/_auth sig: 
"3e823c2a4383ac0c18d4e574135a5b08" because normal
   [info] 2017-07-07T22:56:50.218193Z nonode@nohost <0.28540.0>  Index 
shutdown by monitor notice for db: eunit-test-db-1499468209895967 idx: 
_design/foo
   [info] 2017-07-07T22:56:50.218838Z nonode@nohost <0.28540.0>  
Closing index for db: eunit-test-db-1499468209895967 idx: _design/foo sig: 
"0963a19eb3ef007218f1e11f0aefa2d9" because normal
   [info] 2017-07-07T22:56:50.252870Z nonode@nohost <0.28565.0>  
Opening index for db: eunit-test-db-1499468209895967 idx: _design/foo sig: 
"0963a19eb3ef007218f1e11f0aefa2d9"
   [info] 2017-07-07T22:56:50.253604Z nonode@nohost <0.28569.0>  
Compaction started for db: eunit-test-db-1499468209895967 idx: _design/foo
   [notice] 2017-07-07T22:56:50.257055Z nonode@nohost <0.28565.0>  
Compaction swap for view 
/home/joant/couchdb/tmp/data/.eunit-test-db-1499468209895967_design/mrview/0963a19eb3ef007218f1e11f0aefa2d9.view
 4227 4200
   [info] 2017-07-07T22:56:50.257720Z nonode@nohost <0.28569.0>  
Compaction finished for db: eunit-test-db-1499468209895967 idx: _design/foo
   [info] 2017-07-07T22:56:50.258556Z nonode@nohost <0.28565.0>  Index 
shutdown by monitor notice for db: eunit-test-db-1499468209895967 idx: 
_design/foo
   [info] 2017-07-07T22:56:50.259171Z nonode@nohost <0.28565.0>  
Closing index for db: eunit-test-db-1499468209895967 idx: _design/foo sig: 
"0963a19eb3ef007218f1e11f0aefa2d9" because normal
   [info] 2017-07-07T22:56:50.282171Z nonode@nohost <0.7.0>  
Application couch exited with reason: stopped
   ```
   
   couch.log from **THIS** run. _What is all this other activity?_:
   ```
   [notice] 2017-07-07T16:56:53.688512Z nonode@nohost <0.21667.1>  
config: [couchdb] max_dbs_open set to 3 for reason nil
   [notice] 2017-07-07T16:56:53.688851Z nonode@nohost <0.21667.1>  
config: [couchdb] delayed_commits set to false for reason nil
   [info] 2017-07-07T16:56:53.702918Z nonode@nohost <0.21896.1>  
Opening index for db: eunit-test-db-1499446219830124 idx: _design/_auth sig: 
"3e823c2a4383ac0c18d4e574135a5b08"
   [info] 2017-07-07T16:56:53.750851Z nonode@nohost <0.21904.1>  
Opening index for db: eunit-test-db-1499446220623551 idx: _design/_auth sig: 
"3e823c2a4383ac0c18d4e574135a5b08"
   [info] 2017-07-07T16:56:53.753143Z nonode@nohost <0.21876.1>  Index 
shutdown by monitor notice for db: eunit-test-db-1499446516999249 idx: 
_design/_auth
   [info] 2017-07-07T16:56:53.753985Z nonode@nohost <0.21876.1>  
Closing index for db: eunit-test-db-1499446516999249 idx: _design/_auth sig: 
"3e823c2a4383ac0c18d4e574135a5b08" because normal
   [info] 2017-07-07T16:56:53.817388Z nonode@nohost <0.21896.1> 

[jira] [Closed] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread Joan Touzet (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joan Touzet closed COUCHDB-3431.

Resolution: Invalid

CouchDB is a document-oriented database, not an array-oriented database. As 
such, we require the top-level structure to be an object, not an array.

This is because, at the very least, we have to place an "_id":"" in the 
object to act as the "primary key" via which to access the document itself. We 
couldn't do this if the top-level is an array.


> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)


[GitHub] wohali commented on issue #554: JS stats test has inconsistent results

2017-07-07 Thread git
wohali commented on issue #554: JS stats test has inconsistent results
URL: https://github.com/apache/couchdb/issues/554#issuecomment-313824985
 
 
   I have fully disabled this test for now. I'm starting to think that the 
dynamic behaviour of the clustered setup means these stats don't make as much 
sense anymore -- and that the test needs a complete rework.
   
   We can revisit after 2.1.0.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali closed issue #554: JS stats test has inconsistent results

2017-07-07 Thread git
wohali closed issue #554: JS stats test has inconsistent results
URL: https://github.com/apache/couchdb/issues/554
 
 
   
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] seantanly commented on issue #19: Upgrading Fauxton within 2.0.0 image

2017-07-07 Thread git
seantanly commented on issue #19: Upgrading Fauxton within 2.0.0 image
URL: https://github.com/apache/couchdb-docker/issues/19#issuecomment-313823027
 
 
   @garrensmith Thank you for the pointer; I opted to upgrade the prepackaged 
Fauxton within the image instead of running a separate Fauxton container.
   
   I managed to do so by commenting out the purging of the build deps and the 
node_modules folder, and adding `git` to the `apt-get install` line so that 
`npm install -g fauxton` works. Then I replaced the prepackaged Fauxton by doing
   
   ```
   rm -rf /opt/couchdb/share/www
   cp -Rp /usr/lib/node_modules/fauxton/dist/release /opt/couchdb/share/www
   ```
   
   This way, I can still access the upgraded Fauxton at the original `/_utils` 
path without further configuration. The drawback is that the image is much 
larger than the original, because the build deps are kept around to allow 
upgrading Fauxton when needed.
   
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #571: eunit context cleanup times out while cleaning up os daemons

2017-07-07 Thread git
wohali commented on issue #571: eunit context cleanup times out while cleaning 
up os daemons
URL: https://github.com/apache/couchdb/issues/571#issuecomment-313821982
 
 
   Haven't seen this again in a bit. Closing for now...
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali closed issue #571: eunit context cleanup times out while cleaning up os daemons

2017-07-07 Thread git
wohali closed issue #571: eunit context cleanup times out while cleaning up os 
daemons
URL: https://github.com/apache/couchdb/issues/571
 
 
   
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #643: EUnit couchdb_views_tests failure: suspend_process after termination

2017-07-07 Thread git
wohali commented on issue #643: EUnit couchdb_views_tests failure: 
suspend_process after termination
URL: https://github.com/apache/couchdb/issues/643#issuecomment-313820358
 
 
   I believe these are being caused by #652.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali commented on issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313819946
 
 
   Note that I could see the flip side of this ticket: that we should leave it 
enabled and instead deal with the failing tests, like 
https://github.com/apache/couchdb/issues/649 and 
couchdb_views_tests:couchdb_1283. Let me know.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #649: couch_mrview_changes_since_tests gen_server failing with unknown_info

2017-07-07 Thread git
wohali commented on issue #649: couch_mrview_changes_since_tests gen_server 
failing with unknown_info
URL: https://github.com/apache/couchdb/issues/649#issuecomment-313819812
 
 
   I think this is because of #652 in the eunit test - but do we want to keep 
this issue to track fixing the race condition? Keep in mind that once the PR is 
merged, we won't easily reproduce this bug anymore.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali opened a new pull request #654: Disable compaction daemon on eunit run couch startups

2017-07-07 Thread git
wohali opened a new pull request #654: Disable compaction daemon on eunit run 
couch startups
URL: https://github.com/apache/couchdb/pull/654
 
 
   Commit 21f9544 enabled the compaction daemon by default. And commit
   3afe3ad disabled the compaction daemon at the start of the compaction
   daemon tests. Unfortunately, the compaction daemon remains active
   during all the other EUnit tests.
   
   I attempted to override the [compactions] _default line in the file
   rel/files/eunit.ini but specifying `_default=` or `_default=[]` did not
   provide the desired behaviour.
   
   This change disables the _default config as part of
   `test_util:start_couch` after startup. This means there is a very
   brief period during which the daemon is running, but in empirical
   testing the only thing I've seen it manage to do is compact `_dbs`
   before being disabled.
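   
   As a hedged sketch of the idea (not the actual patch), the post-startup 
   disable amounts to dropping the `[compactions]` `_default` rule without 
   persisting the change:
   
   ```
   disable_compaction_daemon() ->
       %% Remove the [compactions] _default rule; `false` means do not persist.
       config:delete("compactions", "_default", false).
   ```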
   
   Tested with `make soak-eunit` for 3 hours; this seems to eliminate
   the `all_dbs_active` error we've been seeing in CI runs since the
   compaction daemon was enabled by default.
   
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali commented on issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313814388
 
 
   So @janl I think your change only disables the daemon going into the 
compaction daemon tests. I think the daemon is still running for all the other 
tests.
   
   I have a change to `test_util:start_couch` that I am testing in a soak loop 
now. If there are no failures I'll submit a PR.
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] eiri commented on issue #526: Fix _local_docs end-point

2017-07-07 Thread git
eiri commented on issue #526: Fix  _local_docs end-point
URL: https://github.com/apache/couchdb/pull/526#issuecomment-313802174
 
 
   @davisp So I went ahead, reverted the new module, and modified 
`fabric_view_all_docs.erl` instead. Does it look better to you?
 

This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] wohali commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali commented on issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313790860
 
 
   Be that as it may, empirically I'm seeing the compaction daemon running during 
other eunit tests. More investigation is required.
 



[GitHub] janl commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
janl commented on issue #652: Compaction daemon is interfering with eunit tests 
(all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313776529
 
 
   I had patched that though: https://github.com/apache/couchdb/pull/624/files
 



[Jenkins] FAILURE: CouchDB » master #63

2017-07-07 Thread Apache Jenkins Server
Boo, we failed. https://builds.apache.org/job/CouchDB/job/master/63/

[GitHub] eiri closed pull request #645: Pass db open options to fabric_view_map

2017-07-07 Thread git
eiri closed pull request #645: Pass db open options to fabric_view_map
URL: https://github.com/apache/couchdb/pull/645
 
 
   
 



[GitHub] eiri commented on issue #653: User context not passed in `_show` for `_users` database

2017-07-07 Thread git
eiri commented on issue #653: User context not passed in `_show` for `_users` 
database
URL: https://github.com/apache/couchdb/issues/653#issuecomment-313755237
 
 
   Reference issue apache/couchdb#645
 



[GitHub] eiri opened a new issue #653: User context not passed in `_show` for `_users` database

2017-07-07 Thread git
eiri opened a new issue #653: User context not passed in `_show` for `_users` 
database
URL: https://github.com/apache/couchdb/issues/653
 
 
   ## Expected behavior
   
   `_show` is supposed to work on admin-only databases, such as `_users`, when 
accessed with admin privileges.
   
   ## Current behavior
   
   On attempting to access `_show` on the `_users` database, a 500 error is raised:
    ```
    {
        "error": "case_clause",
        "reason": "{forbidden,<<\"Only administrators can view design docs in the users database.\">>}",
        "ref": 1517023101
    }
    ```
   
   ## Possible Solution
   The user context needs to be passed to fabric's open_doc [here](https://github.com/apache/couchdb/blob/master/src/chttpd/src/chttpd_show.erl#L26).
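   
   A minimal sketch of that idea (hypothetical, not the actual patch; it 
   assumes `fabric:open_doc/3` honours a `{user_ctx, UserCtx}` option):
   
    ```
    % Hypothetical sketch: thread the requester's user_ctx through to fabric
    % so admin credentials reach the shards of admin-only dbs such as _users.
    maybe_open_doc(Db, DocId, UserCtx) ->
        Options = [conflicts, {user_ctx, UserCtx}],
        case fabric:open_doc(Db, DocId, Options) of
            {ok, Doc} ->
                Doc;
            {not_found, _} ->
                nil
        end.
    ```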
   
   ## Steps to Reproduce
   In the `_users` db, create the following two ddocs:
    ```
    {
        "_id": "_design/users",
        "views": {
            "names": {
                "map": "function(doc) { emit(doc.name); }"
            }
        },
        "lists": {
            "names": "function(head, req) { var row; while (row = getRow()) { send(\"name: \" + row.key + \"\\n\"); } }"
        }
    }

    {
        "_id": "_design/show-function-examples",
        "shows": {
            "summary": "function(doc, req) {return '' + doc.name + '';}"
        }
    }
    ```
   
   Run a local cluster and query with `curl -u $CRED http://localhost:15984/_users/_design/show-function-examples/_show/summary/_design/users`
   
   ## Your Environment
   * Version used: 2.1.0-2f45a72cd
   * Browser Name and version: httpie 9.9.9
   * Operating System and version (desktop or mobile): Darwin Kernel Version 
15.6.0
   
 



[GitHub] tonysun83 commented on issue #645: Pass db open options to fabric_view_map

2017-07-07 Thread git
tonysun83 commented on issue #645: Pass db open options to fabric_view_map
URL: https://github.com/apache/couchdb/pull/645#issuecomment-313752478
 
 
   Tested `_show` and that led to a 500:
   ```
    [error] 2017-07-07T17:59:08.236067Z node1@127.0.0.1 <0.7836.0> 354b71b309 req_err(1517023101) case_clause : {forbidden,<<"Only administrators can view design docs in the users database.">>}
    [<<"chttpd_show:maybe_open_doc/2 L26">>,<<"chttpd_show:handle_doc_show_req/3 L52">>,<<"chttpd:process_request/1 L295">>,<<"chttpd:handle_request_int/1 L231">>,<<"mochiweb_http:headers/6 L122">>,<<"proc_lib:init_p_do_apply/3 L237">>]
   ```
   This is a different bug though.
 



[GitHub] tonysun83 commented on issue #645: Pass db open options to fabric_view_map

2017-07-07 Thread git
tonysun83 commented on issue #645: Pass db open options to fabric_view_map
URL: https://github.com/apache/couchdb/pull/645#issuecomment-313752494
 
 
   +1
 



[Jenkins] FAILURE: CouchDB » master #62

2017-07-07 Thread Apache Jenkins Server
Boo, we failed. https://builds.apache.org/job/CouchDB/job/master/62/

[jira] [Issue Comment Deleted] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread glen (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

glen updated COUCHDB-3431:
--
Comment: was deleted

(was: Responded tested a different input)

> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?





[jira] [Reopened] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread glen (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

glen reopened COUCHDB-3431:
---

Responded tested a different input

> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?





[GitHub] wohali closed pull request #651: JS test cleanup x3

2017-07-07 Thread git
wohali closed pull request #651: JS test cleanup x3
URL: https://github.com/apache/couchdb/pull/651
 
 
   
 



[jira] [Closed] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread Joan Touzet (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3431?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joan Touzet closed COUCHDB-3431.

Resolution: Fixed

It's whatever you're doing for encoding that is wrong. It definitely works.

$ curl -X PUT localhost:15984/foo
{"ok":true}
$ curl -X PUT localhost:15984/foo/bar -d '{"array":[{}]}'
{"ok":true,"id":"bar","rev":"1-3ff089b81196a0ce18d927c093a9b3de"}
 $ curl localhost:15984/foo/bar
{"_id":"bar","_rev":"1-3ff089b81196a0ce18d927c093a9b3de","array":[{}]}


> 400 error when posting valid JSON
> -
>
> Key: COUCHDB-3431
> URL: https://issues.apache.org/jira/browse/COUCHDB-3431
> Project: CouchDB
>  Issue Type: Bug
>Affects Versions: 2.0.0
>Reporter: glen
>
> Is anything invalid about the first member of an array being an empty object?
> Please post [{}] to your Couch. Does your Couch return invalid json 400 
> error, or is it just me?





[jira] [Closed] (COUCHDB-3432) make fails at jiffy

2017-07-07 Thread Joan Touzet (JIRA)

 [ 
https://issues.apache.org/jira/browse/COUCHDB-3432?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Joan Touzet closed COUCHDB-3432.

Resolution: Works for Me

> make fails at jiffy
> ---
>
> Key: COUCHDB-3432
> URL: https://issues.apache.org/jira/browse/COUCHDB-3432
> Project: CouchDB
>  Issue Type: Bug
>  Components: Build System
>Reporter: Nicholas Chandoke
>
> On CentOS 7. {{[g]make release}} fails:
> {quote}==> couch_epi (compile)
> [. . .]
> ==> ioq (compile)
> ==> jiffy (compile)
> Compiling /home/n/apache-couchdb-2.0.0/src/jiffy/c_src/doubles.cc
> sh: line 0: exec: c++: not found
> ERROR: compile failed while processing 
> /home/n/apache-couchdb-2.0.0/src/jiffy: rebar_abort
> gmake: *** [couch] Error 1{quote}





[jira] [Commented] (COUCHDB-3432) make fails at jiffy

2017-07-07 Thread Joan Touzet (JIRA)

[ 
https://issues.apache.org/jira/browse/COUCHDB-3432?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16078341#comment-16078341
 ] 

Joan Touzet commented on COUCHDB-3432:
--

You are missing a build dependency.

Here is what we install to build on CentOS 7:

https://github.com/apache/couchdb-ci/blob/master/ansible/roles/dependencies-centos/tasks/main.yml

> make fails at jiffy
> ---
>
> Key: COUCHDB-3432
> URL: https://issues.apache.org/jira/browse/COUCHDB-3432
> Project: CouchDB
>  Issue Type: Bug
>  Components: Build System
>Reporter: Nicholas Chandoke
>
> On CentOS 7. {{[g]make release}} fails:
> {quote}==> couch_epi (compile)
> [. . .]
> ==> ioq (compile)
> ==> jiffy (compile)
> Compiling /home/n/apache-couchdb-2.0.0/src/jiffy/c_src/doubles.cc
> sh: line 0: exec: c++: not found
> ERROR: compile failed while processing 
> /home/n/apache-couchdb-2.0.0/src/jiffy: rebar_abort
> gmake: *** [couch] Error 1{quote}





[GitHub] wohali commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali commented on issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313727920
 
 
   `src/couch/test/couchdb_compaction_daemon_tests.erl`
   
   I guess we could have this do the config dance to re-establish the correct 
value for `[daemons] compaction_daemon` and do the same thing I did in 
`dev/run` here: 
https://github.com/apache/couchdb/pull/651/files#diff-d86563699bde00df2cce2613ecbed531R193
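   
   A rough sketch of that "config dance" as an eunit fixture (hypothetical, 
   assuming couch's `config` API; the daemon spec string is illustrative only):
   
    ```
    % Hypothetical setup/teardown: remember the current value, force the
    % compaction daemon on for these tests, then restore the old value.
    setup() ->
        Saved = config:get("daemons", "compaction_daemon"),
        ok = config:set("daemons", "compaction_daemon",
            "{couch_compaction_daemon, start_link, []}", false),
        Saved.

    teardown(undefined) ->
        ok = config:delete("daemons", "compaction_daemon", false);
    teardown(Saved) ->
        ok = config:set("daemons", "compaction_daemon", Saved, false).
    ```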
 



[jira] [Created] (COUCHDB-3432) make fails at jiffy

2017-07-07 Thread Nicholas Chandoke (JIRA)
Nicholas Chandoke created COUCHDB-3432:
--

 Summary: make fails at jiffy
 Key: COUCHDB-3432
 URL: https://issues.apache.org/jira/browse/COUCHDB-3432
 Project: CouchDB
  Issue Type: Bug
  Components: Build System
Reporter: Nicholas Chandoke


On CentOS 7. {{[g]make release}} fails:
{quote}==> couch_epi (compile)
[. . .]
==> ioq (compile)
==> jiffy (compile)
Compiling /home/n/apache-couchdb-2.0.0/src/jiffy/c_src/doubles.cc
sh: line 0: exec: c++: not found
ERROR: compile failed while processing /home/n/apache-couchdb-2.0.0/src/jiffy: 
rebar_abort
gmake: *** [couch] Error 1{quote}





[jira] [Created] (COUCHDB-3431) 400 error when posting valid JSON

2017-07-07 Thread glen (JIRA)
glen created COUCHDB-3431:
-

 Summary: 400 error when posting valid JSON
 Key: COUCHDB-3431
 URL: https://issues.apache.org/jira/browse/COUCHDB-3431
 Project: CouchDB
  Issue Type: Bug
Reporter: glen


Is anything invalid about the first member of an array being an empty object?

Please post [{}] to your Couch. Does your Couch return invalid json 400 error, 
or is it just me?






[GitHub] janl commented on issue #51: feat: switch from follow to cloudant-follow, which is maintained

2017-07-07 Thread git
janl commented on issue #51: feat: switch from follow to cloudant-follow, which 
is maintained
URL: https://github.com/apache/couchdb-nano/pull/51#issuecomment-313674201
 
 
   ping @garrensmith @glynnbird @jo 
 



[GitHub] wohali opened a new issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali opened a new issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652
 
 
   The recent enabling of the compaction daemon also has it now running during 
the eunit tests, which is why we're suddenly getting these `all_dbs_active` 
errors (for which I haven't filed a ticket yet - this is it.)
   
   If you had a `tmp/` directory inside of your couchdb checkout, you wouldn't 
notice this until you cleaned it out. Try removing your `tmp/` directory and 
you'll see the problems.
   
   Grep your `*/.eunit/couch.log` files for "Compaction" and you'll see it 
compacting `_nodes`, `_dbs`, etc. With each eunit test not deleting its dbs (in 
many cases), we end up with the daemon cycling over a lot of dbs over time, 
which ends up filling the LRU and sometimes exhausting fds. Tests closer to the 
end of the eunit test suite fail more often with `all_dbs_active`, especially 
`couchdb_views_tests:couchdb_1283`.
   
   I tried to set `[compactions] _default = []` (or the null string) in 
`rel/files/eunit.ini` but it doesn't work. [The trick I used](https://github.com/apache/couchdb/pull/651/files#diff-d86563699bde00df2cce2613ecbed531R193) 
to stabilise the JS tests in my PR doesn't work either, because some eunit 
tests actually need the compaction daemon, and that change completely removes 
its definition. Grumble.
   
   ## Expected Behavior
   Compaction daemon should be disabled completely during eunit test runs.
   
   ## Current Behavior
   Compaction daemon is running during eunit test runs, holding databases open, 
filling the LRU and wreaking havoc in general.
   
   ## Possible Solution
   We could set the `[compactions] _default` to something like:
   
   ```
   [compactions]
    _default = [{db_fragmentation, "99%"}, {view_fragmentation, "99%"}, {from, "00:00"}, {to, "00:01"}, {strict_window, true}]
   ```
   
   but there's still a window during which it could be interfering with 
databases, and during which it's going to scan through every single database it 
can get its hands on, which messes up the LRU. I don't know if specifying the 
same `from` and `to` time would work.
   
   ## Steps to Reproduce (for bugs)
   `make eunit` and grep your ~/.eunit/couch.log files for the string 
`"Compaction"`.
   
   
 



[GitHub] wohali commented on issue #652: Compaction daemon is interfering with eunit tests (all_dbs_active errors)

2017-07-07 Thread git
wohali commented on issue #652: Compaction daemon is interfering with eunit 
tests (all_dbs_active errors)
URL: https://github.com/apache/couchdb/issues/652#issuecomment-313614461
 
 
   @janl mind taking a look at this one?
 



[GitHub] couchdb-jiffy issue #3: Fix enc_long for 64-bit Windows compilation.

2017-07-07 Thread NorthNick
Github user NorthNick commented on the issue:

https://github.com/apache/couchdb-jiffy/pull/3
  
Thanks Joan




[GitHub] wohali closed issue #630: EUnit couch_changes_tests failing should_select_with_continuous test due to heartbeats

2017-07-07 Thread git
wohali closed issue #630: EUnit couch_changes_tests failing 
should_select_with_continuous test due to heartbeats
URL: https://github.com/apache/couchdb/issues/630
 
 
   
 



[GitHub] wohali opened a new pull request #651: JS test cleanup x3

2017-07-07 Thread git
wohali opened a new pull request #651: JS test cleanup x3
URL: https://github.com/apache/couchdb/pull/651
 
 
   ## Overview
   
   This PR includes 3 small commits to fix up the JS test suite:
   
   1. Really, truly disable the compaction daemon. (The previous approach 
wasn't working.)
   2. Ensure that every JS test cleans up after itself, removing all old 
databases before exiting.
   3. Disable the flaky `stats.js` test, which needs a complete rewrite to make 
sense. It was my fault re-enabling it even partially for 2.0. Sorry.
   
   ## Testing recommendations
   
   `make javascript` and look for all passes.
 
