This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch enhance_reduce_limit-2
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 2d243f03c57b7b98c9226c8c721efb3c046963db
Author: Robert Newson <[email protected]>
AuthorDate: Thu Oct 30 10:53:18 2025 +0000

    Log db+view for reduce overflow errors when enabled
---
 share/server/views.js                 | 11 +++--------
 src/couch/src/couch_proc_manager.erl  |  3 ++-
 src/couch/src/couch_query_servers.erl | 11 +++--------
 src/fabric/include/fabric.hrl         |  1 +
 src/fabric/src/fabric_view_reduce.erl | 13 +++++++++++--
 5 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/share/server/views.js b/share/server/views.js
index d4e0e6ed6..c7870ea9e 100644
--- a/share/server/views.js
+++ b/share/server/views.js
@@ -44,12 +44,7 @@ var Views = (function() {
         "input size:", input_length,
         "output size:", reduce_length
       ].join(" ");
-      if (State.query_config.reduce_limit === "log") {
-        log("reduce_overflow_error: " + log_message);
-        print("[true," + reduce_line + "]");
-      } else {
-        throw(["error", "reduce_overflow_error", log_message]);
-      };
+      throw(["error", "reduce_overflow_error", log_message]);
     } else {
       print("[true," + reduce_line + "]");
     }
@@ -65,8 +60,8 @@ var Views = (function() {
       // fatal_error. But by default if they don't do error handling we
       // just eat the exception and carry on.
       //
-      // In this case we abort map processing but don't destroy the 
-      // JavaScript process. If you need to destroy the JavaScript 
+      // In this case we abort map processing but don't destroy the
+      // JavaScript process. If you need to destroy the JavaScript
      // process, throw the error form matched by the block below.
       throw(["error", "map_runtime_error", "function raised 'fatal_error'"]);
     } else if (err[0] == "fatal") {
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index aa538e23e..106aa2b04 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -26,7 +26,8 @@
     reload/0,
     terminate_stale_procs/0,
     get_servers_from_env/1,
-    native_query_server_enabled/0
+    native_query_server_enabled/0,
+    get_reduce_limit/0
 ]).
 
 -export([
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index ef9e28f9b..dae7ac60b 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -282,26 +282,21 @@ check_sum_overflow(InSize, OutSize, Sum) ->
     Overflowed = OutSize > reduce_limit_threshold() andalso OutSize * reduce_limit_ratio() > InSize,
     case config:get("query_server_config", "reduce_limit", "true") of
         "true" when Overflowed ->
-            Msg = log_sum_overflow(InSize, OutSize),
+            Msg = sum_overflow_msg(InSize, OutSize),
             {[
                 {<<"error">>, <<"builtin_reduce_error">>},
                 {<<"reason">>, Msg}
             ]};
-        "log" when Overflowed ->
-            log_sum_overflow(InSize, OutSize),
-            Sum;
         _ ->
             Sum
     end.
 
-log_sum_overflow(InSize, OutSize) ->
+sum_overflow_msg(InSize, OutSize) ->
     Fmt =
         "Reduce output must shrink more rapidly: "
         "input size: ~b "
         "output size: ~b",
-    Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
-    couch_log:error(Msg, []),
-    Msg.
+    iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])).
 
 reduce_limit_threshold() ->
     config:get_integer("query_server_config", "reduce_limit_threshold", 4906).
diff --git a/src/fabric/include/fabric.hrl b/src/fabric/include/fabric.hrl
index 6312741c2..ecb8e9fd0 100644
--- a/src/fabric/include/fabric.hrl
+++ b/src/fabric/include/fabric.hrl
@@ -12,6 +12,7 @@
 
 -record(collector, {
     db_name=nil,
+    view_name=nil,
     query_args,
     callback,
     counters,
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
index 3206d01a4..adc4f09b7 100644
--- a/src/fabric/src/fabric_view_reduce.erl
+++ b/src/fabric/src/fabric_view_reduce.erl
@@ -50,7 +50,7 @@ go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
                 Callback({error, insufficient_storage}, Acc);
             {ok, Workers} ->
                 try
-                    go2(DbName, Workers, VInfo, CoordArgs, Callback, Acc)
+                    go2(DbName, VName, Workers, VInfo, CoordArgs, Callback, Acc)
                 after
                     fabric_streams:cleanup(Workers)
                 end;
@@ -64,7 +64,7 @@ go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
         rexi_monitor:stop(RexiMon)
     end.
 
-go2(DbName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
+go2(DbName, VName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
     #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
     RedSrc = couch_mrview_util:extract_view_reduce(VInfo),
     OsProc =
@@ -74,6 +74,7 @@ go2(DbName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
         end,
     State = #collector{
         db_name = DbName,
+        view_name = VName,
         query_args = Args,
         callback = Callback,
         counters = fabric_dict:init(Workers, 0),
@@ -169,6 +170,14 @@ handle_message({meta, Meta0}, {Worker, From}, State) ->
     end;
 handle_message(#view_row{} = Row, {_, _} = Source, State) ->
     handle_row(Row, Source, State);
+handle_message({view_row, #{value := {[{reduce_overflow_error, Msg}]}}} = Row, {_, _} = Source, State) ->
+    case couch_proc_manager:get_reduce_limit() of
+        "log" ->
+            couch_log:warning("reduce_overflow from ~s:~s: ~s", [State#collector.db_name, State#collector.view_name, Msg]);
+        _Else ->
+            ok
+    end,
+    handle_row(Row, Source, State);
 handle_message({view_row, #{}} = Row, {_, _} = Source, State) ->
     handle_row(Row, Source, State);
 handle_message(complete, Worker, #collector{counters = Counters0} = State) ->
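
Note: the commit exports couch_proc_manager:get_reduce_limit/0 and calls it from the new
fabric_view_reduce clause, but its body is not part of these hunks. A minimal sketch, assuming
it simply wraps the same "query_server_config"/"reduce_limit" lookup that couch_query_servers
already uses (the body below is an assumption, not the committed implementation):

    %% Sketch only: assumed body for the newly exported function. The diff
    %% shows just the export; the fabric coordinator compares the result
    %% against "log" before emitting the warning.
    get_reduce_limit() ->
        config:get("query_server_config", "reduce_limit", "true").

With reduce_limit set to "log", the new handle_message clause logs the database and view name
at the coordinator for each reduce_overflow_error row, rather than each query server logging
its own error as before.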
