Use MemTracker::MemLimitExceeded() where appropriate This is an incremental improvement towards IMPALA-3090. Where possible we use MemTracker::MemLimitExceeded() instead of directly constructing the Status object.
The remaining cases where we directly construct the state are related to the BufferedBlockMgr, which will be deprecated: either they are produced by the BufferedBlockMgr, or produced when a Pin() unexpectedly fails. Both of these will go away anyway. Change-Id: I77c37f86dd15ace39e28b5cc72d37bc8d4109041 Reviewed-on: http://gerrit.cloudera.org:8080/3148 Reviewed-by: Tim Armstrong <[email protected]> Tested-by: Internal Jenkins Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/7d5d36a6 Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/7d5d36a6 Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/7d5d36a6 Branch: refs/heads/master Commit: 7d5d36a6e4a866c01193516a65c0fee00d81c20f Parents: 9d23f4a Author: Tim Armstrong <[email protected]> Authored: Thu May 19 16:03:14 2016 -0700 Committer: Tim Armstrong <[email protected]> Committed: Mon May 23 08:40:19 2016 -0700 ---------------------------------------------------------------------- be/src/exec/hdfs-parquet-scanner.cc | 2 +- be/src/exec/kudu-scanner.cc | 5 ++--- be/src/exec/nested-loop-join-node.cc | 14 +++++++++----- be/src/runtime/buffered-tuple-stream-test.cc | 6 ++++-- be/src/runtime/collection-value-builder-test.cc | 2 +- be/src/runtime/collection-value-builder.h | 17 +++++++++++------ be/src/runtime/row-batch-serialize-test.cc | 2 +- be/src/service/fragment-mgr.cc | 16 ++++++++-------- 8 files changed, 37 insertions(+), 27 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/exec/hdfs-parquet-scanner.cc ---------------------------------------------------------------------- diff --git a/be/src/exec/hdfs-parquet-scanner.cc b/be/src/exec/hdfs-parquet-scanner.cc index a70fc8a..d9ce76f 100644 --- a/be/src/exec/hdfs-parquet-scanner.cc +++ b/be/src/exec/hdfs-parquet-scanner.cc @@ -1715,7 
+1715,7 @@ bool HdfsParquetScanner::CollectionColumnReader::ReadSlot(void* slot, MemPool* p CollectionValue* coll_slot = reinterpret_cast<CollectionValue*>(slot); *coll_slot = CollectionValue(); CollectionValueBuilder builder( - coll_slot, *slot_desc_->collection_item_descriptor(), pool); + coll_slot, *slot_desc_->collection_item_descriptor(), pool, parent_->state_); bool continue_execution = parent_->AssembleCollection( children_, new_collection_rep_level(), &builder); if (!continue_execution) return false; http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/exec/kudu-scanner.cc ---------------------------------------------------------------------- diff --git a/be/src/exec/kudu-scanner.cc b/be/src/exec/kudu-scanner.cc index 18098aa..db98f8d 100644 --- a/be/src/exec/kudu-scanner.cc +++ b/be/src/exec/kudu-scanner.cc @@ -290,9 +290,8 @@ Status KuduScanner::RelocateValuesFromKudu(Tuple* tuple, MemPool* mem_pool) { if (LIKELY(val->len > 0)) { // The allocator returns a NULL ptr when out of memory. if (UNLIKELY(val->ptr == NULL)) { - Status s = Status::MemLimitExceeded(); - s.AddDetail("Could not allocate memory for string."); - return s; + return mem_pool->mem_tracker()->MemLimitExceeded(state_, + "Kudu scanner could not allocate memory for string", val->len); } memcpy(val->ptr, old_buf, val->len); } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/exec/nested-loop-join-node.cc ---------------------------------------------------------------------- diff --git a/be/src/exec/nested-loop-join-node.cc b/be/src/exec/nested-loop-join-node.cc index 646c089..a12957e 100644 --- a/be/src/exec/nested-loop-join-node.cc +++ b/be/src/exec/nested-loop-join-node.cc @@ -80,8 +80,10 @@ Status NestedLoopJoinNode::Prepare(RuntimeState* state) { if (child(1)->type() == TPlanNodeType::type::SINGULAR_ROW_SRC_NODE) { // Allocate a fixed-size bitmap with a single element if we have a singular // row source node as our build child. 
- if (!mem_tracker()->TryConsume(Bitmap::MemUsage(1))) { - return Status::MemLimitExceeded(); + int64_t bitmap_mem_usage = Bitmap::MemUsage(1); + if (!mem_tracker()->TryConsume(bitmap_mem_usage)) { + return mem_tracker()->MemLimitExceeded(state, + "Could not allocate bitmap in nested loop join", bitmap_mem_usage); } matching_build_rows_.reset(new Bitmap(1)); } else { @@ -185,9 +187,11 @@ Status NestedLoopJoinNode::ConstructBuildSide(RuntimeState* state) { matching_build_rows_->SetAllBits(false); } else { // Account for the additional memory used by the bitmap. - if (!mem_tracker()->TryConsume(Bitmap::MemUsage(num_bits) - - matching_build_rows_->MemUsage())) { - return Status::MemLimitExceeded(); + int64_t bitmap_size_increase = + Bitmap::MemUsage(num_bits) - matching_build_rows_->MemUsage(); + if (!mem_tracker()->TryConsume(bitmap_size_increase)) { + return mem_tracker()->MemLimitExceeded(state, + "Could not expand bitmap in nested loop join", bitmap_size_increase); } matching_build_rows_->Reset(num_bits); } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/runtime/buffered-tuple-stream-test.cc ---------------------------------------------------------------------- diff --git a/be/src/runtime/buffered-tuple-stream-test.cc b/be/src/runtime/buffered-tuple-stream-test.cc index c1633a9..a72be3c 100644 --- a/be/src/runtime/buffered-tuple-stream-test.cc +++ b/be/src/runtime/buffered-tuple-stream-test.cc @@ -1006,7 +1006,8 @@ TEST_F(ArrayTupleStreamTest, TestArrayDeepCopy) { CollectionValue* cv = tuple0->GetCollectionSlot(array_slot_desc->tuple_offset()); cv->ptr = NULL; cv->num_tuples = 0; - CollectionValueBuilder builder(cv, *item_desc, mem_pool_.get(), array_len); + CollectionValueBuilder builder(cv, *item_desc, mem_pool_.get(), runtime_state_, + array_len); Tuple* array_data; int num_rows; builder.GetFreeMemory(&array_data, &num_rows); @@ -1108,7 +1109,8 @@ TEST_F(ArrayTupleStreamTest, TestComputeRowSize) { const TupleDescriptor* item_desc = 
array_slot->collection_item_descriptor(); int array_len = 128; CollectionValue* cv = tuple0->GetCollectionSlot(array_slot->tuple_offset()); - CollectionValueBuilder builder(cv, *item_desc, mem_pool_.get(), array_len); + CollectionValueBuilder builder(cv, *item_desc, mem_pool_.get(), runtime_state_, + array_len); Tuple* array_data; int num_rows; builder.GetFreeMemory(&array_data, &num_rows); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/runtime/collection-value-builder-test.cc ---------------------------------------------------------------------- diff --git a/be/src/runtime/collection-value-builder-test.cc b/be/src/runtime/collection-value-builder-test.cc index d0bc2fa..e86737b 100644 --- a/be/src/runtime/collection-value-builder-test.cc +++ b/be/src/runtime/collection-value-builder-test.cc @@ -41,7 +41,7 @@ TEST(CollectionValueBuilderTest, MaxBufferSize) { MemTracker tracker(mem_limit, mem_limit); MemPool pool(&tracker); CollectionValueBuilder coll_value_builder( - &coll_value, tuple_desc, &pool, initial_capacity); + &coll_value, tuple_desc, &pool, NULL, initial_capacity); EXPECT_EQ(tracker.consumption(), initial_capacity * 4); // Attempt to double the buffer so it goes over 32-bit INT_MAX. 
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/runtime/collection-value-builder.h ---------------------------------------------------------------------- diff --git a/be/src/runtime/collection-value-builder.h b/be/src/runtime/collection-value-builder.h index f7967e9..c57b546 100644 --- a/be/src/runtime/collection-value-builder.h +++ b/be/src/runtime/collection-value-builder.h @@ -16,6 +16,7 @@ #define IMPALA_RUNTIME_COLLECTION_VALUE_BUILDER_H #include "runtime/collection-value.h" +#include "runtime/mem-tracker.h" #include "runtime/tuple.h" #include "util/debug-util.h" @@ -30,10 +31,12 @@ class CollectionValueBuilder { static const int DEFAULT_INITIAL_TUPLE_CAPACITY = 4; CollectionValueBuilder(CollectionValue* coll_value, const TupleDescriptor& tuple_desc, - MemPool* pool, int initial_tuple_capacity = DEFAULT_INITIAL_TUPLE_CAPACITY) + MemPool* pool, RuntimeState* state, + int initial_tuple_capacity = DEFAULT_INITIAL_TUPLE_CAPACITY) : coll_value_(coll_value), tuple_desc_(tuple_desc), - pool_(pool) { + pool_(pool), + state_(state) { buffer_size_ = initial_tuple_capacity * tuple_desc_.byte_size(); coll_value_->ptr = pool_->TryAllocate(buffer_size_); if (coll_value_->ptr == NULL) buffer_size_ = 0; @@ -61,10 +64,9 @@ class CollectionValueBuilder { *num_tuples = 0; string path = tuple_desc_.table_desc() == NULL ? 
"" : PrintPath(*tuple_desc_.table_desc(), tuple_desc_.tuple_path()); - Status status = Status::MemLimitExceeded(); - status.AddDetail(ErrorMsg(TErrorCode::COLLECTION_ALLOC_FAILED, new_buffer_size, - path, buffer_size_, coll_value_->num_tuples).msg()); - return status; + return pool_->mem_tracker()->MemLimitExceeded(state_, + ErrorMsg(TErrorCode::COLLECTION_ALLOC_FAILED, new_buffer_size, + path, buffer_size_, coll_value_->num_tuples).msg(), new_buffer_size); } memcpy(new_buf, coll_value_->ptr, bytes_written); coll_value_->ptr = new_buf; @@ -97,6 +99,9 @@ class CollectionValueBuilder { /// The pool backing coll_value_'s buffer MemPool* pool_; + /// May be NULL. If non-NULL, used to log memory limit errors. + RuntimeState* state_; + /// The current size of coll_value_'s buffer in bytes, including any unused space /// (i.e. buffer_size_ is equal to or larger than coll_value_->ByteSize()). int64_t buffer_size_; http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/runtime/row-batch-serialize-test.cc ---------------------------------------------------------------------- diff --git a/be/src/runtime/row-batch-serialize-test.cc b/be/src/runtime/row-batch-serialize-test.cc index cfe539f..cb49bda 100644 --- a/be/src/runtime/row-batch-serialize-test.cc +++ b/be/src/runtime/row-batch-serialize-test.cc @@ -158,7 +158,7 @@ class RowBatchSerializeTest : public testing::Test { const TupleDescriptor* item_desc = slot_desc.collection_item_descriptor(); int array_len = rand() % (MAX_ARRAY_LEN + 1); CollectionValue cv; - CollectionValueBuilder builder(&cv, *item_desc, pool, array_len); + CollectionValueBuilder builder(&cv, *item_desc, pool, NULL, array_len); Tuple* tuple_mem; int n; EXPECT_OK(builder.GetFreeMemory(&tuple_mem, &n)); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/7d5d36a6/be/src/service/fragment-mgr.cc ---------------------------------------------------------------------- diff --git a/be/src/service/fragment-mgr.cc 
b/be/src/service/fragment-mgr.cc index c3e44e4..5210f8c 100644 --- a/be/src/service/fragment-mgr.cc +++ b/be/src/service/fragment-mgr.cc @@ -42,14 +42,14 @@ Status FragmentMgr::ExecPlanFragment(const TExecPlanFragmentParams& exec_params) // Preparing and opening the fragment creates a thread and consumes a non-trivial // amount of memory. If we are already starved for memory, cancel the fragment as // early as possible to avoid digging the hole deeper. - if (ExecEnv::GetInstance()->process_mem_tracker()->LimitExceeded()) { - Status status = Status::MemLimitExceeded(); - status.AddDetail(Substitute("Instance $0 of plan fragment $1 of query $2 could not " - "start because the backend Impala daemon is over its memory limit", - PrintId(exec_params.fragment_instance_ctx.fragment_instance_id), - exec_params.fragment.display_name, - PrintId(exec_params.fragment_instance_ctx.query_ctx.query_id))); - return status; + MemTracker* process_mem_tracker = ExecEnv::GetInstance()->process_mem_tracker(); + if (process_mem_tracker->LimitExceeded()) { + string msg = Substitute("Instance $0 of plan fragment $1 of query $2 could not " + "start because the backend Impala daemon is over its memory limit", + PrintId(exec_params.fragment_instance_ctx.fragment_instance_id), + exec_params.fragment.display_name, + PrintId(exec_params.fragment_instance_ctx.query_ctx.query_id)); + return process_mem_tracker->MemLimitExceeded(NULL, msg, 0); } // Remote fragments must always have a sink. Remove when IMPALA-2905 is resolved.
