This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 98739a84557a209e05694abd79f62f7f7daf8777
Author: Riza Suminto <[email protected]>
AuthorDate: Wed May 15 21:13:54 2024 -0700

    IMPALA-13083: Clarify REASON_MEM_LIMIT_TOO_LOW_FOR_RESERVATION
    
    This patch improves the REASON_MEM_LIMIT_TOO_LOW_FOR_RESERVATION error
    message by stating the specific configuration option that must be
    adjusted so that the query can pass admission control. New fields
    'per_backend_mem_to_admit_source' and
    'coord_backend_mem_to_admit_source' of type MemLimitSourcePB are added
    into QuerySchedulePB. These fields explain which limiting factor drives
    the final values of 'per_backend_mem_to_admit' and
    'coord_backend_mem_to_admit', respectively. In turn, Admission Control
    will use this information to compose a more informative error message
    that the user can act upon. The new error message pattern also
    explicitly mentions "Per Host Min Memory Reservation" as the place to
    look when investigating the memory reservations scheduled for each
    backend node.
    
    Updated documentation with examples of query rejection by Admission
    Control and how to read the error message.
    
    Testing:
    - Add BE tests at admission-controller-test.cc
    - Adjust and pass affected EE tests
    
    Change-Id: I1ef7fb7e7a194b2036c2948639a06c392590bf66
    Reviewed-on: http://gerrit.cloudera.org:8080/21436
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 be/src/scheduling/admission-controller-test.cc     | 407 +++++++++++++++++++--
 be/src/scheduling/admission-controller.cc          |  87 ++++-
 be/src/scheduling/schedule-state.cc                | 136 +++++--
 be/src/scheduling/schedule-state.h                 |  25 ++
 be/src/scheduling/scheduler.cc                     |   5 +-
 be/src/scheduling/scheduler.h                      |   2 +
 common/protobuf/admission_control_service.proto    |  23 ++
 docs/topics/impala_admission.xml                   |  71 +++-
 docs/topics/impala_mem_limit.xml                   |   8 +-
 .../QueryTest/admission-max-min-mem-limits.test    |  19 +-
 .../admission-reject-min-reservation.test          |  25 +-
 .../QueryTest/runtime_row_filter_reservations.test |   6 +-
 12 files changed, 688 insertions(+), 126 deletions(-)

diff --git a/be/src/scheduling/admission-controller-test.cc 
b/be/src/scheduling/admission-controller-test.cc
index d096bb90f..545002f1b 100644
--- a/be/src/scheduling/admission-controller-test.cc
+++ b/be/src/scheduling/admission-controller-test.cc
@@ -56,6 +56,12 @@ static const string HOST_2 = "host2:25000";
 // The default version of the heavy memory query list.
 static std::vector<THeavyMemoryQuery> empty_heavy_memory_query_list;
 
+// Default numbers used in few tests below.
+static const int64_t DEFAULT_PER_EXEC_MEM_ESTIMATE = GIGABYTE;
+static const int64_t DEFAULT_COORD_MEM_ESTIMATE = 150 * MEGABYTE;
+static const int64_t ADMIT_MEM_LIMIT_BACKEND = GIGABYTE;
+static const int64_t ADMIT_MEM_LIMIT_COORD = 512 * MEGABYTE;
+
 /// Parent class for Admission Controller tests.
 /// Common code and constants should go here.
 /// These are single threaded tests so we access the internal data structures 
of
@@ -88,9 +94,10 @@ class AdmissionControllerTest : public testing::Test {
   /// Make a ScheduleState with dummy parameters that can be used to test 
admission and
   /// rejection in AdmissionController.
   ScheduleState* MakeScheduleState(string request_pool_name, int64_t mem_limit,
-      TPoolConfig& config, const int num_hosts, const int 
per_host_mem_estimate,
-      const int coord_mem_estimate, bool is_dedicated_coord,
-      const string& executor_group = 
ImpalaServer::DEFAULT_EXECUTOR_GROUP_NAME) {
+      TPoolConfig& config, const int num_hosts, const int64_t 
per_host_mem_estimate,
+      const int64_t coord_mem_estimate, bool is_dedicated_coord,
+      const string& executor_group = ImpalaServer::DEFAULT_EXECUTOR_GROUP_NAME,
+      int64_t mem_limit_executors = -1, int64_t mem_limit_coordinators = -1) {
     DCHECK_GT(num_hosts, 0);
     TQueryExecRequest* request = pool_.Add(new TQueryExecRequest());
     request->query_ctx.request_pool = request_pool_name;
@@ -101,7 +108,13 @@ class AdmissionControllerTest : public testing::Test {
     RuntimeProfile* profile = RuntimeProfile::Create(&pool_, "pool1");
     UniqueIdPB* query_id = pool_.Add(new UniqueIdPB()); // always 0,0
     TQueryOptions* query_options = pool_.Add(new TQueryOptions());
-    query_options->__set_mem_limit(mem_limit);
+    if (mem_limit > -1) query_options->__set_mem_limit(mem_limit);
+    if (mem_limit_executors > -1) {
+      query_options->__set_mem_limit_executors(mem_limit_executors);
+    }
+    if (mem_limit_coordinators > -1) {
+      query_options->__set_mem_limit_coordinators(mem_limit_coordinators);
+    }
     ScheduleState* schedule_state =
         pool_.Add(new ScheduleState(*query_id, *request, *query_options, 
profile, true));
     schedule_state->set_executor_group(executor_group);
@@ -332,6 +345,130 @@ class AdmissionControllerTest : public testing::Test {
       ResetMemConsumed(child);
     }
   }
+
+  ScheduleState* DedicatedCoordAdmissionSetup(TPoolConfig& test_pool_config,
+      int64_t mem_limit, int64_t mem_limit_executors, int64_t 
mem_limit_coordinators) {
+    AdmissionController* admission_controller = MakeAdmissionController();
+    RequestPoolService* request_pool_service =
+        admission_controller->request_pool_service_;
+
+    Status status = request_pool_service->GetPoolConfig("default", 
&test_pool_config);
+    if (!status.ok()) return nullptr;
+    test_pool_config.__set_max_mem_resources(
+        2 * GIGABYTE); // to enable memory based admission.
+
+    // Set up a query schedule to test.
+    ScheduleState* test_state = MakeScheduleState("default", mem_limit, 
test_pool_config,
+        2, DEFAULT_PER_EXEC_MEM_ESTIMATE, DEFAULT_COORD_MEM_ESTIMATE, true,
+        ImpalaServer::DEFAULT_EXECUTOR_GROUP_NAME, mem_limit_executors,
+        mem_limit_coordinators);
+    test_state->ClearBackendScheduleStates();
+    // Add coordinator backend.
+    const string coord_host_name = Substitute("host$0", 1);
+    NetworkAddressPB coord_addr = MakeNetworkAddressPB(coord_host_name, 25000);
+    const string coord_host = NetworkAddressPBToString(coord_addr);
+    BackendScheduleState& coord_exec_params =
+        test_state->GetOrCreateBackendScheduleState(coord_addr);
+    coord_exec_params.exec_params->set_is_coord_backend(true);
+    coord_exec_params.exec_params->set_thread_reservation(1);
+    coord_exec_params.exec_params->set_slots_to_use(2);
+    coord_exec_params.be_desc.set_admit_mem_limit(ADMIT_MEM_LIMIT_COORD);
+    coord_exec_params.be_desc.set_admission_slots(8);
+    coord_exec_params.be_desc.set_is_executor(false);
+    coord_exec_params.be_desc.set_is_coordinator(true);
+    coord_exec_params.be_desc.set_ip_address(test::HostIdxToIpAddr(1));
+    // Add executor backend.
+    const string exec_host_name = Substitute("host$0", 2);
+    NetworkAddressPB exec_addr = MakeNetworkAddressPB(exec_host_name, 25000);
+    const string exec_host = NetworkAddressPBToString(exec_addr);
+    BackendScheduleState& backend_schedule_state =
+        test_state->GetOrCreateBackendScheduleState(exec_addr);
+    backend_schedule_state.exec_params->set_thread_reservation(1);
+    backend_schedule_state.exec_params->set_slots_to_use(2);
+    
backend_schedule_state.be_desc.set_admit_mem_limit(ADMIT_MEM_LIMIT_BACKEND);
+    backend_schedule_state.be_desc.set_admission_slots(8);
+    backend_schedule_state.be_desc.set_is_executor(true);
+    backend_schedule_state.be_desc.set_ip_address(test::HostIdxToIpAddr(2));
+
+    ExecutorGroupCoordinatorPair group1 = MakeExecutorConfig(*test_state);
+    test_state->UpdateMemoryRequirements(test_pool_config,
+        group1.second.admit_mem_limit(),
+        group1.first.GetPerExecutorMemLimitForAdmission());
+    return test_state;
+  }
+
+  bool CanAccommodateMaxInitialReservation(const ScheduleState& state,
+      const TPoolConfig& pool_cfg, string* mem_unavailable_reason) {
+    return AdmissionController::CanAccommodateMaxInitialReservation(
+        state, pool_cfg, mem_unavailable_reason);
+  }
+
+  void TestDedicatedCoordAdmissionRejection(TPoolConfig& test_pool_config,
+      int64_t mem_limit, int64_t mem_limit_executors, int64_t 
mem_limit_coordinators) {
+    ScheduleState* test_state = DedicatedCoordAdmissionSetup(
+        test_pool_config, mem_limit, mem_limit_executors, 
mem_limit_coordinators);
+    ASSERT_NE(nullptr, test_state);
+
+    string not_admitted_reason = "--not set--";
+    const bool mimic_old_behaviour = test_pool_config.min_query_mem_limit == 0
+        && test_pool_config.max_query_mem_limit == 0;
+    const bool backend_mem_unlimited = mimic_old_behaviour && mem_limit < 0
+        && mem_limit_executors < 0 && mem_limit_coordinators < 0;
+
+    if (backend_mem_unlimited) {
+      ASSERT_EQ(-1, test_state->per_backend_mem_limit());
+      ASSERT_EQ(-1, test_state->coord_backend_mem_limit());
+    }
+    // Both coordinator and executor reservation fits.
+    test_state->set_largest_min_reservation(400 * MEGABYTE);
+    test_state->set_coord_min_reservation(50 * MEGABYTE);
+    bool can_accomodate = CanAccommodateMaxInitialReservation(
+        *test_state, test_pool_config, &not_admitted_reason);
+    EXPECT_STR_CONTAINS(not_admitted_reason, "--not set--");
+    ASSERT_TRUE(can_accomodate);
+    // Coordinator reservation doesn't fit.
+    test_state->set_largest_min_reservation(400 * MEGABYTE);
+    test_state->set_coord_min_reservation(700 * MEGABYTE);
+    can_accomodate = CanAccommodateMaxInitialReservation(
+        *test_state, test_pool_config, &not_admitted_reason);
+    if (!backend_mem_unlimited) {
+      EXPECT_STR_CONTAINS(not_admitted_reason,
+          "minimum memory reservation is greater than memory available to the 
query for "
+          "buffer reservations. Memory reservation needed given the current 
plan: 700.00 "
+          "MB. Adjust the MEM_LIMIT option ");
+      ASSERT_FALSE(can_accomodate);
+    } else {
+      ASSERT_TRUE(can_accomodate);
+    }
+    // Neither coordinator nor executor reservation fits.
+    test_state->set_largest_min_reservation(GIGABYTE);
+    test_state->set_coord_min_reservation(GIGABYTE);
+    can_accomodate = CanAccommodateMaxInitialReservation(
+        *test_state, test_pool_config, &not_admitted_reason);
+    if (!backend_mem_unlimited) {
+      EXPECT_STR_CONTAINS(not_admitted_reason,
+          "minimum memory reservation is greater than memory available to the 
query for "
+          "buffer reservations. Memory reservation needed given the current 
plan: 1.00 "
+          "GB. Adjust the MEM_LIMIT option ");
+      ASSERT_FALSE(can_accomodate);
+    } else {
+      ASSERT_TRUE(can_accomodate);
+    }
+    // Executor reservation doesn't fit.
+    test_state->set_largest_min_reservation(900 * MEGABYTE);
+    test_state->set_coord_min_reservation(50 * MEGABYTE);
+    can_accomodate = CanAccommodateMaxInitialReservation(
+        *test_state, test_pool_config, &not_admitted_reason);
+    if (!backend_mem_unlimited) {
+      EXPECT_STR_CONTAINS(not_admitted_reason,
+          "minimum memory reservation is greater than memory available to the 
query for "
+          "buffer reservations. Memory reservation needed given the current 
plan: 900.00 "
+          "MB. Adjust the MEM_LIMIT option ");
+      ASSERT_FALSE(can_accomodate);
+    } else {
+      ASSERT_TRUE(can_accomodate);
+    }
+  }
 };
 
 /// Test that AdmissionController will admit a query into a pool, then 
simulate other
@@ -878,6 +1015,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group.second.admit_mem_limit(),
       group.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::COORDINATOR_ONLY_OPTIMIZATION,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(0, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(COORD_MEM_ESTIMATE, schedule_state->coord_backend_mem_to_admit());
 
@@ -891,6 +1032,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group1.second.admit_mem_limit(),
       group1.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_PER_HOST_MEM_ESTIMATE,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(PER_EXEC_MEM_ESTIMATE, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(COORD_MEM_ESTIMATE, schedule_state->coord_backend_mem_to_admit());
   ASSERT_EQ(-1, schedule_state->per_backend_mem_limit());
@@ -908,6 +1053,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group2.second.admit_mem_limit(),
       group2.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(COORD_MEM_ESTIMATE, schedule_state->coord_backend_mem_to_admit());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_limit());
@@ -926,6 +1075,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group3.second.admit_mem_limit(),
       group3.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::ADJUSTED_DEDICATED_COORDINATOR_MEM_ESTIMATE,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(min_coord_mem_limit_required, 
schedule_state->coord_backend_mem_to_admit());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_limit());
@@ -939,6 +1092,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group4.second.admit_mem_limit(),
       group4.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(GIGABYTE, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(GIGABYTE, schedule_state->coord_backend_mem_to_admit());
   ASSERT_EQ(GIGABYTE, schedule_state->per_backend_mem_limit());
@@ -954,6 +1111,10 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordScheduleState) {
   schedule_state->UpdateMemoryRequirements(pool_config,
       group5.second.admit_mem_limit(),
       group5.first.GetPerExecutorMemLimitForAdmission());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT,
+      schedule_state->coord_backend_mem_to_admit_source());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_to_admit());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->coord_backend_mem_to_admit());
   ASSERT_EQ(700 * MEGABYTE, schedule_state->per_backend_mem_limit());
@@ -1091,45 +1252,219 @@ TEST_F(AdmissionControllerTest, 
DedicatedCoordAdmissionChecks) {
                       "Not enough admission control slots available "
                       "on host host2:25000. Needed 2 slots but 7/8 are already 
in use.");
   ASSERT_FALSE(coordinator_resource_limited);
+}
 
-  // Test 4: Make sure that coord and executors have separate checks on for 
whether their
-  // mem limits can accommodate their respective initial reservations.
-  schedule_state = MakeScheduleState(
-      "default", 0, pool_config, 2, PER_EXEC_MEM_ESTIMATE, COORD_MEM_ESTIMATE, 
true);
-  pool_config.__set_min_query_mem_limit(MEGABYTE); // to auto set mem_limit(s).
-  ExecutorGroupCoordinatorPair group1 = MakeExecutorConfig(*schedule_state);
-  schedule_state->UpdateMemoryRequirements(pool_config,
-      group1.second.admit_mem_limit(),
-      group1.first.GetPerExecutorMemLimitForAdmission());
+// Test rejection with pool's mem limit clamp set to 0 and no MEM_LIMIT set.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionZeroPoolMemLimit) {
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(0);
+  pool_config.__set_max_query_mem_limit(0);
+  TestDedicatedCoordAdmissionRejection(pool_config, -1, -1, -1);
+}
+
+// Test rejection with pool's mem limit clamp set to non default value.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmission1MBPoolMemLimit) {
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(MEGABYTE);
+  TestDedicatedCoordAdmissionRejection(pool_config, -1, -1, -1);
+}
+
+// Test rejection with pool's mem limit clamp disabled and no MEM_LIMIT set.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionDisabledPoolMemLimit) {
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(GIGABYTE);
+  pool_config.__set_clamp_mem_limit_query_option(false);
+  TestDedicatedCoordAdmissionRejection(pool_config, -1, -1, -1);
+}
+
+// Test rejection with MEM_LIMIT set to non default value.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionAtCoordAdmitMemLimit) {
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(0);
+  pool_config.__set_max_query_mem_limit(0);
+  TestDedicatedCoordAdmissionRejection(pool_config, ADMIT_MEM_LIMIT_COORD, -1, 
-1);
+}
+
+// Test rejection with pool's mem limit clamp and MEM_LIMIT set to non default 
value.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionWithPoolAndMemLimit) {
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(MEGABYTE);
+  TestDedicatedCoordAdmissionRejection(pool_config, ADMIT_MEM_LIMIT_COORD, -1, 
-1);
+}
+
+// Test that memory clamping is ignored if clamp_mem_limit_query_option is 
false.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionIgnoreMemClamp) {
+  TPoolConfig pool_config;
+  string not_admitted_reason = "--not set--";
+  pool_config.__set_min_query_mem_limit(2 * MEGABYTE);
+  pool_config.__set_max_query_mem_limit(2 * MEGABYTE);
+  pool_config.__set_clamp_mem_limit_query_option(false);
+  ScheduleState* schedule_state = MakeScheduleState("default", MEGABYTE, 
pool_config, 2,
+      DEFAULT_PER_EXEC_MEM_ESTIMATE, DEFAULT_COORD_MEM_ESTIMATE, true);
   schedule_state->set_largest_min_reservation(600 * MEGABYTE);
   schedule_state->set_coord_min_reservation(50 * MEGABYTE);
-  ASSERT_TRUE(AdmissionController::CanAccommodateMaxInitialReservation(
-      *schedule_state, pool_config, &not_admitted_reason));
-  // Coordinator reservation doesn't fit.
-  schedule_state->set_coord_min_reservation(200 * MEGABYTE);
-  ASSERT_FALSE(AdmissionController::CanAccommodateMaxInitialReservation(
-      *schedule_state, pool_config, &not_admitted_reason));
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MEGABYTE, schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(MEGABYTE, schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(MEGABYTE, schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(MEGABYTE, schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
   EXPECT_STR_CONTAINS(not_admitted_reason,
-      "minimum memory reservation is greater "
-      "than memory available to the query for buffer reservations. Memory 
reservation "
-      "needed given the current plan: 200.00 MB");
-  // Neither coordinator or executor reservation fits.
-  schedule_state->set_largest_min_reservation(GIGABYTE);
-  ASSERT_FALSE(AdmissionController::CanAccommodateMaxInitialReservation(
-      *schedule_state, pool_config, &not_admitted_reason));
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
600.00 MB."
+      " Adjust the MEM_LIMIT option ");
+  ASSERT_FALSE(can_accomodate);
+}
+
+// Test rejection due to min-query-mem-limit clamping.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionExceedMinMemClamp) {
+  TPoolConfig pool_config;
+  string not_admitted_reason = "--not set--";
+  pool_config.__set_min_query_mem_limit(2 * MEGABYTE);
+  pool_config.__set_max_query_mem_limit(2 * MEGABYTE);
+  ScheduleState* schedule_state = MakeScheduleState("default", MEGABYTE, 
pool_config, 2,
+      DEFAULT_PER_EXEC_MEM_ESTIMATE, DEFAULT_COORD_MEM_ESTIMATE, true);
+  schedule_state->set_largest_min_reservation(600 * MEGABYTE);
+  schedule_state->set_coord_min_reservation(50 * MEGABYTE);
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(2 * MEGABYTE, schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(2 * MEGABYTE, schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(2 * MEGABYTE, schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(2 * MEGABYTE, schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
   EXPECT_STR_CONTAINS(not_admitted_reason,
-      "minimum memory reservation is greater "
-      "than memory available to the query for buffer reservations. Memory 
reservation "
-      "needed given the current plan: 1.00 GB");
-  // Coordinator reservation doesn't fit.
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
600.00 MB."
+      " Adjust the impala.admission-control.min-query-mem-limit of request 
pool "
+      "'default' ");
+  ASSERT_FALSE(can_accomodate);
+}
+
+// Test rejection due to max-query-mem-limit clamping.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionExceedMaxMemClamp) {
+  TPoolConfig pool_config;
+  string not_admitted_reason = "--not set--";
+  pool_config.__set_min_query_mem_limit(2 * MEGABYTE);
+  pool_config.__set_max_query_mem_limit(3 * MEGABYTE);
+  ScheduleState* schedule_state = MakeScheduleState("default", 4 * MEGABYTE, 
pool_config,
+      2, DEFAULT_PER_EXEC_MEM_ESTIMATE, DEFAULT_COORD_MEM_ESTIMATE, true);
+  schedule_state->set_largest_min_reservation(600 * MEGABYTE);
   schedule_state->set_coord_min_reservation(50 * MEGABYTE);
-  schedule_state->set_largest_min_reservation(GIGABYTE);
-  ASSERT_FALSE(AdmissionController::CanAccommodateMaxInitialReservation(
-      *schedule_state, pool_config, &not_admitted_reason));
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(3 * MEGABYTE, schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(3 * MEGABYTE, schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(3 * MEGABYTE, schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(3 * MEGABYTE, schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
+  EXPECT_STR_CONTAINS(not_admitted_reason,
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
600.00 MB."
+      " Adjust the impala.admission-control.max-query-mem-limit of request 
pool "
+      "'default' ");
+  ASSERT_FALSE(can_accomodate);
+}
+
+// Test rejection due to MEM_LIMIT_EXECUTORS exceeded.
+TEST_F(AdmissionControllerTest, 
DedicatedCoordAdmissionExceedMemLimitExecutors) {
+  FLAGS_clamp_query_mem_limit_backend_mem_limit = false;
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(ADMIT_MEM_LIMIT_BACKEND);
+  string not_admitted_reason = "--not set--";
+  ScheduleState* schedule_state =
+      DedicatedCoordAdmissionSetup(pool_config, -1, 3 * GIGABYTE, -1);
+  ASSERT_NE(nullptr, schedule_state);
+  schedule_state->set_largest_min_reservation(4 * GIGABYTE);
+  schedule_state->set_coord_min_reservation(50 * MEGABYTE);
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_EXECUTORS,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(3 * GIGABYTE, schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(3 * GIGABYTE, schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE,
+      schedule_state->coord_backend_mem_to_admit_source());
+  ASSERT_EQ(DEFAULT_COORD_MEM_ESTIMATE, 
schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(DEFAULT_COORD_MEM_ESTIMATE, 
schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
+  EXPECT_STR_CONTAINS(not_admitted_reason,
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
4.00 GB. "
+      "Adjust the MEM_LIMIT_EXECUTORS option ");
+  ASSERT_FALSE(can_accomodate);
+}
+
+// Test rejection due to MEM_LIMIT_COORDINATORS exceeded.
+TEST_F(AdmissionControllerTest, 
DedicatedCoordAdmissionExceedMemLimitCoordinators) {
+  FLAGS_clamp_query_mem_limit_backend_mem_limit = false;
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(ADMIT_MEM_LIMIT_BACKEND);
+  string not_admitted_reason = "--not set--";
+  ScheduleState* schedule_state =
+      DedicatedCoordAdmissionSetup(pool_config, -1, -1, 3 * GIGABYTE);
+  ASSERT_NE(nullptr, schedule_state);
+  schedule_state->set_largest_min_reservation(600 * MEGABYTE);
+  schedule_state->set_coord_min_reservation(4 * GIGABYTE);
+  ASSERT_EQ(MemLimitSourcePB::QUERY_PLAN_PER_HOST_MEM_ESTIMATE,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(DEFAULT_PER_EXEC_MEM_ESTIMATE, 
schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(DEFAULT_PER_EXEC_MEM_ESTIMATE, 
schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_COORDINATORS,
+      schedule_state->coord_backend_mem_to_admit_source());
+  ASSERT_EQ(3 * GIGABYTE, schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(3 * GIGABYTE, schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
+  EXPECT_STR_CONTAINS(not_admitted_reason,
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
4.00 GB. "
+      "Adjust the MEM_LIMIT_COORDINATORS option ");
+  ASSERT_FALSE(can_accomodate);
+}
+
+// Test rejection due to system memory limit.
+TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionExceedSystemMem) {
+  FLAGS_clamp_query_mem_limit_backend_mem_limit = true;
+  TPoolConfig pool_config;
+  pool_config.__set_min_query_mem_limit(MEGABYTE);
+  pool_config.__set_max_query_mem_limit(3 * ADMIT_MEM_LIMIT_BACKEND);
+  string not_admitted_reason = "--not set--";
+  ScheduleState* schedule_state = MakeScheduleState("default", -1, 
pool_config, 2,
+      2 * ADMIT_MEM_LIMIT_BACKEND, 2 * ADMIT_MEM_LIMIT_COORD, true);
+  schedule_state->set_largest_min_reservation(2 * ADMIT_MEM_LIMIT_BACKEND);
+  schedule_state->set_coord_min_reservation(50 * MEGABYTE);
+  schedule_state->UpdateMemoryRequirements(
+      pool_config, ADMIT_MEM_LIMIT_COORD, ADMIT_MEM_LIMIT_BACKEND);
+  ASSERT_EQ(MemLimitSourcePB::HOST_MEM_TRACKER_LIMIT,
+      schedule_state->per_backend_mem_to_admit_source());
+  ASSERT_EQ(ADMIT_MEM_LIMIT_BACKEND, 
schedule_state->per_backend_mem_to_admit());
+  ASSERT_EQ(ADMIT_MEM_LIMIT_BACKEND, schedule_state->per_backend_mem_limit());
+  ASSERT_EQ(MemLimitSourcePB::HOST_MEM_TRACKER_LIMIT,
+      schedule_state->coord_backend_mem_to_admit_source());
+  ASSERT_EQ(ADMIT_MEM_LIMIT_COORD, 
schedule_state->coord_backend_mem_to_admit());
+  ASSERT_EQ(ADMIT_MEM_LIMIT_COORD, schedule_state->coord_backend_mem_limit());
+  bool can_accomodate = CanAccommodateMaxInitialReservation(
+      *schedule_state, pool_config, &not_admitted_reason);
   EXPECT_STR_CONTAINS(not_admitted_reason,
-      "minimum memory reservation is greater "
-      "than memory available to the query for buffer reservations. Memory 
reservation "
-      "needed given the current plan: 1.00 GB");
+      "minimum memory reservation is greater than memory available to the 
query for "
+      "buffer reservations. Memory reservation needed given the current plan: 
2.00 GB. "
+      "Adjust the system memory or the CGroup memory limit ");
+  ASSERT_FALSE(can_accomodate);
 }
 
 /// Test that AdmissionController can identify 5 queries with top memory 
consumption
diff --git a/be/src/scheduling/admission-controller.cc 
b/be/src/scheduling/admission-controller.cc
index a85e3ba47..ef6449a70 100644
--- a/be/src/scheduling/admission-controller.cc
+++ b/be/src/scheduling/admission-controller.cc
@@ -201,15 +201,15 @@ const string 
REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_LIMIT =
     "max_query_mem_limit ($0 > $1)";
 const string REASON_MEM_LIMIT_TOO_LOW_FOR_RESERVATION =
     "minimum memory reservation is greater than memory available to the query 
for buffer "
-    "reservations. Memory reservation needed given the current plan: $0. 
Adjust either "
-    "the mem_limit or the pool config (max-query-mem-limit, 
min-query-mem-limit) for the "
-    "query to allow the query memory limit to be at least $1. Note that 
changing the "
-    "mem_limit may also change the plan. See the query profile for more 
information "
-    "about the per-node memory requirements.";
+    "reservations. Memory reservation needed given the current plan: $0. 
Adjust the $1 "
+    "for the query to allow the query memory limit to be at least $2. Note 
that changing "
+    "the memory limit may also change the plan. See '$3' in the "
+    "query profile for more information about the per-node memory 
requirements.";
 const string REASON_BUFFER_LIMIT_TOO_LOW_FOR_RESERVATION =
     "minimum memory reservation on backend '$0' is greater than memory 
available to the "
-    "query for buffer reservations. Increase the buffer_pool_limit to $1. See 
the query "
-    "profile for more information about the per-node memory requirements.";
+    "query for buffer reservations. Increase the buffer_pool_limit to $1. "
+    "See '$2' in the query profile for more information about "
+    "the per-node memory requirements.";
 const string REASON_NOT_ENOUGH_SLOTS_ON_BACKEND =
     "number of admission control slots needed ($0) on backend '$1' is greater 
than total "
     "slots available $2. Reduce MT_DOP or MAX_FRAGMENT_INSTANCES_PER_NODE to 
less than "
@@ -217,8 +217,8 @@ const string REASON_NOT_ENOUGH_SLOTS_ON_BACKEND =
 const string REASON_MIN_RESERVATION_OVER_POOL_MEM =
     "minimum memory reservation needed is greater than pool max mem resources. 
Pool "
     "max mem resources: $0. Cluster-wide memory reservation needed: $1. 
Increase the "
-    "pool max mem resources. See the query profile for more information about 
the "
-    "per-node memory requirements.";
+    "pool max mem resources. See '$2' in the query profile "
+    "for more information about the per-node memory requirements.";
 const string REASON_DISABLED_MAX_MEM_RESOURCES =
     "disabled by pool max mem resources set to 0";
 const string REASON_DISABLED_REQUESTS_LIMIT = "disabled by requests limit set 
to 0";
@@ -842,29 +842,78 @@ void AdmissionController::UpdateHostStats(const 
NetworkAddressPB& host_addr,
 // Helper method used by CanAccommodateMaxInitialReservation(). Returns true 
if the given
 // 'mem_limit' can accommodate 'buffer_reservation'. If not, returns false and 
the
 // details about the memory shortage in 'mem_unavailable_reason'.
-static bool CanMemLimitAccommodateReservation(
-    int64_t mem_limit, int64_t buffer_reservation, string* 
mem_unavailable_reason) {
+static bool CanMemLimitAccommodateReservation(const int64_t mem_limit,
+    const MemLimitSourcePB mem_limit_source, const int64_t buffer_reservation,
+    const string& request_pool, string* mem_unavailable_reason) {
   if (mem_limit <= 0) return true; // No mem limit.
   const int64_t max_reservation =
       ReservationUtil::GetReservationLimitFromMemLimit(mem_limit);
   if (buffer_reservation <= max_reservation) return true;
   const int64_t required_mem_limit =
       ReservationUtil::GetMinMemLimitFromReservation(buffer_reservation);
+  string config_name = "<config_name>";
+  switch (mem_limit_source) {
+    case MemLimitSourcePB::NO_LIMIT:
+      DCHECK(false) << "MemLimitSourcePB::NO_LIMIT only valid for mem_limit <= 
0";
+      break;
+    case MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT:
+    case MemLimitSourcePB::QUERY_PLAN_PER_HOST_MEM_ESTIMATE:
+    case MemLimitSourcePB::ADJUSTED_PER_HOST_MEM_ESTIMATE:
+    case MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE:
+    case MemLimitSourcePB::ADJUSTED_DEDICATED_COORDINATOR_MEM_ESTIMATE:
+      config_name = to_string(TImpalaQueryOptions::MEM_LIMIT) + " option";
+      break;
+    case MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_EXECUTORS:
+      config_name = to_string(TImpalaQueryOptions::MEM_LIMIT_EXECUTORS) + " 
option";
+      break;
+    case MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_COORDINATORS:
+      config_name = to_string(TImpalaQueryOptions::MEM_LIMIT_COORDINATORS) + " 
option";
+      break;
+    case MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT:
+      config_name =
+          Substitute("impala.admission-control.min-query-mem-limit of request 
pool '$0'",
+              request_pool);
+      break;
+    case MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT:
+      config_name =
+          Substitute("impala.admission-control.max-query-mem-limit of request 
pool '$0'",
+              request_pool);
+      break;
+    case MemLimitSourcePB::HOST_MEM_TRACKER_LIMIT:
+      config_name = "system memory or the CGroup memory limit";
+      break;
+    case MemLimitSourcePB::COORDINATOR_ONLY_OPTIMIZATION:
+      DCHECK(false) << "Coordinator only query should have mem_limit == 0";
+      break;
+    default:
+      DCHECK(false) << "Unknown MemLimitSourcePB enum: " << mem_limit_source;
+  }
   *mem_unavailable_reason = 
Substitute(REASON_MEM_LIMIT_TOO_LOW_FOR_RESERVATION,
-      PrintBytes(buffer_reservation), PrintBytes(required_mem_limit));
+      PrintBytes(buffer_reservation), config_name, 
PrintBytes(required_mem_limit),
+      Scheduler::PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION);
   return false;
 }
 
 bool AdmissionController::CanAccommodateMaxInitialReservation(const 
ScheduleState& state,
     const TPoolConfig& pool_cfg, string* mem_unavailable_reason) {
+  // Executors mem_limit.
   const int64_t executor_mem_limit = state.per_backend_mem_limit();
   const int64_t executor_min_reservation = state.largest_min_reservation();
+  if (executor_mem_limit > 0) {
+    DCHECK_EQ(executor_mem_limit, state.per_backend_mem_to_admit());
+  }
+  // Coordinator mem_limit.
   const int64_t coord_mem_limit = state.coord_backend_mem_limit();
   const int64_t coord_min_reservation = state.coord_min_reservation();
-  return CanMemLimitAccommodateReservation(
-             executor_mem_limit, executor_min_reservation, 
mem_unavailable_reason)
-      && CanMemLimitAccommodateReservation(
-             coord_mem_limit, coord_min_reservation, mem_unavailable_reason);
+  if (coord_mem_limit > 0) {
+    DCHECK_EQ(coord_mem_limit, state.coord_backend_mem_to_admit());
+  }
+  return CanMemLimitAccommodateReservation(executor_mem_limit,
+             state.per_backend_mem_to_admit_source(), executor_min_reservation,
+             state.request_pool(), mem_unavailable_reason)
+      && CanMemLimitAccommodateReservation(coord_mem_limit,
+          state.coord_backend_mem_to_admit_source(), coord_min_reservation,
+          state.request_pool(), mem_unavailable_reason);
 }
 
 bool AdmissionController::HasAvailableMemResources(const ScheduleState& state,
@@ -1135,7 +1184,8 @@ bool AdmissionController::RejectForSchedule(
     if (largest_min_mem_reservation.second > query_opts.buffer_pool_limit) {
       *rejection_reason = 
Substitute(REASON_BUFFER_LIMIT_TOO_LOW_FOR_RESERVATION,
           NetworkAddressPBToString(*largest_min_mem_reservation.first),
-          PrintBytes(largest_min_mem_reservation.second));
+          PrintBytes(largest_min_mem_reservation.second),
+          Scheduler::PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION);
       return true;
     }
   } else if (!CanAccommodateMaxInitialReservation(state, pool_cfg, 
rejection_reason)) {
@@ -1172,7 +1222,8 @@ bool AdmissionController::RejectForSchedule(
   if (max_mem > 0) {
     if (cluster_min_mem_reservation_bytes > max_mem) {
       *rejection_reason = Substitute(REASON_MIN_RESERVATION_OVER_POOL_MEM,
-          PrintBytes(max_mem), PrintBytes(cluster_min_mem_reservation_bytes));
+          PrintBytes(max_mem), PrintBytes(cluster_min_mem_reservation_bytes),
+          Scheduler::PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION);
       return true;
     }
     int64_t cluster_mem_to_admit = state.GetClusterMemoryToAdmit();
diff --git a/be/src/scheduling/schedule-state.cc 
b/be/src/scheduling/schedule-state.cc
index 0c7fb261a..2651ddab7 100644
--- a/be/src/scheduling/schedule-state.cc
+++ b/be/src/scheduling/schedule-state.cc
@@ -259,6 +259,42 @@ bool ScheduleState::UseDedicatedCoordEstimates() const {
   return false;
 }
 
+void ScheduleState::CompareMaxBackendMemToAdmit(
+    const int64_t new_limit, const MemLimitSourcePB source) {
+  DCHECK(query_schedule_pb_->has_per_backend_mem_to_admit());
+  if (query_schedule_pb_->per_backend_mem_to_admit() < new_limit) {
+    query_schedule_pb_->set_per_backend_mem_to_admit(new_limit);
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(source);
+  }
+}
+
+void ScheduleState::CompareMinBackendMemToAdmit(
+    const int64_t new_limit, const MemLimitSourcePB source) {
+  DCHECK(query_schedule_pb_->has_per_backend_mem_to_admit());
+  if (query_schedule_pb_->per_backend_mem_to_admit() > new_limit) {
+    query_schedule_pb_->set_per_backend_mem_to_admit(new_limit);
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(source);
+  }
+}
+
+void ScheduleState::CompareMaxCoordinatorMemToAdmit(
+    const int64_t new_limit, const MemLimitSourcePB source) {
+  DCHECK(query_schedule_pb_->has_coord_backend_mem_to_admit());
+  if (query_schedule_pb_->coord_backend_mem_to_admit() < new_limit) {
+    query_schedule_pb_->set_coord_backend_mem_to_admit(new_limit);
+    query_schedule_pb_->set_coord_backend_mem_to_admit_source(source);
+  }
+}
+
+void ScheduleState::CompareMinCoordinatorMemToAdmit(
+    const int64_t new_limit, const MemLimitSourcePB source) {
+  DCHECK(query_schedule_pb_->has_coord_backend_mem_to_admit());
+  if (query_schedule_pb_->coord_backend_mem_to_admit() > new_limit) {
+    query_schedule_pb_->set_coord_backend_mem_to_admit(new_limit);
+    query_schedule_pb_->set_coord_backend_mem_to_admit_source(source);
+  }
+}
+
 void ScheduleState::UpdateMemoryRequirements(const TPoolConfig& pool_cfg,
     int64_t coord_mem_limit_admission, int64_t executor_mem_limit_admission) {
   // If the min_query_mem_limit and max_query_mem_limit are not set in the 
pool config
@@ -268,51 +304,69 @@ void ScheduleState::UpdateMemoryRequirements(const 
TPoolConfig& pool_cfg,
       pool_cfg.min_query_mem_limit == 0 && pool_cfg.max_query_mem_limit == 0;
   const bool use_dedicated_coord_estimates = UseDedicatedCoordEstimates();
 
-  int64_t per_backend_mem_to_admit = 0;
-  int64_t coord_backend_mem_to_admit = 0;
+  query_schedule_pb_->set_per_backend_mem_to_admit(0);
+  
query_schedule_pb_->set_per_backend_mem_to_admit_source(MemLimitSourcePB::NO_LIMIT);
+  query_schedule_pb_->set_coord_backend_mem_to_admit(0);
+  
query_schedule_pb_->set_coord_backend_mem_to_admit_source(MemLimitSourcePB::NO_LIMIT);
   bool is_mem_limit_set = false;
   if (query_options().__isset.mem_limit && query_options().mem_limit > 0) {
-    per_backend_mem_to_admit = query_options().mem_limit;
-    coord_backend_mem_to_admit = query_options().mem_limit;
+    
query_schedule_pb_->set_per_backend_mem_to_admit(query_options().mem_limit);
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(
+        MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT);
+    
query_schedule_pb_->set_coord_backend_mem_to_admit(query_options().mem_limit);
+    query_schedule_pb_->set_coord_backend_mem_to_admit_source(
+        MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT);
     is_mem_limit_set = true;
   }
 
   if (!is_mem_limit_set) {
-    per_backend_mem_to_admit = GetPerExecutorMemoryEstimate();
-    coord_backend_mem_to_admit = use_dedicated_coord_estimates ?
-        GetDedicatedCoordMemoryEstimate() :
-        GetPerExecutorMemoryEstimate();
+    
query_schedule_pb_->set_per_backend_mem_to_admit(GetPerExecutorMemoryEstimate());
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(
+        MemLimitSourcePB::QUERY_PLAN_PER_HOST_MEM_ESTIMATE);
+    if (use_dedicated_coord_estimates) {
+      query_schedule_pb_->set_coord_backend_mem_to_admit(
+          GetDedicatedCoordMemoryEstimate());
+      query_schedule_pb_->set_coord_backend_mem_to_admit_source(
+          MemLimitSourcePB::QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE);
+    } else {
+      
query_schedule_pb_->set_coord_backend_mem_to_admit(GetPerExecutorMemoryEstimate());
+      query_schedule_pb_->set_coord_backend_mem_to_admit_source(
+          MemLimitSourcePB::QUERY_PLAN_PER_HOST_MEM_ESTIMATE);
+    }
     VLOG(3) << "use_dedicated_coord_estimates=" << 
use_dedicated_coord_estimates
-            << " coord_backend_mem_to_admit=" << coord_backend_mem_to_admit
-            << " per_backend_mem_to_admit=" << per_backend_mem_to_admit;
+            << " coord_backend_mem_to_admit="
+            << query_schedule_pb_->coord_backend_mem_to_admit()
+            << " per_backend_mem_to_admit="
+            << query_schedule_pb_->per_backend_mem_to_admit();
     if (!mimic_old_behaviour) {
       int64_t min_mem_limit_required =
           
ReservationUtil::GetMinMemLimitFromReservation(largest_min_reservation());
-      per_backend_mem_to_admit = max(per_backend_mem_to_admit, 
min_mem_limit_required);
+      CompareMaxBackendMemToAdmit(
+          min_mem_limit_required, 
MemLimitSourcePB::ADJUSTED_PER_HOST_MEM_ESTIMATE);
       int64_t min_coord_mem_limit_required =
           
ReservationUtil::GetMinMemLimitFromReservation(coord_min_reservation());
-      coord_backend_mem_to_admit =
-          max(coord_backend_mem_to_admit, min_coord_mem_limit_required);
+      CompareMaxCoordinatorMemToAdmit(min_coord_mem_limit_required,
+          MemLimitSourcePB::ADJUSTED_DEDICATED_COORDINATOR_MEM_ESTIMATE);
     }
   }
 
   if (!is_mem_limit_set || pool_cfg.clamp_mem_limit_query_option) {
     if (pool_cfg.min_query_mem_limit > 0) {
-      per_backend_mem_to_admit =
-          max(per_backend_mem_to_admit, pool_cfg.min_query_mem_limit);
+      CompareMaxBackendMemToAdmit(pool_cfg.min_query_mem_limit,
+          MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT);
       if (!use_dedicated_coord_estimates || is_mem_limit_set) {
         // The minimum mem limit option does not apply to dedicated 
coordinators -
         // this would result in over-reserving of memory. Treat coordinator and
         // executor mem limits the same if the query option was explicitly set.
-        coord_backend_mem_to_admit =
-            max(coord_backend_mem_to_admit, pool_cfg.min_query_mem_limit);
+        CompareMaxCoordinatorMemToAdmit(pool_cfg.min_query_mem_limit,
+            MemLimitSourcePB::POOL_CONFIG_MIN_QUERY_MEM_LIMIT);
       }
     }
     if (pool_cfg.max_query_mem_limit > 0) {
-      per_backend_mem_to_admit =
-          min(per_backend_mem_to_admit, pool_cfg.max_query_mem_limit);
-      coord_backend_mem_to_admit =
-          min(coord_backend_mem_to_admit, pool_cfg.max_query_mem_limit);
+      CompareMinBackendMemToAdmit(pool_cfg.max_query_mem_limit,
+          MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT);
+      CompareMinCoordinatorMemToAdmit(pool_cfg.max_query_mem_limit,
+          MemLimitSourcePB::POOL_CONFIG_MAX_QUERY_MEM_LIMIT);
     }
   }
 
@@ -321,43 +375,53 @@ void ScheduleState::UpdateMemoryRequirements(const 
TPoolConfig& pool_cfg,
       query_options().__isset.mem_limit_coordinators
       && query_options().mem_limit_coordinators > 0;
   if (!is_mem_limit_set && is_mem_limit_coordinators_set) {
-    coord_backend_mem_to_admit = query_options().mem_limit_coordinators;
+    query_schedule_pb_->set_coord_backend_mem_to_admit(
+        query_options().mem_limit_coordinators);
+    query_schedule_pb_->set_coord_backend_mem_to_admit_source(
+        MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_COORDINATORS);
   }
 
   // Enforce the MEM_LIMIT_EXECUTORS query option if MEM_LIMIT is not 
specified.
   const bool is_mem_limit_executors_set = 
query_options().__isset.mem_limit_executors
       && query_options().mem_limit_executors > 0;
   if (!is_mem_limit_set && is_mem_limit_executors_set) {
-    per_backend_mem_to_admit = query_options().mem_limit_executors;
+    
query_schedule_pb_->set_per_backend_mem_to_admit(query_options().mem_limit_executors);
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(
+        MemLimitSourcePB::QUERY_OPTION_MEM_LIMIT_EXECUTORS);
   }
 
   // Cap the memory estimate at the backend's memory limit for admission. The 
user's
   // provided value or the estimate from planning can each be unreasonable.
   if (FLAGS_clamp_query_mem_limit_backend_mem_limit) {
-    per_backend_mem_to_admit =
-        min(per_backend_mem_to_admit, executor_mem_limit_admission);
-    coord_backend_mem_to_admit =
-        min(coord_backend_mem_to_admit, coord_mem_limit_admission);
+    CompareMinBackendMemToAdmit(
+        executor_mem_limit_admission, 
MemLimitSourcePB::HOST_MEM_TRACKER_LIMIT);
+    CompareMinCoordinatorMemToAdmit(
+        coord_mem_limit_admission, MemLimitSourcePB::HOST_MEM_TRACKER_LIMIT);
   }
   // If the query is only scheduled to run on the coordinator.
   if (per_backend_schedule_states_.size() == 1 && 
RequiresCoordinatorFragment()) {
-    per_backend_mem_to_admit = 0;
+    query_schedule_pb_->set_per_backend_mem_to_admit(0);
+    query_schedule_pb_->set_per_backend_mem_to_admit_source(
+        MemLimitSourcePB::COORDINATOR_ONLY_OPTIMIZATION);
   }
 
-  int64_t per_backend_mem_limit;
   if (mimic_old_behaviour && !is_mem_limit_set && !is_mem_limit_executors_set
       && !is_mem_limit_coordinators_set) {
-    per_backend_mem_limit = -1;
+    query_schedule_pb_->set_per_backend_mem_limit(-1);
     query_schedule_pb_->set_coord_backend_mem_limit(-1);
   } else {
-    per_backend_mem_limit = per_backend_mem_to_admit;
-    
query_schedule_pb_->set_coord_backend_mem_limit(coord_backend_mem_to_admit);
+    query_schedule_pb_->set_per_backend_mem_limit(
+        query_schedule_pb_->per_backend_mem_to_admit());
+    query_schedule_pb_->set_coord_backend_mem_limit(
+        query_schedule_pb_->coord_backend_mem_to_admit());
   }
-
-  
query_schedule_pb_->set_coord_backend_mem_to_admit(coord_backend_mem_to_admit);
-  query_schedule_pb_->set_per_backend_mem_limit(per_backend_mem_limit);
-  query_schedule_pb_->set_per_backend_mem_to_admit(per_backend_mem_to_admit);
   query_schedule_pb_->set_cluster_mem_est(GetClusterMemoryToAdmit());
+
+  // Validate fields are set.
+  DCHECK(query_schedule_pb_->has_per_backend_mem_to_admit());
+  DCHECK(query_schedule_pb_->has_coord_backend_mem_to_admit());
+  DCHECK(query_schedule_pb_->has_per_backend_mem_to_admit_source());
+  DCHECK(query_schedule_pb_->has_coord_backend_mem_to_admit_source());
 }
 
 void ScheduleState::set_executor_group(string executor_group) {
diff --git a/be/src/scheduling/schedule-state.h 
b/be/src/scheduling/schedule-state.h
index cbfde738b..f1a338526 100644
--- a/be/src/scheduling/schedule-state.h
+++ b/be/src/scheduling/schedule-state.h
@@ -253,6 +253,12 @@ class ScheduleState {
     return query_schedule_pb_->per_backend_mem_to_admit();
   }
 
+  /// Must call UpdateMemoryRequirements() at least once before calling this.
+  MemLimitSourcePB per_backend_mem_to_admit_source() const {
+    DCHECK(query_schedule_pb_->has_per_backend_mem_to_admit_source());
+    return query_schedule_pb_->per_backend_mem_to_admit_source();
+  }
+
   /// Must call UpdateMemoryRequirements() at least once before calling this.
   int64_t coord_backend_mem_limit() const {
     return query_schedule_pb_->coord_backend_mem_limit();
@@ -264,6 +270,12 @@ class ScheduleState {
     return query_schedule_pb_->coord_backend_mem_to_admit();
   }
 
+  /// Must call UpdateMemoryRequirements() at least once before calling this.
+  MemLimitSourcePB coord_backend_mem_to_admit_source() const {
+    DCHECK(query_schedule_pb_->has_coord_backend_mem_to_admit_source());
+    return query_schedule_pb_->coord_backend_mem_to_admit_source();
+  }
+
   void set_largest_min_reservation(const int64_t largest_min_reservation) {
     largest_min_reservation_ = largest_min_reservation;
   }
@@ -359,6 +371,19 @@ class ScheduleState {
   bool RequiresCoordinatorFragment() const {
     return request_.stmt_type == TStmtType::QUERY;
   }
+
+  /// Helper functions to update either
+  /// 'query_schedule_pb_->per_backend_mem_to_admit' or
+  /// 'query_schedule_pb_->coord_backend_mem_to_admit' along with the limiting
+  /// reason 'source'. The 'Max' variants raise the current value to 'new_limit'
+  /// if 'new_limit' is higher; the 'Min' variants lower it if 'new_limit' is lower.
+  void CompareMaxBackendMemToAdmit(
+      const int64_t new_limit, const MemLimitSourcePB source);
+  void CompareMinBackendMemToAdmit(
+      const int64_t new_limit, const MemLimitSourcePB source);
+  void CompareMaxCoordinatorMemToAdmit(
+      const int64_t new_limit, const MemLimitSourcePB source);
+  void CompareMinCoordinatorMemToAdmit(
+      const int64_t new_limit, const MemLimitSourcePB source);
 };
 
 } // namespace impala
diff --git a/be/src/scheduling/scheduler.cc b/be/src/scheduling/scheduler.cc
index 6cc002233..e13c437e5 100644
--- a/be/src/scheduling/scheduler.cc
+++ b/be/src/scheduling/scheduler.cc
@@ -82,6 +82,9 @@ static const vector<TPlanNodeType::type> 
SCAN_NODE_TYPES{TPlanNodeType::HDFS_SCA
 // candidates. See GetRemoteExecutorCandidates() for a deeper description.
 static const int MAX_ITERATIONS_PER_EXECUTOR_CANDIDATE = 8;
 
+const string Scheduler::PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION =
+    "Per Host Min Memory Reservation";
+
 Scheduler::Scheduler(MetricGroup* metrics, RequestPoolService* 
request_pool_service)
   : metrics_(metrics->GetOrCreateChildGroup("scheduler")),
     request_pool_service_(request_pool_service) {
@@ -1280,7 +1283,7 @@ void Scheduler::ComputeBackendExecParams(
                               << ") ";
   }
   state->summary_profile()->AddInfoString(
-      "Per Host Min Memory Reservation", min_mem_reservation_ss.str());
+      PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION, 
min_mem_reservation_ss.str());
   state->summary_profile()->AddInfoString(
       "Per Host Number of Fragment Instances", 
num_fragment_instances_ss.str());
 }
diff --git a/be/src/scheduling/scheduler.h b/be/src/scheduling/scheduler.h
index 7ed3baeee..9abcab40b 100644
--- a/be/src/scheduling/scheduler.h
+++ b/be/src/scheduling/scheduler.h
@@ -60,6 +60,8 @@ class SchedulerWrapper;
 ///           configuration.
 class Scheduler {
  public:
+  static const std::string PROFILE_INFO_KEY_PER_HOST_MIN_MEMORY_RESERVATION;
+
   Scheduler(MetricGroup* metrics, RequestPoolService* request_pool_service);
 
   /// Current snapshot of executors to be used for scheduling a scan.
diff --git a/common/protobuf/admission_control_service.proto 
b/common/protobuf/admission_control_service.proto
index e8eb2d16a..4954ac572 100644
--- a/common/protobuf/admission_control_service.proto
+++ b/common/protobuf/admission_control_service.proto
@@ -143,6 +143,23 @@ message FragmentExecParamsPB {
   optional RuntimeFilterAggregatorInfoPB filter_agg_info = 6;
 }
 
+// Enum describing the initialization source of per_backend_mem_to_admit
+// and coord_backend_mem_to_admit of QuerySchedulePB.
+enum MemLimitSourcePB {
+  NO_LIMIT = 0;
+  QUERY_OPTION_MEM_LIMIT = 1;
+  QUERY_PLAN_PER_HOST_MEM_ESTIMATE = 2;
+  ADJUSTED_PER_HOST_MEM_ESTIMATE = 3;
+  QUERY_PLAN_DEDICATED_COORDINATOR_MEM_ESTIMATE = 4;
+  ADJUSTED_DEDICATED_COORDINATOR_MEM_ESTIMATE = 5;
+  QUERY_OPTION_MEM_LIMIT_EXECUTORS = 6;
+  QUERY_OPTION_MEM_LIMIT_COORDINATORS = 7;
+  COORDINATOR_ONLY_OPTIMIZATION = 8;
+  POOL_CONFIG_MIN_QUERY_MEM_LIMIT = 9;
+  POOL_CONFIG_MAX_QUERY_MEM_LIMIT = 10;
+  HOST_MEM_TRACKER_LIMIT = 11;
+}
+
 // Contains the output from scheduling and admission control that is used by 
the
 // coordinator to start query execution.
 message QuerySchedulePB {
@@ -183,6 +200,12 @@ message QuerySchedulePB {
 
   // Mapping to store which data file is read on which hosts, grouped by scan 
node ID.
   map<int32, FilepathToHostsMapPB> by_node_filepath_to_hosts = 10;
+
+  // Source of per_backend_mem_to_admit.
+  optional MemLimitSourcePB per_backend_mem_to_admit_source = 11;
+
+  // Source of coord_backend_mem_to_admit.
+  optional MemLimitSourcePB coord_backend_mem_to_admit_source = 12;
 }
 
 message AdmitQueryRequestPB {
diff --git a/docs/topics/impala_admission.xml b/docs/topics/impala_admission.xml
index 58aef0d24..454341f60 100644
--- a/docs/topics/impala_admission.xml
+++ b/docs/topics/impala_admission.xml
@@ -221,7 +221,12 @@ under the License.
         <dlentry>
           <dt>Minimum Query Memory Limit and Maximum Query Memory Limit</dt>
           <dd>
-            <p>These two options determine the minimum and maximum per-host
+            <p>These are
+              <codeph>impala.admission-control.min-query-mem-limit.*</codeph>
+              and 
<codeph>impala.admission-control.max-query-mem-limit.*</codeph>
+              configurations in <filepath>llama-site.xml</filepath> (See
+              <xref href="impala_admission_config.xml#concept_cz4_vxz_jgb"/>).
+              They determine the minimum and maximum per-host
               memory limit that will be chosen by Impala Admission control for
               queries in this resource pool. If set, Impala Admission Control
               will choose a memory limit between the minimum and maximum values
@@ -275,13 +280,16 @@ under the License.
       <dl>
         <dlentry>
           <dt> Clamp MEM_LIMIT Query Option</dt>
-          <dd>If this field is not selected, the <codeph>MEM_LIMIT</codeph>
-            query option will not be bounded by the <b>Maximum Query Memory
-              Limit</b> and the <b>Minimum Query Memory Limit</b> values
-            specified for this resource pool. By default, this field is 
selected
-            in Impala 3.1 and higher. The field is disabled if both <b>Minimum
-              Query Memory Limit</b> and <b>Maximum Query Memory Limit</b> are
-            not set.</dd>
+          <dd>This is the
+            <codeph>impala.admission-control.clamp-mem-limit-query-option.*</codeph>
+            configuration in <filepath>llama-site.xml</filepath>.
+            If this configuration is set to <codeph>false</codeph>,
+            the <codeph>MEM_LIMIT</codeph> query option will not be bounded by 
the
+            <b>Maximum Query Memory Limit</b> and the <b>Minimum Query Memory 
Limit</b>
+            values specified for this resource pool. By default, this 
configuration is
+            set to <codeph>true</codeph> in Impala 3.1 and higher. This 
configuration
+            is ignored if both <b>Minimum Query Memory Limit</b> and
+            <b>Maximum Query Memory Limit</b> are not set.</dd>
         </dlentry>
       </dl>
       <p
@@ -298,7 +306,7 @@ under the License.
     <title>Setting Per-query Memory Limits</title>
     <conbody>
       <p>Use per-query memory limits to prevent queries from consuming 
excessive
-        memory resources that impact other queries. We recommends that you set
+        memory resources that impact other queries. We recommend that you set
         the query memory limits whenever possible.</p>
       <p>If you set the <b>Max Memory</b> for a resource pool, Impala attempts
         to throttle queries if there is not enough memory to run them within 
the
@@ -314,6 +322,51 @@ under the License.
         perform poorly or even be cancelled.</p>
     </conbody>
   </concept>
+  <concept id="examples_of_query_rejection_by_admission_control">
+    <title>Examples of Query Rejection by Admission Control</title>
+    <conbody>
+      <dl>
+        <dlentry>
+          <dt>The minimum memory to start a query is not satisfied</dt>
+          <dd>
+            <p>Impala will attempt to start a query as long as the minimum 
memory
+              requirement to run that query can be satisfied by all executor 
nodes.
+              In the event that Admission Control determines that the minimum
+              memory requirement cannot be satisfied by existing memory limit
+              configurations
+              (<codeph>MEM_LIMIT</codeph> query option or other memory limit
+              configurations at request pool) or available system memory in 
one or more
+              executor nodes, it will reject the query and the query will not 
execute
+              at all. Admission Control will return an error message 
describing what is
+              happening and recommend which configuration to adjust so that 
the query
+              can pass Admission Control. Take a look at the last query 
examples from
+              <xref href="impala_mem_limit.xml"/></p>
+<codeblock rev="">
+[localhost:21000] > set mem_limit=15mb;
+MEM_LIMIT set to 15mb
+[localhost:21000] > select count(distinct c_name) from customer;
+Query: select count(distinct c_name) from customer
+ERROR:
+Rejected query from pool default-pool: minimum memory reservation is greater 
than memory available to the query
+for buffer reservations. Memory reservation needed given the current plan: 
38.00 MB. Adjust MEM_LIMIT option
+for the query to allow the query memory limit to be at least 70.00 MB. Note 
that changing the memory limit may
+also change the plan. See 'Per Host Min Memory Reservation' in the query 
profile for more information about the
+per-node memory requirements.</codeblock>
+            <p>Admission Control rejects this query because 
<codeph>MEM_LIMIT</codeph>
+              is set too low such that it is insufficient to start the query, 
which
+              requires 70.00 MB (38.00 MB + 32.00 MB overhead) at minimum for 
one or more
+              executor nodes. The error message contains recommendations on 
what
+              configuration to adjust depending on which limitation causes 
rejection.
+              In this case, Admission Controller recommends raising query 
option
+              <codeph>MEM_LIMIT</codeph> &gt;= 70mb so that the minimum memory
+              requirement is satisfied to start the query.
+              Users can also inspect the 'Per Host Min Memory Reservation' info in
+              the query
+              profile to check which executor node(s) require 38.00 MB minimum
memory
+              reservation.</p>
+          </dd>
+        </dlentry>
+      </dl>
+    </conbody>
+  </concept>
 
   <concept id="admission_yarn">
 
diff --git a/docs/topics/impala_mem_limit.xml b/docs/topics/impala_mem_limit.xml
index d3a5db5fb..af8605bd7 100644
--- a/docs/topics/impala_mem_limit.xml
+++ b/docs/topics/impala_mem_limit.xml
@@ -205,10 +205,10 @@ MEM_LIMIT set to 15mb
 Query: select count(distinct c_name) from customer
 ERROR:
 Rejected query from pool default-pool: minimum memory reservation is greater 
than memory available to the query
-for buffer reservations. Memory reservation needed given the current plan: 
38.00 MB. Adjust either the mem_limit
-or the pool config (max-query-mem-limit, min-query-mem-limit) for the query to 
allow the query memory limit to be
-at least 70.00 MB. Note that changing the mem_limit may also change the plan. 
See the query profile for more
-information about the per-node memory requirements.</codeblock>
+for buffer reservations. Memory reservation needed given the current plan: 
38.00 MB. Adjust MEM_LIMIT option
+for the query to allow the query memory limit to be at least 70.00 MB. Note 
that changing the memory limit may
+also change the plan. See 'Per Host Min Memory Reservation' in the query 
profile for more information about the
+per-node memory requirements.</codeblock>
 </conbody>
 <concept id="mem_limit_executors">
   <title>MEM_LIMIT_EXECUTORS Query Option</title>
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
 
b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
index c38ec124d..19391d597 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
@@ -32,11 +32,11 @@ select * from functional_parquet.alltypes limit 1;
 ---- CATCH
 Rejected query from pool root.poolLowMaxLimit: minimum memory reservation is 
greater than
  memory available to the query for buffer reservations. Memory reservation 
needed given
- the current plan: 88.00 KB. Adjust either the mem_limit or the pool config
- (max-query-mem-limit, min-query-mem-limit) for the query to allow the query
- memory limit to be at least 32.09 MB. Note that changing the mem_limit may 
also change
- the plan. See the query profile for more information about the per-node memory
- requirements.
+ the current plan: 88.00 KB. Adjust the 
impala.admission-control.max-query-mem-limit of
+ request pool 'root.poolLowMaxLimit' for the query to allow the query memory 
limit to be
+ at least 32.09 MB. Note that changing the memory limit may also change the 
plan. See
+ 'Per Host Min Memory Reservation' in the query profile for more information 
about the
+ per-node memory requirements.
 ====
 ---- QUERY
 # No mem_limit set
@@ -92,11 +92,10 @@ select * from functional_parquet.alltypes limit 1;
 ---- CATCH
 Rejected query from pool root.poolLowMinLimit: minimum memory reservation is 
greater than
  memory available to the query for buffer reservations. Memory reservation 
needed given
- the current plan: 88.00 KB. Adjust either the mem_limit or the pool config
- (max-query-mem-limit, min-query-mem-limit) for the query to allow the query
- memory limit to be at least 32.09 MB. Note that changing the mem_limit may 
also change
- the plan. See the query profile for more information about the per-node memory
- requirements.
+ the current plan: 88.00 KB. Adjust the MEM_LIMIT option for the query to 
allow the query
+ memory limit to be at least 32.09 MB. Note that changing the memory limit may 
also
+ change the plan. See 'Per Host Min Memory Reservation' in the query profile 
for more
+ information about the per-node memory requirements.
 ====
 ---- QUERY
 # mem_limit is set and pool.clamp_mem_limit_query_option is true
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
 
b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
index 5deb608f1..dbbdde9c9 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
@@ -5,9 +5,10 @@ select distinct * from functional_parquet.alltypesagg
 ---- CATCH
 minimum memory reservation is greater than memory available to the
  query for buffer reservations. Memory reservation needed given the
- current plan: 68.09 MB. Adjust either the mem_limit or the pool config
- (max-query-mem-limit, min-query-mem-limit) for the query to allow the query
- memory limit to be at least 100.09 MB.
+ current plan: 68.09 MB. Adjust the MEM_LIMIT option for the query to allow 
the query
+ memory limit to be at least 100.09 MB. Note that changing the memory limit
+ may also change the plan. See 'Per Host Min Memory Reservation' in the query
+ profile for more information about the per-node memory requirements.
 ====
 ---- QUERY
 set mem_limit=150mb;
@@ -15,7 +16,9 @@ select distinct * from functional_parquet.alltypesagg
 ---- CATCH
 minimum memory reservation needed is greater than pool max mem resources.
  Pool max mem resources: 40.00 MB.
- Cluster-wide memory reservation needed: 208.26 MB
+ Cluster-wide memory reservation needed: 208.26 MB.
+ Increase the pool max mem resources. See 'Per Host Min Memory Reservation' in 
the query
+ profile for more information about the per-node memory requirements.
 ====
 ---- QUERY
 set buffer_pool_limit=10mb;
@@ -32,9 +35,10 @@ from tpch_parquet.lineitem join tpch_parquet.orders on 
l_orderkey = o_orderkey
 ---- CATCH
 minimum memory reservation is greater than memory available to the
  query for buffer reservations. Memory reservation needed given the
- current plan: 44.00 MB. Adjust either the mem_limit or the pool config
- (max-query-mem-limit, min-query-mem-limit) for the query to allow the query
- memory limit to be at least 76.00 MB.
+ current plan: 44.00 MB. Adjust the MEM_LIMIT option for the query to allow 
the query
+ memory limit to be at least 76.00 MB. Note that changing the memory limit
+ may also change the plan. See 'Per Host Min Memory Reservation' in the query
+ profile for more information about the per-node memory requirements.
 ====
 ---- QUERY
 set mem_limit=50mb;
@@ -43,7 +47,8 @@ from tpch_parquet.lineitem join tpch_parquet.orders on 
l_orderkey = o_orderkey
 ---- CATCH
 minimum memory reservation is greater than memory available to the
  query for buffer reservations. Memory reservation needed given the
- current plan: 43.00 MB. Adjust either the mem_limit or the pool config
- (max-query-mem-limit, min-query-mem-limit) for the query to allow the query
- memory limit to be at least 75.00 MB.
+ current plan: 43.00 MB. Adjust the MEM_LIMIT option for the query to allow 
the query
+ memory limit to be at least 75.00 MB. Note that changing the memory limit
+ may also change the plan. See 'Per Host Min Memory Reservation' in the query
+ profile for more information about the per-node memory requirements.
 ====
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
 
b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
index a3eddef75..c6943714c 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
@@ -32,7 +32,8 @@ select STRAIGHT_JOIN * from alltypes a join [SHUFFLE] 
alltypes b
 ---- CATCH
 row_regex:.*minimum memory reservation on backend '.*' is greater than memory 
available to
  the query for buffer reservations\. Increase the buffer_pool_limit to 294.17 
MB\. See
- the query profile for more information about the per-node memory 
requirements\.
+ 'Per Host Min Memory Reservation' in the query profile for more information 
about the
+ per-node memory requirements\.
 ====
 ---- QUERY
 # Confirm that with broadcast join, memory limit is not hit.
@@ -66,7 +67,8 @@ select STRAIGHT_JOIN * from alltypes a join [SHUFFLE] 
alltypes b
 ---- CATCH
 row_regex:.*minimum memory reservation on backend '.*' is greater than memory 
available to
  the query for buffer reservations\. Increase the buffer_pool_limit to 390.11 
MB\. See
- the query profile for more information about the per-node memory 
requirements\.
+ 'Per Host Min Memory Reservation' in the query profile for more information 
about the
+ per-node memory requirements\.
 ====
 ---- QUERY
 # Confirm that with broadcast join, memory limit is not hit.

Reply via email to