Repository: incubator-impala
Updated Branches:
  refs/heads/master d5b0c6b93 -> a98b90bd3


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test b/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
index 58fe1bf..920195b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
@@ -21,7 +21,7 @@ Per-Host Resources: mem-estimate=24.00MB mem-reservation=1.06MB
 |  hash predicates: c_nationkey = n_nationkey
 |  fk/pk conjuncts: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
-|  mem-estimate=3.15KB mem-reservation=1.06MB
+|  mem-estimate=3.15KB mem-reservation=1.06MB spill-buffer=64.00KB
 |  tuple-ids=0,1 row-size=355B cardinality=150000
 |
 |--03:EXCHANGE [BROADCAST]
@@ -66,7 +66,7 @@ Per-Host Resources: mem-estimate=48.01MB mem-reservation=2.12MB
 |  hash predicates: c_nationkey = n_nationkey
 |  fk/pk conjuncts: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
-|  mem-estimate=3.15KB mem-reservation=1.06MB
+|  mem-estimate=3.15KB mem-reservation=1.06MB spill-buffer=64.00KB
 |  tuple-ids=0,1 row-size=355B cardinality=150000
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -104,7 +104,7 @@ select straight_join *
 from tpch_parquet.lineitem
     left join tpch_parquet.orders on l_orderkey = o_orderkey
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=136.00MB
+Per-Host Resource Reservation: Memory=34.00MB
 Per-Host Resource Estimates: Memory=420.41MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -117,11 +117,11 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1N row-size=454B cardinality=6001215
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
-Per-Host Resources: mem-estimate=380.41MB mem-reservation=136.00MB
+Per-Host Resources: mem-estimate=380.41MB mem-reservation=34.00MB
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
-|  mem-estimate=300.41MB mem-reservation=136.00MB
+|  mem-estimate=300.41MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1N row-size=454B cardinality=6001215
 |
 |--03:EXCHANGE [BROADCAST]
@@ -146,7 +146,7 @@ Per-Host Resources: mem-estimate=380.41MB mem-reservation=136.00MB
    mem-estimate=80.00MB mem-reservation=0B
    tuple-ids=0 row-size=263B cardinality=6001215
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=272.00MB
+Per-Host Resource Reservation: Memory=68.00MB
 Per-Host Resource Estimates: Memory=840.83MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -159,12 +159,12 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1N row-size=454B cardinality=6001215
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
-Per-Host Resources: mem-estimate=760.83MB mem-reservation=272.00MB
+Per-Host Resources: mem-estimate=760.83MB mem-reservation=68.00MB
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash-table-id=00
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
-|  mem-estimate=300.41MB mem-reservation=136.00MB
+|  mem-estimate=300.41MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1N row-size=454B cardinality=6001215
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -219,7 +219,7 @@ Per-Host Resources: mem-estimate=18.69MB mem-reservation=34.00MB
 |  hash predicates: o_custkey = c_custkey
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
-|  mem-estimate=18.69MB mem-reservation=34.00MB
+|  mem-estimate=18.69MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 |--04:EXCHANGE [HASH(c_custkey)]
@@ -270,7 +270,7 @@ Per-Host Resources: mem-estimate=18.69MB mem-reservation=34.00MB
 |  hash predicates: o_custkey = c_custkey
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
-|  mem-estimate=9.35MB mem-reservation=17.00MB
+|  mem-estimate=9.35MB mem-reservation=17.00MB spill-buffer=1.00MB
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 |--F04:PLAN FRAGMENT [HASH(o_custkey)] hosts=1 instances=2
@@ -314,7 +314,7 @@ select straight_join *
 from tpch_parquet.orders
     join /*+broadcast*/ tpch_parquet.customer on o_custkey = c_custkey
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=68.00MB
+Per-Host Resource Reservation: Memory=34.00MB
 Per-Host Resource Estimates: Memory=101.38MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -327,12 +327,12 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
-Per-Host Resources: mem-estimate=77.38MB mem-reservation=68.00MB
+Per-Host Resources: mem-estimate=77.38MB mem-reservation=34.00MB
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
-|  mem-estimate=37.38MB mem-reservation=68.00MB
+|  mem-estimate=37.38MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 |--03:EXCHANGE [BROADCAST]
@@ -358,7 +358,7 @@ Per-Host Resources: mem-estimate=77.38MB mem-reservation=68.00MB
    mem-estimate=40.00MB mem-reservation=0B
    tuple-ids=0 row-size=191B cardinality=1500000
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=136.00MB
+Per-Host Resource Reservation: Memory=68.00MB
 Per-Host Resource Estimates: Memory=202.76MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -371,13 +371,13 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
-Per-Host Resources: mem-estimate=154.76MB mem-reservation=136.00MB
+Per-Host Resources: mem-estimate=154.76MB mem-reservation=68.00MB
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash-table-id=00
 |  hash predicates: o_custkey = c_custkey
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
-|  mem-estimate=37.38MB mem-reservation=68.00MB
+|  mem-estimate=37.38MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1 row-size=428B cardinality=1500000
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -415,7 +415,7 @@ select straight_join *
 from functional_parquet.alltypes
     left join functional_parquet.alltypestiny on alltypes.id = alltypestiny.id
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=136.00MB
+Per-Host Resource Reservation: Memory=34.00MB
 Per-Host Resource Estimates: Memory=2.03GB
 WARNING: The following tables are missing relevant table and/or column statistics.
 functional_parquet.alltypes, functional_parquet.alltypestiny
@@ -430,11 +430,11 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1N row-size=176B cardinality=unavailable
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
-Per-Host Resources: mem-estimate=2.02GB mem-reservation=136.00MB
+Per-Host Resources: mem-estimate=2.02GB mem-reservation=34.00MB
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: alltypes.id = alltypestiny.id
 |  fk/pk conjuncts: assumed fk/pk
-|  mem-estimate=2.00GB mem-reservation=136.00MB
+|  mem-estimate=2.00GB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1N row-size=176B cardinality=unavailable
 |
 |--03:EXCHANGE [BROADCAST]
@@ -459,7 +459,7 @@ Per-Host Resources: mem-estimate=2.02GB mem-reservation=136.00MB
    mem-estimate=16.00MB mem-reservation=0B
    tuple-ids=0 row-size=88B cardinality=unavailable
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=272.00MB
+Per-Host Resource Reservation: Memory=68.00MB
 Per-Host Resource Estimates: Memory=4.06GB
 WARNING: The following tables are missing relevant table and/or column statistics.
 functional_parquet.alltypestiny
@@ -474,12 +474,12 @@ PLAN-ROOT SINK
 |  tuple-ids=0,1N row-size=176B cardinality=unavailable
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
-Per-Host Resources: mem-estimate=4.03GB mem-reservation=272.00MB
+Per-Host Resources: mem-estimate=4.03GB mem-reservation=68.00MB
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash-table-id=00
 |  hash predicates: alltypes.id = alltypestiny.id
 |  fk/pk conjuncts: assumed fk/pk
-|  mem-estimate=2.00GB mem-reservation=136.00MB
+|  mem-estimate=2.00GB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=0,1N row-size=176B cardinality=unavailable
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -516,7 +516,7 @@ select c_nationkey, avg(c_acctbal)
 from tpch_parquet.customer
 group by c_nationkey
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=2.12MB
+Per-Host Resource Reservation: Memory=1.12MB
 Per-Host Resource Estimates: Memory=44.00MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -529,11 +529,11 @@ PLAN-ROOT SINK
 |  tuple-ids=2 row-size=10B cardinality=25
 |
 F01:PLAN FRAGMENT [HASH(c_nationkey)] hosts=1 instances=1
-Per-Host Resources: mem-estimate=10.00MB mem-reservation=2.12MB
+Per-Host Resources: mem-estimate=10.00MB mem-reservation=1.12MB
 03:AGGREGATE [FINALIZE]
 |  output: avg:merge(c_acctbal)
 |  group by: c_nationkey
-|  mem-estimate=10.00MB mem-reservation=2.12MB
+|  mem-estimate=10.00MB mem-reservation=1.12MB spill-buffer=64.00KB
 |  tuple-ids=2 row-size=10B cardinality=25
 |
 02:EXCHANGE [HASH(c_nationkey)]
@@ -545,7 +545,7 @@ Per-Host Resources: mem-estimate=34.00MB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  output: avg(c_acctbal)
 |  group by: c_nationkey
-|  mem-estimate=10.00MB mem-reservation=0B
+|  mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=10B cardinality=25
 |
 00:SCAN HDFS [tpch_parquet.customer, RANDOM]
@@ -556,7 +556,7 @@ Per-Host Resources: mem-estimate=34.00MB mem-reservation=0B
    mem-estimate=24.00MB mem-reservation=0B
    tuple-ids=0 row-size=10B cardinality=150000
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=4.25MB
+Per-Host Resource Reservation: Memory=2.25MB
 Per-Host Resource Estimates: Memory=88.00MB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -569,11 +569,11 @@ PLAN-ROOT SINK
 |  tuple-ids=2 row-size=10B cardinality=25
 |
 F01:PLAN FRAGMENT [HASH(c_nationkey)] hosts=1 instances=2
-Per-Host Resources: mem-estimate=20.00MB mem-reservation=4.25MB
+Per-Host Resources: mem-estimate=20.00MB mem-reservation=2.25MB
 03:AGGREGATE [FINALIZE]
 |  output: avg:merge(c_acctbal)
 |  group by: c_nationkey
-|  mem-estimate=10.00MB mem-reservation=2.12MB
+|  mem-estimate=10.00MB mem-reservation=1.12MB spill-buffer=64.00KB
 |  tuple-ids=2 row-size=10B cardinality=25
 |
 02:EXCHANGE [HASH(c_nationkey)]
@@ -585,7 +585,7 @@ Per-Host Resources: mem-estimate=68.00MB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  output: avg(c_acctbal)
 |  group by: c_nationkey
-|  mem-estimate=10.00MB mem-reservation=0B
+|  mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=10B cardinality=25
 |
 00:SCAN HDFS [tpch_parquet.customer, RANDOM]
@@ -603,7 +603,7 @@ from tpch_parquet.lineitem
 group by 1, 2
 having count(*) = 1
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=83.00MB
+Per-Host Resource Reservation: Memory=51.00MB
 Per-Host Resource Estimates: Memory=205.28MB
 
 F04:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -616,12 +616,12 @@ PLAN-ROOT SINK
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 F03:PLAN FRAGMENT [HASH(l_orderkey,o_orderstatus)] hosts=3 instances=3
-Per-Host Resources: mem-estimate=18.04MB mem-reservation=66.00MB
+Per-Host Resources: mem-estimate=18.04MB mem-reservation=34.00MB
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: l_orderkey, o_orderstatus
 |  having: count(*) = 1
-|  mem-estimate=18.04MB mem-reservation=66.00MB
+|  mem-estimate=18.04MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 06:EXCHANGE [HASH(l_orderkey,o_orderstatus)]
@@ -633,14 +633,14 @@ Per-Host Resources: mem-estimate=67.24MB mem-reservation=17.00MB
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: l_orderkey, o_orderstatus
-|  mem-estimate=54.12MB mem-reservation=0B
+|  mem-estimate=54.12MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
-|  mem-estimate=13.11MB mem-reservation=17.00MB
+|  mem-estimate=13.11MB mem-reservation=17.00MB spill-buffer=1.00MB
 |  tuple-ids=0,1 row-size=33B cardinality=5757710
 |
 |--05:EXCHANGE [HASH(o_orderkey)]
@@ -672,7 +672,7 @@ Per-Host Resources: mem-estimate=80.00MB mem-reservation=0B
    mem-estimate=80.00MB mem-reservation=0B
    tuple-ids=0 row-size=8B cardinality=6001215
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=83.00MB
+Per-Host Resource Reservation: Memory=51.00MB
 Per-Host Resource Estimates: Memory=327.24MB
 
 F04:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -685,12 +685,12 @@ PLAN-ROOT SINK
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 F03:PLAN FRAGMENT [HASH(l_orderkey,o_orderstatus)] hosts=3 instances=6
-Per-Host Resources: mem-estimate=20.00MB mem-reservation=66.00MB
+Per-Host Resources: mem-estimate=20.00MB mem-reservation=34.00MB
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: l_orderkey, o_orderstatus
 |  having: count(*) = 1
-|  mem-estimate=10.00MB mem-reservation=33.00MB
+|  mem-estimate=10.00MB mem-reservation=17.00MB spill-buffer=1.00MB
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 06:EXCHANGE [HASH(l_orderkey,o_orderstatus)]
@@ -702,7 +702,7 @@ Per-Host Resources: mem-estimate=67.24MB mem-reservation=17.00MB
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: l_orderkey, o_orderstatus
-|  mem-estimate=27.06MB mem-reservation=0B
+|  mem-estimate=27.06MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=2 row-size=33B cardinality=4690314
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -710,7 +710,7 @@ Per-Host Resources: mem-estimate=67.24MB mem-reservation=17.00MB
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
-|  mem-estimate=6.56MB mem-reservation=8.50MB
+|  mem-estimate=6.56MB mem-reservation=8.50MB spill-buffer=512.00KB
 |  tuple-ids=0,1 row-size=33B cardinality=5757710
 |
 |--F05:PLAN FRAGMENT [HASH(l_orderkey)] hosts=2 instances=4
@@ -753,7 +753,7 @@ Per-Host Resources: mem-estimate=160.00MB mem-reservation=0B
 select distinct *
 from tpch_parquet.lineitem
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=264.00MB
+Per-Host Resource Reservation: Memory=34.00MB
 Per-Host Resource Estimates: Memory=3.31GB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -766,10 +766,10 @@ PLAN-ROOT SINK
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 F01:PLAN FRAGMENT [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)] hosts=3 instances=3
-Per-Host Resources: mem-estimate=1.62GB mem-reservation=264.00MB
+Per-Host Resources: mem-estimate=1.62GB mem-reservation=34.00MB
 03:AGGREGATE [FINALIZE]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
-|  mem-estimate=1.62GB mem-reservation=264.00MB
+|  mem-estimate=1.62GB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 02:EXCHANGE [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)]
@@ -780,7 +780,7 @@ F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=1.69GB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
-|  mem-estimate=1.62GB mem-reservation=0B
+|  mem-estimate=1.62GB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
@@ -791,7 +791,7 @@ Per-Host Resources: mem-estimate=1.69GB mem-reservation=0B
    mem-estimate=80.00MB mem-reservation=0B
    tuple-ids=0 row-size=263B cardinality=6001215
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=528.00MB
+Per-Host Resource Reservation: Memory=68.00MB
 Per-Host Resource Estimates: Memory=6.62GB
 
 F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -804,10 +804,10 @@ PLAN-ROOT SINK
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 F01:PLAN FRAGMENT [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)] hosts=3 instances=6
-Per-Host Resources: mem-estimate=3.23GB mem-reservation=528.00MB
+Per-Host Resources: mem-estimate=3.23GB mem-reservation=68.00MB
 03:AGGREGATE [FINALIZE]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
-|  mem-estimate=1.62GB mem-reservation=264.00MB
+|  mem-estimate=1.62GB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 02:EXCHANGE [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)]
@@ -818,7 +818,7 @@ F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=3.39GB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
-|  mem-estimate=1.62GB mem-reservation=0B
+|  mem-estimate=1.62GB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=263B cardinality=6001215
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
@@ -834,7 +834,7 @@ select string_col, count(*)
 from functional_parquet.alltypestiny
 group by string_col
 ---- DISTRIBUTEDPLAN
-Per-Host Resource Reservation: Memory=264.00MB
+Per-Host Resource Reservation: Memory=34.00MB
 Per-Host Resource Estimates: Memory=272.00MB
 WARNING: The following tables are missing relevant table and/or column statistics.
 functional_parquet.alltypestiny
@@ -849,11 +849,11 @@ PLAN-ROOT SINK
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 F01:PLAN FRAGMENT [HASH(string_col)] hosts=3 instances=3
-Per-Host Resources: mem-estimate=128.00MB mem-reservation=264.00MB
+Per-Host Resources: mem-estimate=128.00MB mem-reservation=34.00MB
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: string_col
-|  mem-estimate=128.00MB mem-reservation=264.00MB
+|  mem-estimate=128.00MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 02:EXCHANGE [HASH(string_col)]
@@ -865,7 +865,7 @@ Per-Host Resources: mem-estimate=144.00MB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: string_col
-|  mem-estimate=128.00MB mem-reservation=0B
+|  mem-estimate=128.00MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 00:SCAN HDFS [functional_parquet.alltypestiny, RANDOM]
@@ -876,7 +876,7 @@ Per-Host Resources: mem-estimate=144.00MB mem-reservation=0B
    mem-estimate=16.00MB mem-reservation=0B
    tuple-ids=0 row-size=16B cardinality=unavailable
 ---- PARALLELPLANS
-Per-Host Resource Reservation: Memory=528.00MB
+Per-Host Resource Reservation: Memory=68.00MB
 Per-Host Resource Estimates: Memory=544.00MB
 WARNING: The following tables are missing relevant table and/or column statistics.
 functional_parquet.alltypestiny
@@ -891,11 +891,11 @@ PLAN-ROOT SINK
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 F01:PLAN FRAGMENT [HASH(string_col)] hosts=3 instances=6
-Per-Host Resources: mem-estimate=256.00MB mem-reservation=528.00MB
+Per-Host Resources: mem-estimate=256.00MB mem-reservation=68.00MB
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: string_col
-|  mem-estimate=128.00MB mem-reservation=264.00MB
+|  mem-estimate=128.00MB mem-reservation=34.00MB spill-buffer=2.00MB
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 02:EXCHANGE [HASH(string_col)]
@@ -907,7 +907,7 @@ Per-Host Resources: mem-estimate=288.00MB mem-reservation=0B
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: string_col
-|  mem-estimate=128.00MB mem-reservation=0B
+|  mem-estimate=128.00MB mem-reservation=0B spill-buffer=2.00MB
 |  tuple-ids=1 row-size=24B cardinality=unavailable
 |
 00:SCAN HDFS [functional_parquet.alltypestiny, RANDOM]

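The reduced reservations above follow a simple pattern: each spilling join or aggregation now reserves a fixed number of spillable buffers, and the buffer size chosen for the operator is printed as the new spill-buffer=... annotation. A quick consistency check in Python (the 17-buffer figure is taken from the IMPALA-2708 test comment added to spilling.test further down; applying it to every operator here is an assumption):

# Sanity-check of the reservations above, assuming each spilling operator
# reserves 17 spillable buffers (the count named in the IMPALA-2708 test below).
KB, MB = 1024, 1024 * 1024

def min_reservation(num_buffers, buffer_size):
    # Minimum buffer-pool reservation for one spilling operator.
    return num_buffers * buffer_size

assert min_reservation(17, 2 * MB) == 34 * MB      # spill-buffer=2.00MB  -> 34.00MB
assert min_reservation(17, 1 * MB) == 17 * MB      # spill-buffer=1.00MB  -> 17.00MB
assert min_reservation(17, 64 * KB) == 1088 * KB   # spill-buffer=64.00KB -> 1.06MB
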
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
index 868d6ca..4c208a4 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
@@ -154,14 +154,14 @@ select id from functional.alltypes t1 where exists (
   where t1.id = t2.id)
 ---- PLAN
 F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
-|  Per-Host Resources: mem-estimate=160.00MB mem-reservation=136.00MB
+|  Per-Host Resources: mem-estimate=160.00MB mem-reservation=1.06MB
 PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B
 |
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
-|  mem-estimate=44B mem-reservation=136.00MB
+|  mem-estimate=44B mem-reservation=1.06MB spill-buffer=64.00KB
 |  tuple-ids=0 row-size=4B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/analytic-fns.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/analytic-fns.test b/testdata/workloads/functional-query/queries/QueryTest/analytic-fns.test
index e697914..27459ef 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/analytic-fns.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/analytic-fns.test
@@ -1583,18 +1583,20 @@ from functional.alltypestiny order by id
 int, bigint, bigint, double
 ====
 ---- QUERY
-# Regression test for IMPALA-2265, IMPALA-2559. The max_block_mgr_memory is tuned to
+# Regression test for IMPALA-2265, IMPALA-2559. The buffer_pool_limit is tuned to
 # reproduce the issue when running this query against functional_parquet.
-SET max_block_mgr_memory=16m;
+SET default_spillable_buffer_size=8m;
+SET buffer_pool_limit=16m;
 SELECT lag(-180, 13) over (ORDER BY t1.int_col ASC, t2.int_col ASC) AS int_col
 FROM functional_parquet.alltypes t1 CROSS JOIN functional_parquet.alltypes t2 LIMIT 10;
 ---- CATCH
-Memory limit exceeded
+Failed to get minimum memory reservation
 ====
 ---- QUERY
 # Check that the above query can succeed with the minimum buffers (3 buffers for sort,
-# 1 buffer for analytic).
-SET max_block_mgr_memory=32m;
+# 2 buffers for analytic).
+SET default_spillable_buffer_size=8m;
+SET buffer_pool_limit=40m;
 SELECT lag(-180, 13) over (ORDER BY t1.int_col ASC, t2.int_col ASC) AS int_col
 FROM functional_parquet.alltypes t1 CROSS JOIN functional_parquet.alltypes t2 LIMIT 10;
 ---- TYPES

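The two limits in the updated regression test line up with the comment's buffer counts: with default_spillable_buffer_size=8m the sort needs 3 buffers and the analytic 2, so 40m is exactly the minimum reservation, and 16m now fails up front with "Failed to get minimum memory reservation" instead of a mid-query "Memory limit exceeded". The arithmetic, as a sketch:

# Why buffer_pool_limit=16m fails while 40m succeeds, using the buffer
# counts stated in the test comments (3 for the sort, 2 for the analytic).
MB = 1024 * 1024
buffer_size = 8 * MB                  # default_spillable_buffer_size=8m
min_needed = (3 + 2) * buffer_size    # five minimum-sized spillable buffers
assert min_needed == 40 * MB          # matches buffer_pool_limit=40m
assert 16 * MB < min_needed           # 16m cannot grant the minimum reservation
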
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/explain-level0.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level0.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level0.test
index 64f9b45..122d928 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level0.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level0.test
@@ -5,7 +5,7 @@ explain
 select *
 from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 ---- RESULTS: VERIFY_IS_EQUAL
-'Per-Host Resource Reservation: Memory=136.00MB'
+'Per-Host Resource Reservation: Memory=34.00MB'
 'Per-Host Resource Estimates: Memory=476.41MB'
 ''
 'PLAN-ROOT SINK'

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
index f59962c..475758d 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
@@ -5,7 +5,7 @@ explain
 select *
 from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 ---- RESULTS: VERIFY_IS_EQUAL
-'Per-Host Resource Reservation: Memory=136.00MB'
+'Per-Host Resource Reservation: Memory=34.00MB'
 'Per-Host Resource Estimates: Memory=476.41MB'
 ''
 'PLAN-ROOT SINK'

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
index 2736543..2fa7576 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
@@ -5,7 +5,7 @@ explain
 select *
 from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 ---- RESULTS: VERIFY_IS_EQUAL
-'Per-Host Resource Reservation: Memory=136.00MB'
+'Per-Host Resource Reservation: Memory=34.00MB'
 'Per-Host Resource Estimates: Memory=476.41MB'
 ''
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
@@ -18,12 +18,12 @@ from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 '|  tuple-ids=0,1 row-size=454B cardinality=5757710'
 '|'
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
-'Per-Host Resources: mem-estimate=388.41MB mem-reservation=136.00MB'
+'Per-Host Resources: mem-estimate=388.41MB mem-reservation=34.00MB'
 '02:HASH JOIN [INNER JOIN, BROADCAST]'
 '|  hash predicates: l_orderkey = o_orderkey'
 '|  fk/pk conjuncts: l_orderkey = o_orderkey'
 '|  runtime filters: RF000 <- o_orderkey'
-'|  mem-estimate=300.41MB mem-reservation=136.00MB'
+'|  mem-estimate=300.41MB mem-reservation=34.00MB spill-buffer=2.00MB'
 '|  tuple-ids=0,1 row-size=454B cardinality=5757710'
 '|'
 '|--03:EXCHANGE [BROADCAST]'

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
index 31f4f5b..76d74ce 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
@@ -5,7 +5,7 @@ explain
 select *
 from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 ---- RESULTS: VERIFY_IS_EQUAL
-'Per-Host Resource Reservation: Memory=136.00MB'
+'Per-Host Resource Reservation: Memory=34.00MB'
 'Per-Host Resource Estimates: Memory=476.41MB'
 ''
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
@@ -18,14 +18,14 @@ from tpch.lineitem join tpch.orders on l_orderkey = o_orderkey;
 '     tuple-ids=0,1 row-size=454B cardinality=5757710'
 ''
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
-'Per-Host Resources: mem-estimate=388.41MB mem-reservation=136.00MB'
+'Per-Host Resources: mem-estimate=388.41MB mem-reservation=34.00MB'
 '  DATASTREAM SINK [FRAGMENT=F02, EXCHANGE=04, UNPARTITIONED]'
 '  |  mem-estimate=0B mem-reservation=0B'
 '  02:HASH JOIN [INNER JOIN, BROADCAST]'
 '  |  hash predicates: l_orderkey = o_orderkey'
 '  |  fk/pk conjuncts: l_orderkey = o_orderkey'
 '  |  runtime filters: RF000 <- o_orderkey'
-'  |  mem-estimate=300.41MB mem-reservation=136.00MB'
+'  |  mem-estimate=300.41MB mem-reservation=34.00MB spill-buffer=2.00MB'
 '  |  tuple-ids=0,1 row-size=454B cardinality=5757710'
 '  |'
 '  |--03:EXCHANGE [BROADCAST]'

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/nested-types-tpch.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/nested-types-tpch.test b/testdata/workloads/functional-query/queries/QueryTest/nested-types-tpch.test
index 626b315..c8a80b2 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/nested-types-tpch.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/nested-types-tpch.test
@@ -234,11 +234,10 @@ order by c_custkey
 bigint, bigint
 ====
 ---- QUERY
-# IMPALA-5446: dropped status from Sorter::Reset() when sort cannot get reserved buffer.
-# This query is designed to allow the initial subplan iterations to succeed, but have
-# later iterations fail because the aggregation outside the subplan has accumulated all
-# the memory.
-set max_block_mgr_memory=100m;
+# This was originally a regression test for IMPALA-5446: dropped status from
+# Sorter::Reset() when sort cannot get reserved buffer. However with the
+# IMPALA-3200 changes it now succeeds.
+set buffer_pool_limit=100m;
 select c_custkey, c_name, c_address, c_phone, c_acctbal, c_mktsegment, c_comment,
        o_orderdate, sum(o_totalprice), min(rnum)
 from customer c,
@@ -247,6 +246,17 @@ from customer c,
 group by 1, 2, 3, 4, 5, 6, 7, 8
 order by 9, 10 desc
 limit 10
----- CATCH
-Memory limit exceeded: Query did not have enough memory to get the minimum required buffers in the block manager.
+---- RESULTS
+3115,'Customer#000003115','oB 75yHls7ptt5zCheWJLQ','22-291-864-7521',8889.56,'BUILDING','ts are quickly across the bold deposits. carefully spe','1998-04-23',857.71,3
+53551,'Customer#000053551','e,fT3URuJDH,tE6a6Z3Pjg0DZMFSqWbtYgd','15-429-275-5686',1137.38,'FURNITURE',' detect evenly along the blithely pending asymptotes. furiously even notornis detect carefu','1992-04-18',866.90,25
+64043,'Customer#000064043','Snyi GOB00','22-446-332-2750',4627.24,'FURNITURE','the quickly express asymptotes are around the pe','1992-01-31',870.88,11
+107698,'Customer#000107698','stUoykCwpTBAO3OC3lw','33-686-199-1188',698.89,'AUTOMOBILE',' accounts eat carefully express packages. slyly even id','1993-11-21',875.52,15
+1351,'Customer#000001351','NYMFfkNlCGoTeaDrNO9nn','11-916-210-6616',3106.00,'FURNITURE',' accounts after the final deposits sleep fluffily ironic accoun','1994-01-14',877.30,13
+85468,'Customer#000085468','EuFCX4qk4k0O4bV3UHoNVBTP','23-876-106-3120',8926.31,'AUTOMOBILE','kages. slyly even requests according to the ironic, ironic accounts cajole furiou','1997-04-12',884.52,4
+148522,'Customer#000148522','PIDMm8ulW4oam3VsoZL4f ,dpAf3LEV','16-597-824-4946',-133.27,'BUILDING','ly quickly express deposits. regularly regular requests cajole carefully slyly even noto','1995-03-20',885.75,12
+83222,'Customer#000083222','vI3tUuqtUYGPfrXAYeonVD9','27-599-263-5978',289.66,'BUILDING','ost quietly idle foxes. packages at the slyly pending pa','1993-05-02',891.74,5
+25090,'Customer#000025090','92GyVjZZiCBUmn','23-396-651-8663',8497.56,'BUILDING','osits. slyly final pinto beans sleep carefully fluffily express deposits. packages affix. carefully spe','1995-08-12',895.39,15
+27490,'Customer#000027490','jRzZQ1z7T,nrX5F58P,ZH','26-121-240-6744',7512.30,'AUTOMOBILE','slyly quickly even pinto beans: pend','1995-07-25',896.59,14
+---- TYPES
+bigint,string,string,string,decimal,string,string,string,decimal,bigint
 ====

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filters_phj.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filters_phj.test b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filters_phj.test
index 8c8f770..66391a5 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filters_phj.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filters_phj.test
@@ -8,7 +8,7 @@
 # consumption / spilling behaviour.
 ####################################################
 
-SET MAX_BLOCK_MGR_MEMORY=275m;
+SET BUFFER_POOL_LIMIT=40m;
 SET RUNTIME_FILTER_MODE=GLOBAL;
 SET RUNTIME_FILTER_WAIT_TIME_MS=30000;
 SET RUNTIME_BLOOM_FILTER_SIZE=16M;
@@ -82,7 +82,8 @@ SET RUNTIME_FILTER_MODE=GLOBAL;
 SET RUNTIME_FILTER_WAIT_TIME_MS=30000;
 SET RUNTIME_FILTER_MIN_SIZE=128MB;
 SET RUNTIME_FILTER_MAX_SIZE=500MB;
-SET MEM_LIMIT=140MB;
+# Allocate enough memory for the join + filter + scan
+SET MEM_LIMIT=170MB;
 select STRAIGHT_JOIN * from alltypes a join [BROADCAST] alltypes b
     on a.month = b.id and b.int_col = -3
 ---- RESULTS

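The raised MEM_LIMIT in the second hunk above is dominated by the 128MB RUNTIME_FILTER_MIN_SIZE that the same test sets; the exact split between the filter, the join build, and the scan is not spelled out in the test, so the remainder below is an assumed budget, not a measured one:

# Illustrative accounting for MEM_LIMIT=170MB; only the 128MB filter size
# comes from the test options, the 42MB remainder is assumed.
MB = 1024 * 1024
bloom_filter = 128 * MB          # SET RUNTIME_FILTER_MIN_SIZE=128MB
join_and_scan = 42 * MB          # assumed headroom for the join build + scan
assert bloom_filter + join_and_scan == 170 * MB
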
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test b/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
index d0ac79d..14ad2ed 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
@@ -16,7 +16,7 @@ row_regex: .*RowsProduced: 10.99..\W10995\W
 # Test to verify that limit_ is correctly enforced when
 # output_batch is at AtCapacity.
 set batch_size=6;
-set max_block_mgr_memory=180m;
+set buffer_pool_limit=180m;
 select * from tpch.lineitem t1 full outer join tpch.lineitem t2 on
 t1.l_orderkey = t2.l_orderkey limit 10;
 ---- RUNTIME_PROFILE

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test b/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
index 74b7eee..93ed510 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
@@ -36,7 +36,7 @@ row_regex: .* SpilledRuns: [^0].*
 # Regression test for IMPALA-5554: first string column in sort tuple is null
 # on boundary of spilled block. Test does two sorts with a NULL and non-NULL
 # string column in both potential orders.
-set max_block_mgr_memory=50m;
+set buffer_pool_limit=50m;
 select *
 from (
   select *, first_value(col) over (order by sort_col) fv

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/functional-query/queries/QueryTest/spilling.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/spilling.test b/testdata/workloads/functional-query/queries/QueryTest/spilling.test
index 0f0e2ca..d8335c6 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/spilling.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/spilling.test
@@ -1,6 +1,6 @@
 ====
 ---- QUERY
-set max_block_mgr_memory=25m;
+set buffer_pool_limit=10m;
 select l_orderkey, count(*)
 from lineitem
 group by 1
@@ -21,15 +21,12 @@ BIGINT, BIGINT
 ---- RUNTIME_PROFILE
 # Verify that spilling and passthrough were activated.
 row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
-row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 row_regex: .*RowsPassedThrough: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test query with string grouping column and string agg columns
-# Could only get it to spill reliably with num_nodes=1.
-# TODO: revisit with new buffer pool.
+set buffer_pool_limit=10m;
 set num_nodes=1;
-set max_block_mgr_memory=25m;
 select l_returnflag, l_orderkey, avg(l_tax), min(l_shipmode)
 from lineitem
 group by 1,2
@@ -45,7 +42,7 @@ row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
 row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
-set max_block_mgr_memory=25m;
+set buffer_pool_limit=10m;
 select l_orderkey, count(*)
 from lineitem
 group by 1
@@ -65,15 +62,12 @@ order by 1 limit 10;
 BIGINT, BIGINT
 ---- RUNTIME_PROFILE
 row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
-row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 row_regex: .*RowsPassedThrough: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test query with string grouping column
-# Could only get it to spill reliably with num_nodes=1.
-# TODO: revisit with new buffer pool.
+set buffer_pool_limit=10m;
 set num_nodes=1;
-set max_block_mgr_memory=25m;
 select l_comment, count(*)
 from lineitem
 group by 1
@@ -92,10 +86,8 @@ row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test query with string grouping column and string agg columns
-# Could only get it to spill reliably with num_nodes=1.
-# TODO: revisit with new buffer pool.
+set buffer_pool_limit=10m;
 set num_nodes=1;
-set max_block_mgr_memory=25m;
 select l_returnflag, l_orderkey, round(avg(l_tax),2), min(l_shipmode)
 from lineitem
 group by 1,2
@@ -113,7 +105,7 @@ row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test with string intermediate state (avg() uses string intermediate value).
-set max_block_mgr_memory=25m;
+set buffer_pool_limit=10m;
 select l_orderkey, avg(l_orderkey)
 from lineitem
 group by 1
@@ -129,12 +121,10 @@ BIGINT, DOUBLE
 ---- RUNTIME_PROFILE
 # Verify that passthrough and spilling happened in the pre and merge agg.
 row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
-row_regex: .*NumRepartitions: .* \([1-9][0-9]*\)
 row_regex: .*RowsPassedThrough: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
-set num_nodes=0;
-set max_block_mgr_memory=100m;
+set buffer_pool_limit=15m;
 select count(l1.l_tax)
 from
 lineitem l1,
@@ -156,8 +146,7 @@ BIGINT
 row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
-set num_nodes=0;
-set max_block_mgr_memory=40m;
+set buffer_pool_limit=2m;
 select max(t1.total_count), max(t1.l_shipinstruct), max(t1.l_comment) from
 (select l_shipinstruct, l_comment, count(*) over () total_count from lineitem) t1
 ---- RESULTS
@@ -165,13 +165,12 @@ select max(t1.total_count), max(t1.l_shipinstruct), max(t1.l_comment) from
 ---- TYPES
 BIGINT, STRING, STRING
 ---- RUNTIME_PROFILE
-# Indirectly verify that the analytic spilled: if it spills a block, it must repin it.
-row_regex: .*PinTime: [1-9][0-9]*.*
+# Verify that the analytic spilled
+row_regex: .*PeakUnpinnedBytes: [1-9][0-9]*.*
 ====
 ---- QUERY
-# Run this query with very low memory. Since the tables are small, the PA/PHJ should be
-# using buffers much smaller than the io buffer.
-set max_block_mgr_memory=10m;
+# Run this query with very low memory, but enough not to spill.
+set buffer_pool_limit=20m;
 select a.int_col, count(*)
 from functional.alltypessmall a, functional.alltypessmall b, functional.alltypessmall c
 where a.id = b.id and b.id = c.id group by a.int_col
@@ -192,12 +180,11 @@ INT, BIGINT
 # This query is not meant to spill.
 row_regex: .*SpilledPartitions: 0 .*
 ====
----- QUERY: TPCH-Q21
+---- QUERY: TPCH-Q22
 # Adding TPCH-Q21 in the spilling test to check for IMPALA-1471 (spilling left anti
 # and left outer joins were returning wrong results).
 # Q21 - Suppliers Who Kept Orders Waiting Query
-set num_nodes=0;
-set max_block_mgr_memory=65m;
+set buffer_pool_limit=20m;
 select
   s_name,
   count(*) as numwait
@@ -347,8 +334,7 @@ row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test aggregation spill with group_concat distinct
-set num_nodes=1;
-set max_block_mgr_memory=100m;
+set buffer_pool_limit=50m;
 select l_orderkey, count(*), group_concat(distinct l_linestatus, '|')
 from lineitem
 group by 1
@@ -376,7 +362,6 @@ row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
 # nodes. CastToChar will do "local" memory allocation. Without the fix of
 # IMPALA-2612, the peak memory consumption will be higher.
 set mem_limit=800m;
-set num_nodes=1;
 set num_scanner_threads=1;
 select count(distinct concat(cast(l_comment as char(120)), cast(l_comment as char(120)),
                              cast(l_comment as char(120)), cast(l_comment as char(120)),
@@ -394,8 +379,7 @@ row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
 # Test sort with small char column materialized by exprs.
 # Set low memory limit to force spilling.
 # IMPALA-3332: comparator makes local allocations that cause runaway memory consumption.
-set num_nodes=0;
-set max_block_mgr_memory=4m;
+set buffer_pool_limit=4m;
 set mem_limit=200m;
 set disable_outermost_topn=1;
 select cast(l_comment as char(50))
@@ -432,9 +416,8 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # Test sort with small input char column materialized before sort.
-set num_nodes=0;
 set mem_limit=200m;
-set max_block_mgr_memory=4m;
+set buffer_pool_limit=4m;
 set disable_outermost_topn=1;
 select char_col
 from (select cast(l_comment as char(50)) char_col
@@ -472,9 +455,8 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 ---- QUERY
 # Test sort with large input char column materialized before sort.
 # Set low memory limit to force spilling.
-set num_nodes=0;
 set mem_limit=200m;
-set max_block_mgr_memory=4m;
+set buffer_pool_limit=4m;
 set disable_outermost_topn=1;
 select char_col
 from (select cast(l_comment as char(200)) char_col
@@ -512,8 +494,7 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 ---- QUERY
 # Test sort with varchar column materialized by exprs.
 # Set low memory limit to force spilling.
-set num_nodes=0;
-set max_block_mgr_memory=4m;
+set buffer_pool_limit=4m;
 # IMPALA-3332: comparator makes local allocations that cause runaway memory consumption.
 set mem_limit=200m;
 set disable_outermost_topn=1;
@@ -552,9 +533,8 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 ---- QUERY
 # Test sort with input varchar column materialized before sort.
 # Set low memory limit to force spilling.
-set num_nodes=0;
 set mem_limit=200m;
-set max_block_mgr_memory=4m;
+set buffer_pool_limit=4m;
 set disable_outermost_topn=1;
 select char_col
 from (select cast(l_comment as varchar(50)) char_col
@@ -592,9 +572,7 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 ---- QUERY
 # IMPALA-1346/IMPALA-1546: fix sorter memory management so that it can complete
 # successfully when in same pipeline as a spilling join.
-set num_nodes=0;
-set mem_limit=200m;
-set max_block_mgr_memory=50m;
+set buffer_pool_limit=50m;
 set disable_outermost_topn=1;
 select * from lineitem
   inner join orders on l_orderkey = o_orderkey
@@ -632,7 +610,7 @@ row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
 # Tests for the case where a spilled partition has 0 probe rows and so we don't build the
 # hash table in a partitioned hash join.
 # INNER JOIN
-set max_block_mgr_memory=10m;
+set buffer_pool_limit=10m;
 select straight_join count(*)
 from
 lineitem a, lineitem b
@@ -648,7 +626,7 @@ row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # spilled partition with 0 probe rows, NULL AWARE LEFT ANTI JOIN
-set max_block_mgr_memory=10m;
+set buffer_pool_limit=10m;
 select straight_join count(*)
 from
 lineitem a
@@ -664,7 +642,7 @@ row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # spilled partition with 0 probe rows, RIGHT OUTER JOIN
-set max_block_mgr_memory=10m;
+set buffer_pool_limit=10m;
 select straight_join count(*)
 from
 supplier right outer join lineitem on s_suppkey = l_suppkey
@@ -678,7 +656,7 @@ row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
 ====
 ---- QUERY
 # spilled partition with 0 probe rows, RIGHT ANTI JOIN
-set max_block_mgr_memory=30m;
+set buffer_pool_limit=30m;
 with x as (select * from supplier limit 10)
 select straight_join count(*)
 from
@@ -698,7 +676,7 @@ row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
 #   where l1.l_quantity = 31.0 and l1.l_tax = 0.03 and l1.l_orderkey <= 100000
 # order by l_orderkey, l_partkey, l_suppkey, l_linenumber
 # limit 5
-set max_block_mgr_memory=7m;
+set buffer_pool_limit=7m;
 set num_nodes=1;
 select straight_join l.*
 from
@@ -726,3 +704,16 @@ bigint,bigint,bigint,int,decimal,decimal,decimal,decimal,string,string,string,string,string,string,string,string
 1382,156162,6163,5,31.00,37762.96,0.07,0.03,'R','F','1993-10-26','1993-10-15','1993-11-09','TAKE BACK RETURN','FOB','hely regular dependencies. f'
 1509,186349,3904,6,31.00,44495.54,0.04,0.03,'A','F','1993-07-14','1993-08-21','1993-08-06','COLLECT COD','SHIP','ic deposits cajole carefully. quickly bold '
 ====
+---- QUERY
+# Test aggregation with minimum required reservation to exercise IMPALA-2708.
+# Merge aggregation requires 17 buffers. The buffer size is 256k for this test.
+set buffer_pool_limit=4352k;
+select count(*)
+from (select distinct * from orders) t
+---- TYPES
+BIGINT
+---- RESULTS
+1500000
+---- RUNTIME_PROFILE
+row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
+====

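The new IMPALA-2708 test pins the limit exactly at the merge aggregation's minimum reservation: 17 buffers of 256KB each is 4352KB, leaving zero slack, so the aggregation must spill successfully rather than fail. The row_regex lines are matched against the query's runtime profile; a minimal illustration of that style of check (the profile line shown is hypothetical output, not taken from a real run):

import re

assert 17 * 256 == 4352   # 17 buffers x 256KB matches buffer_pool_limit=4352k

# row_regex succeeds when the pattern matches some line of the runtime profile.
pattern = r".*SpilledPartitions: .* \([1-9][0-9]*\)"
profile_line = "   - SpilledPartitions: 3 (3)"   # hypothetical profile line
assert re.search(pattern, profile_line)
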
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/targeted-stress/queries/agg_stress.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/targeted-stress/queries/agg_stress.test b/testdata/workloads/targeted-stress/queries/agg_stress.test
index b2d45a9..a6657ba 100644
--- a/testdata/workloads/targeted-stress/queries/agg_stress.test
+++ b/testdata/workloads/targeted-stress/queries/agg_stress.test
@@ -1,7 +1,7 @@
 ====
 ---- QUERY
 # This memory limit causes a spill to happen for this query
-set max_block_mgr_memory=250m;
+set buffer_pool_limit=250m;
 # This query forces many joins and aggregations with spilling
 # and can expose race conditions in the spilling code if run in parallel
 select

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/testdata/workloads/tpch/queries/insert_parquet.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/tpch/queries/insert_parquet.test b/testdata/workloads/tpch/queries/insert_parquet.test
index 4707b7b..862548e 100644
--- a/testdata/workloads/tpch/queries/insert_parquet.test
+++ b/testdata/workloads/tpch/queries/insert_parquet.test
@@ -67,6 +67,8 @@ insert overwrite table test_insert_huge_vals
 ---- QUERY
 # Verify the values written to test_insert_huge_vals were as expected by counting
 # the results of an inner join of that table with the same query used in the insert.
+# Increase spillable buffer size to fit the large values on the right side of the hash join.
+set min_spillable_buffer_size=1m;
 select count(*) from
   (select cast(l_orderkey as string) s from tpch.lineitem union
   select group_concat(concat(s_name, s_address, s_phone)) from tpch.supplier union

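min_spillable_buffer_size matters for this query because a spillable buffer must be able to hold the largest single value on the build side, and the group_concat over every row of tpch.supplier yields one very long string. Rough numbers, where the per-supplier byte count is an estimate rather than anything stated in the test:

# Rough check that the group_concat result needs more than the 64KB
# spill-buffer floor seen in the planner tests above, but fits in 1MB.
KB = 1024
suppliers = 10000                  # rows in tpch.supplier at scale factor 1
est_bytes = suppliers * 60         # assumed ~60 bytes of name+address+phone per row
assert est_bytes > 64 * KB         # overflows a 64KB minimum buffer
assert est_bytes < 1024 * KB       # fits once min_spillable_buffer_size=1m
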
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/comparison/discrepancy_searcher.py
----------------------------------------------------------------------
diff --git a/tests/comparison/discrepancy_searcher.py b/tests/comparison/discrepancy_searcher.py
index ccbdd66..e0e1725 100755
--- a/tests/comparison/discrepancy_searcher.py
+++ b/tests/comparison/discrepancy_searcher.py
@@ -315,7 +315,7 @@ class QueryExecutor(object):
         SET DISABLE_STREAMING_PREAGGREGATIONS={disable_streaming_preaggregations};
         SET DISABLE_UNSAFE_SPILLS={disable_unsafe_spills};
         SET EXEC_SINGLE_NODE_ROWS_THRESHOLD={exec_single_node_rows_threshold};
-        SET MAX_BLOCK_MGR_MEMORY={max_block_mgr_memory};
+        SET BUFFER_POOL_LIMIT={buffer_pool_limit};
         SET MAX_IO_BUFFERS={max_io_buffers};
         SET MAX_SCAN_RANGE_LENGTH={max_scan_range_length};
         SET NUM_NODES={num_nodes};
@@ -333,7 +333,7 @@ class QueryExecutor(object):
             disable_streaming_preaggregations=choice((0, 1)),
             disable_unsafe_spills=choice((0, 1)),
             exec_single_node_rows_threshold=randint(1, 100000000),
-            max_block_mgr_memory=randint(1, 100000000),
+            buffer_pool_limit=randint(1, 100000000),
             max_io_buffers=randint(1, 100000000),
             max_scan_range_length=randint(1, 100000000),
             num_nodes=randint(3, 3),

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/custom_cluster/test_scratch_disk.py
----------------------------------------------------------------------
diff --git a/tests/custom_cluster/test_scratch_disk.py b/tests/custom_cluster/test_scratch_disk.py
index 7e02de5..579ca1e 100644
--- a/tests/custom_cluster/test_scratch_disk.py
+++ b/tests/custom_cluster/test_scratch_disk.py
@@ -40,7 +40,7 @@ class TestScratchDir(CustomClusterTestSuite):
   # Block manager memory limit that is low enough to force Impala to spill to disk when
   # executing spill_query and high enough that we can execute in_mem_query without
   # spilling.
-  max_block_mgr_memory = "64m"
+  buffer_pool_limit = "64m"
 
   def count_nonempty_dirs(self, dirs):
     count = 0
@@ -87,7 +87,7 @@ class TestScratchDir(CustomClusterTestSuite):
     self.assert_impalad_log_contains("INFO", "Using scratch directory ",
                                     expected_count=1)
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     impalad = self.cluster.get_any_impalad()
     client = impalad.service.create_beeswax_client()
     self.execute_query_expect_success(client, self.spill_query, exec_option)
@@ -100,7 +100,7 @@ class TestScratchDir(CustomClusterTestSuite):
     self.assert_impalad_log_contains("WARNING",
         "Running without spill to disk: no scratch directories provided\.")
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     impalad = self.cluster.get_any_impalad()
     client = impalad.service.create_beeswax_client()
     # Expect spill to disk to fail
@@ -121,7 +121,7 @@ class TestScratchDir(CustomClusterTestSuite):
     self.assert_impalad_log_contains("WARNING", "Could not remove and recreate directory "
             + ".*: cannot use it for scratch\. Error was: .*", expected_count=5)
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     impalad = self.cluster.get_any_impalad()
     client = impalad.service.create_beeswax_client()
     # Expect spill to disk to fail
@@ -144,7 +144,7 @@ class TestScratchDir(CustomClusterTestSuite):
         + "Encountered exception while verifying existence of directory path",
         expected_count=5)
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     impalad = self.cluster.get_any_impalad()
     client = impalad.service.create_beeswax_client()
     # Expect spill to disk to fail
@@ -164,7 +164,7 @@ class TestScratchDir(CustomClusterTestSuite):
     self.assert_impalad_log_contains("INFO", "Using scratch directory ",
                                     expected_count=len(dirs))
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     # Trigger errors when writing the first two directories.
     shutil.rmtree(dirs[0]) # Remove the first directory.
     # Make all subdirectories in the second directory non-writable.

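The renamed buffer_pool_limit attribute keeps the old bracketing idea: the limit must sit between the working sets of the two canned queries, low enough that spill_query is forced to spill and high enough that in_mem_query never does. Schematically, with hypothetical sizes:

# Hypothetical working-set sizes illustrating the 64m bracketing above.
MB = 1024 * 1024
buffer_pool_limit = 64 * MB
spill_query_buffers = 200 * MB     # assumed: exceeds the limit, so it spills
in_mem_query_buffers = 16 * MB     # assumed: fits comfortably under the limit
assert in_mem_query_buffers < buffer_pool_limit < spill_query_buffers
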
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/custom_cluster/test_spilling.py
----------------------------------------------------------------------
diff --git a/tests/custom_cluster/test_spilling.py b/tests/custom_cluster/test_spilling.py
deleted file mode 100644
index 774e83f..0000000
--- a/tests/custom_cluster/test_spilling.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import pytest
-from copy import deepcopy
-
-from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.test_dimensions import (
-    create_single_exec_option_dimension,
-    create_parquet_dimension)
-
-class TestSpilling(CustomClusterTestSuite):
-  @classmethod
-  def get_workload(self):
-    return 'functional-query'
-
-  @classmethod
-  def add_test_dimensions(cls):
-    super(TestSpilling, cls).add_test_dimensions()
-    cls.ImpalaTestMatrix.clear_constraints()
-    cls.ImpalaTestMatrix.add_dimension(create_parquet_dimension('tpch'))
-    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
-
-  # Reduce the IO read size. This reduces the memory required to trigger spilling.
-  @pytest.mark.execute_serially
-  @CustomClusterTestSuite.with_args(
-      impalad_args="--read_size=200000",
-      catalogd_args="--load_catalog_in_background=false")
-  def test_spilling(self, vector):
-    new_vector = deepcopy(vector)
-    # remove this. the test cases set this explicitly.
-    del new_vector.get_value('exec_option')['num_nodes']
-    self.run_test_case('QueryTest/spilling', new_vector)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_cancellation.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_cancellation.py b/tests/query_test/test_cancellation.py
index bb1fc0d..91be5d4 100644
--- a/tests/query_test/test_cancellation.py
+++ b/tests/query_test/test_cancellation.py
@@ -52,7 +52,7 @@ DEBUG_ACTIONS = [None, 'WAIT']
 # Extra dimensions to test order by without limit
 SORT_QUERY = 'select * from lineitem order by l_orderkey'
 SORT_CANCEL_DELAY = range(6, 10)
-SORT_BLOCK_MGR_LIMIT = ['0', '300m'] # Test spilling and non-spilling sorts.
+SORT_BUFFER_POOL_LIMIT = ['0', '300m'] # Test spilling and non-spilling sorts.
 
 class TestCancellation(ImpalaTestSuite):
   @classmethod
@@ -71,7 +71,7 @@ class TestCancellation(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         ImpalaTestDimension('action', *DEBUG_ACTIONS))
     cls.ImpalaTestMatrix.add_dimension(
-        ImpalaTestDimension('max_block_mgr_memory', 0))
+        ImpalaTestDimension('buffer_pool_limit', 0))
 
     cls.ImpalaTestMatrix.add_constraint(
         lambda v: v.get_value('query_type') != 'CTAS' or (\
@@ -121,8 +121,8 @@ class TestCancellation(ImpalaTestSuite):
     debug_action = '0:GETNEXT:' + action if action != None else ''
     vector.get_value('exec_option')['debug_action'] = debug_action
 
-    vector.get_value('exec_option')['max_block_mgr_memory'] =\
-        vector.get_value('max_block_mgr_memory')
+    vector.get_value('exec_option')['buffer_pool_limit'] =\
+        vector.get_value('buffer_pool_limit')
 
     # Execute the query multiple times, cancelling it each time.
     for i in xrange(NUM_CANCELATION_ITERATIONS):
@@ -216,7 +216,7 @@ class TestCancellationFullSort(TestCancellation):
     cls.ImpalaTestMatrix.add_dimension(
         ImpalaTestDimension('cancel_delay', *SORT_CANCEL_DELAY))
     cls.ImpalaTestMatrix.add_dimension(
-        ImpalaTestDimension('max_block_mgr_memory', *SORT_BLOCK_MGR_LIMIT))
+        ImpalaTestDimension('buffer_pool_limit', *SORT_BUFFER_POOL_LIMIT))
     cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('action', None))
     cls.ImpalaTestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format =='parquet' and\

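The dimension rename above changes only the key under which the limit is
stored; each value in SORT_BUFFER_POOL_LIMIT still becomes its own test
vector, crossed with the cancel delays. A rough model of that fan-out
(itertools stands in for ImpalaTestMatrix; this is not the commit's code):

# Rough model: every buffer_pool_limit value crossed with every cancel delay.
import itertools

SORT_BUFFER_POOL_LIMIT = ['0', '300m']  # spilling and non-spilling sorts
SORT_CANCEL_DELAY = range(6, 10)

vectors = [({'buffer_pool_limit': limit}, delay)
           for limit, delay in itertools.product(SORT_BUFFER_POOL_LIMIT,
                                                 SORT_CANCEL_DELAY)]
# 2 limits x 4 delays = 8 (exec_option, cancel_delay) combinations.
assert len(vectors) == 8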
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_mem_usage_scaling.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_mem_usage_scaling.py b/tests/query_test/test_mem_usage_scaling.py
index e6eccf9..bbdc771 100644
--- a/tests/query_test/test_mem_usage_scaling.py
+++ b/tests/query_test/test_mem_usage_scaling.py
@@ -82,7 +82,8 @@ class TestExprMemUsage(ImpalaTestSuite):
 
 class TestLowMemoryLimits(ImpalaTestSuite):
   '''Super class for the memory limit tests with the TPC-H and TPC-DS queries'''
-  EXPECTED_ERROR_MSG = "Memory limit exceeded"
+  EXPECTED_ERROR_MSGS = ["Memory limit exceeded",
+      "Failed to get minimum memory reservation"]
 
   def low_memory_limit_test(self, vector, tpch_query, limit, xfail_mem_limit=None):
     mem = vector.get_value('mem_limit')
@@ -93,28 +94,36 @@ class TestLowMemoryLimits(ImpalaTestSuite):
     # If memory limit larger than the minimum threshold, then it is not expected to fail.
     expects_error = mem < limit
     new_vector = copy(vector)
-    new_vector.get_value('exec_option')['mem_limit'] = str(mem) + "m"
+    exec_options = new_vector.get_value('exec_option')
+    exec_options['mem_limit'] = str(mem) + "m"
+
+    # Reduce the page size to better exercise page boundary logic.
+    exec_options['default_spillable_buffer_size'] = "256k"
     try:
       self.run_test_case(tpch_query, new_vector)
     except ImpalaBeeswaxException as e:
       if not expects_error and not xfail_mem_limit: raise
-      assert TestLowMemoryLimits.EXPECTED_ERROR_MSG in str(e)
+      found_expected_error = False
+      for error_msg in TestLowMemoryLimits.EXPECTED_ERROR_MSGS:
+        if error_msg in str(e): found_expected_error = True
+      assert found_expected_error, str(e)
       if not expects_error and xfail_mem_limit:
         pytest.xfail(xfail_mem_limit)
 
 
 class TestTpchMemLimitError(TestLowMemoryLimits):
-  # TODO: After we stabilize the mem usage test, we should move this test to exhaustive.
+  # TODO: consider moving this test to exhaustive.
   # The mem limits that will be used.
-  MEM_IN_MB = [20, 140, 180, 275, 450, 700, 980]
+  MEM_IN_MB = [20, 140, 180, 220, 275, 450, 700]
 
   # Different values of mem limits and minimum mem limit (in MBs) each query is expected
-  # to run without problem. Those values were determined by manual testing.
-  MIN_MEM_FOR_TPCH = { 'Q1' : 140, 'Q2' : 120, 'Q3' : 240, 'Q4' : 125, 'Q5' : 235,\
-                       'Q6' : 25, 'Q7' : 265, 'Q8' : 250, 'Q9' : 400, 'Q10' : 240,\
-                       'Q11' : 110, 'Q12' : 125, 'Q13' : 110, 'Q14' : 229, 'Q15' : 125,\
-                       'Q16' : 125, 'Q17' : 130, 'Q18' : 475, 'Q19' : 240, 'Q20' : 250,\
-                       'Q21' : 620, 'Q22' : 125}
+  # to run without problem. These were determined using the query_runtime_info.json file
+  # produced by the stress test (i.e. concurrent_select.py).
+  MIN_MEM_FOR_TPCH = { 'Q1' : 125, 'Q2' : 125, 'Q3' : 112, 'Q4' : 137, 'Q5' : 137,\
+                       'Q6' : 25, 'Q7' : 200, 'Q8' : 125, 'Q9' : 200, 'Q10' : 162,\
+                       'Q11' : 112, 'Q12' : 150, 'Q13' : 125, 'Q14' : 125, 'Q15' : 125,\
+                       'Q16' : 137, 'Q17' : 137, 'Q18' : 196, 'Q19' : 112, 'Q20' : 162,\
+                       'Q21' : 187, 'Q22' : 125}
 
   @classmethod
   def get_workload(self):

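The found_expected_error loop introduced above is a hand-rolled version of
Python's any(); a compact equivalent, for reference only:

# Equivalent to the loop in low_memory_limit_test (sketch, not the commit):
EXPECTED_ERROR_MSGS = ["Memory limit exceeded",
                       "Failed to get minimum memory reservation"]

def is_expected_error(e):
    # True if the exception text matches an accepted low-memory error.
    return any(msg in str(e) for msg in EXPECTED_ERROR_MSGS)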
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_nested_types.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_nested_types.py b/tests/query_test/test_nested_types.py
index 96a170b..bb74faa 100644
--- a/tests/query_test/test_nested_types.py
+++ b/tests/query_test/test_nested_types.py
@@ -27,7 +27,6 @@ from tests.common.skip import (
     SkipIfS3,
     SkipIfADLS,
     SkipIfLocal)
-
 from tests.util.filesystem_utils import WAREHOUSE, get_fs_path
 
 class TestNestedTypes(ImpalaTestSuite):

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_scratch_limit.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_scratch_limit.py b/tests/query_test/test_scratch_limit.py
index 6a13318..6e19bb5 100644
--- a/tests/query_test/test_scratch_limit.py
+++ b/tests/query_test/test_scratch_limit.py
@@ -28,7 +28,7 @@ class TestScratchLimit(ImpalaTestSuite):
 
   # Block manager memory limit that is low enough to
   # force Impala to spill to disk when executing 'spill_query'
-  max_block_mgr_memory = "64m"
+  buffer_pool_limit = "64m"
 
   @classmethod
   def get_workload(self):
@@ -48,7 +48,7 @@ class TestScratchLimit(ImpalaTestSuite):
     its required scratch space which in this case is 128m.
     """
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     exec_option['scratch_limit'] = '500m'
     self.execute_query_expect_success(self.client, self.spill_query, exec_option)
 
@@ -58,7 +58,7 @@ class TestScratchLimit(ImpalaTestSuite):
     its required scratch space which in this case is 128m.
     """
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     exec_option['scratch_limit'] = '24m'
     expected_error = 'Scratch space limit of %s bytes exceeded'
     scratch_limit_in_bytes = 24 * 1024 * 1024
@@ -74,7 +74,7 @@ class TestScratchLimit(ImpalaTestSuite):
     zero which means no scratch space can be allocated.
     """
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     exec_option['scratch_limit'] = '0'
     self.execute_query_expect_failure(self.spill_query, exec_option)
 
@@ -83,7 +83,7 @@ class TestScratchLimit(ImpalaTestSuite):
     Query runs to completion with a scratch Limit of -1 means default/no limit.
     """
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     exec_option['scratch_limit'] = '-1'
     self.execute_query_expect_success(self.client, self.spill_query, exec_option)
 
@@ -92,7 +92,7 @@ class TestScratchLimit(ImpalaTestSuite):
     Query runs to completion with the default setting of no scratch limit.
     """
     exec_option = vector.get_value('exec_option')
-    exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory
+    exec_option['buffer_pool_limit'] = self.buffer_pool_limit
     self.execute_query_expect_success(self.client, self.spill_query, exec_option)
 
   def test_with_zero_scratch_limit_no_memory_limit(self, vector):

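Note that the expected-error assertion in test_with_low_scratch_limit formats
the limit in bytes, so the '24m' scratch_limit must first be converted:

# How the expected error string above is assembled: '24m' expressed in bytes.
expected_error = 'Scratch space limit of %s bytes exceeded'
scratch_limit_in_bytes = 24 * 1024 * 1024  # 25165824
print(expected_error % scratch_limit_in_bytes)
# Scratch space limit of 25165824 bytes exceeded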
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_sort.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_sort.py b/tests/query_test/test_sort.py
index b048c9f..df95ddd 100644
--- a/tests/query_test/test_sort.py
+++ b/tests/query_test/test_sort.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
+from copy import copy
+
 from tests.common.impala_test_suite import ImpalaTestSuite
 
 def transpose_results(result, map_fn=lambda x: x):
@@ -46,7 +48,7 @@ class TestQueryFullSort(ImpalaTestSuite):
        takes about a minute"""
     query = """select l_comment, l_partkey, l_orderkey, l_suppkey, l_commitdate
             from lineitem order by l_comment limit 100000"""
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     exec_option['disable_outermost_topn'] = 1
     table_format = vector.get_value('table_format')
 
@@ -63,16 +65,18 @@ class TestQueryFullSort(ImpalaTestSuite):
     query = """select o_orderdate, o_custkey, o_comment
       from orders
       order by o_orderdate"""
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     table_format = vector.get_value('table_format')
 
-    max_block_mgr_memory_values = ['-1', '48M'] # Unlimited and minimum memory.
+    # The memory values below assume 8M pages.
+    exec_option['default_spillable_buffer_size'] = '8M'
+    buffer_pool_limit_values = ['-1', '48M'] # Unlimited and minimum memory.
     if self.exploration_strategy() == 'exhaustive' and \
         table_format.file_format == 'parquet':
       # Test some intermediate values for parquet on exhaustive.
-      max_block_mgr_memory_values += ['64M', '128M', '256M']
-    for max_block_mgr_memory in max_block_mgr_memory_values:
-      exec_option['max_block_mgr_memory'] = max_block_mgr_memory
+      buffer_pool_limit_values += ['64M', '128M', '256M']
+    for buffer_pool_limit in buffer_pool_limit_values:
+      exec_option['buffer_pool_limit'] = buffer_pool_limit
       result = transpose_results(self.execute_query(
         query, exec_option, table_format=table_format).data)
       assert(result[0] == sorted(result[0]))
@@ -83,7 +87,7 @@ class TestQueryFullSort(ImpalaTestSuite):
     query = """select o1.o_orderdate, o2.o_custkey, o1.o_comment from orders 
o1 join
     orders o2 on (o1.o_orderkey = o2.o_orderkey) order by o1.o_orderdate limit 
100000"""
 
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     exec_option['disable_outermost_topn'] = 1
     exec_option['mem_limit'] = "1200m"
     table_format = vector.get_value('table_format')
@@ -97,7 +101,7 @@ class TestQueryFullSort(ImpalaTestSuite):
     select * from orders union all select * from orders) as i
     order by o_orderdate limit 100000"""
 
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     exec_option['disable_outermost_topn'] = 1
     exec_option['mem_limit'] = "3000m"
     table_format = vector.get_value('table_format')
@@ -120,7 +124,7 @@ class TestQueryFullSort(ImpalaTestSuite):
       select * from lineitem limit 300000) t
     order by l_orderkey"""
 
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     exec_option['disable_outermost_topn'] = 1
     # Run with a single scanner thread so that the input doesn't get reordered.
     exec_option['num_nodes'] = "1"
@@ -145,9 +149,9 @@ class TestQueryFullSort(ImpalaTestSuite):
     limit 100000
     """
 
-    exec_option = vector.get_value('exec_option')
+    exec_option = copy(vector.get_value('exec_option'))
     exec_option['disable_outermost_topn'] = 1
-    exec_option['max_block_mgr_memory'] = "256m"
+    exec_option['buffer_pool_limit'] = "256m"
     exec_option['num_nodes'] = "1"
     table_format = vector.get_value('table_format')
 

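The new copy() calls matter because the exec_option dict comes from a shared
test vector: mutating it in place would leak per-test settings such as
disable_outermost_topn into later tests. A shallow copy suffices since only
top-level keys are overwritten; a minimal illustration of the hazard:

# Why copy(vector.get_value('exec_option')) is needed (sketch only).
from copy import copy

shared_options = {'mem_limit': '1200m'}      # stands in for the vector's dict
local_options = copy(shared_options)         # per-test shallow copy
local_options['buffer_pool_limit'] = '256m'  # tweak only the local view
assert 'buffer_pool_limit' not in shared_options  # shared dict is untouched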
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a98b90bd/tests/query_test/test_spilling.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_spilling.py b/tests/query_test/test_spilling.py
new file mode 100644
index 0000000..e2d5141
--- /dev/null
+++ b/tests/query_test/test_spilling.py
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tests.common.impala_test_suite import ImpalaTestSuite
+from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
+    create_parquet_dimension)
+
+class TestSpilling(ImpalaTestSuite):
+  @classmethod
+  def get_workload(self):
+    return 'functional-query'
+
+  @classmethod
+  def add_test_dimensions(cls):
+    super(TestSpilling, cls).add_test_dimensions()
+    cls.ImpalaTestMatrix.clear_constraints()
+    cls.ImpalaTestMatrix.add_dimension(create_parquet_dimension('tpch'))
+    # Tests are calibrated so that they can execute and spill with this page size.
+    cls.ImpalaTestMatrix.add_dimension(
+        create_exec_option_dimension_from_dict({'default_spillable_buffer_size' : ['256k']}))
+
+  def test_spilling(self, vector):
+    self.run_test_case('QueryTest/spilling', vector)

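The new suite pins default_spillable_buffer_size to a single 256k value via
the dict-based dimension helper. Presumably each key maps to a list of values
and the cross product becomes one exec_option dict per generated vector; a
sketch of that assumed behavior (the helper's real implementation lives in
tests/common/test_dimensions.py):

# Assumed behavior of create_exec_option_dimension_from_dict (sketch only).
import itertools

def exec_option_dicts(option_values):
    # Cross product of all value lists -> one exec_option dict per combination.
    keys = list(option_values)
    for combo in itertools.product(*(option_values[k] for k in keys)):
        yield dict(zip(keys, combo))

# With a single key and a single value, exactly one combination is produced:
for opts in exec_option_dicts({'default_spillable_buffer_size': ['256k']}):
    print(opts)  # {'default_spillable_buffer_size': '256k'}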