http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/queries/clientpositive/results_cache_capacity.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_capacity.q b/ql/src/test/queries/clientpositive/results_cache_capacity.q
new file mode 100644
index 0000000..9f54577
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/results_cache_capacity.q
@@ -0,0 +1,52 @@
+
+set hive.query.results.cache.enabled=true;
+
+-- Allow results cache to hold entries up to 125 bytes
+-- The single row queries are small enough to fit in the cache (103 bytes)
+-- But the cache is only large enough to hold up to 2 entries at that size.
+-- This may need to be adjusted if the sizes below change
+set hive.query.results.cache.max.size=250;
+set hive.query.results.cache.max.entry.size=125;
+
+
+select key, count(*) from src where key = 0 group by key;
+set test.comment=Q1 should be cached;
+set test.comment;
+explain
+select key, count(*) from src where key = 0 group by key;
+
+
+select key, count(*) from src where key = 2 group by key;
+set test.comment=Q2 should now be cached;
+set test.comment;
+explain
+select key, count(*) from src where key = 2 group by key;
+
+set test.comment=Q1 should still be cached;
+set test.comment;
+explain
+select key, count(*) from src where key = 0 group by key;
+
+-- Add another query to the cache. Cache not large enough to hold all 3 queries.
+-- Due to LRU (Q1 last looked up), Q2 should no longer be in the cache.
+select key, count(*) from src where key = 4 group by key;
+set test.comment=Q3 should now be cached;
+set test.comment;
+explain
+select key, count(*) from src where key = 4 group by key;
+
+set test.comment=Q1 should still be cached;
+set test.comment;
+explain
+select key, count(*) from src where key = 0 group by key;
+
+set test.comment=Q2 should no longer be in the cache;
+set test.comment;
+explain
+select key, count(*) from src where key = 2 group by key;
+
+-- Query should not be cached because it exceeds the max entry size (183 bytes).
+select key, count(*) from src where key < 10 group by key;
+set test.comment=Q4 is too large to be cached;
+explain
+select key, count(*) from src where key < 10 group by key;

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/queries/clientpositive/results_cache_lifetime.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_lifetime.q b/ql/src/test/queries/clientpositive/results_cache_lifetime.q
new file mode 100644
index 0000000..60ffe96
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/results_cache_lifetime.q
@@ -0,0 +1,14 @@
+
+set hive.query.results.cache.enabled=true;
+set hive.query.results.cache.max.entry.lifetime=2;
+
+-- This query used the cache from results_cache_1.q. Load it up.
+select count(*) from src a join src b on (a.key = b.key);
+
+-- Make sure we are past the cache entry lifetime
+select reflect("java.lang.Thread", 'sleep', cast(2000 as bigint));
+
+set test.comment="Cached entry should be expired - query should not use cache";
+set test.comment;
+explain
+select count(*) from src a join src b on (a.key = b.key);

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/queries/clientpositive/results_cache_temptable.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_temptable.q b/ql/src/test/queries/clientpositive/results_cache_temptable.q
new file mode 100644
index 0000000..9e0de76
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/results_cache_temptable.q
@@ -0,0 +1,42 @@
+set hive.query.results.cache.enabled=true;
+
+create table rct (key string, value string);
+load data local inpath '../../data/files/kv1.txt' overwrite into table rct;
+
+create table rct_part (key string, value string) partitioned by (ds string);
+load data local inpath '../../data/files/kv1.txt' overwrite into table rct_part partition (ds="2008-04-08");
+load data local inpath '../../data/files/kv1.txt' overwrite into table rct_part partition (ds="2008-04-09");
+
+create temporary table tmptab as select * from src;
+
+select count(*) from tmptab where key = 0;
+set test.comment="Query involving temp tables should not be added to the cache";
+set test.comment;
+explain
+select count(*) from tmptab where key = 0;
+
+-- A cached query should not be used if one of the tables used now resolves to a temp table.
+select count(*) from rct where key = 0;
+set test.comment="Query should use the cache";
+set test.comment;
+explain
+select count(*) from rct where key = 0;
+-- Create temp table with same name, which takes precedence over the non-temp table.
+create temporary table rct as select * from tmptab;
+set test.comment="Cached query does not get used now that it resolves to a temp table";
+set test.comment;
+explain
+select count(*) from rct where key = 0;
+
+-- Try with partitioned table
+select count(*) from rct_part where ds="2008-04-08" and key = 0;
+set test.comment="Query should use the cache";
+set test.comment;
+explain
+select count(*) from rct_part where ds="2008-04-08" and key = 0;
+-- Create temp table with same name, which takes precedence over the non-temp table.
+create temporary table rct_part as select key, value, "2008-04-08" as ds from tmptab;
+set test.comment="Cached query does not get used now that it resolves to a temp table";
+set test.comment;
+explain
+select count(*) from rct_part where ds="2008-04-08" and key = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/queries/clientpositive/results_cache_with_masking.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_with_masking.q b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
new file mode 100644
index 0000000..b4fcdd5
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
@@ -0,0 +1,17 @@
+
+set hive.mapred.mode=nonstrict;
set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+
+set hive.query.results.cache.enabled=true;
+
+create table masking_test as select cast(key as int) as key, value from src;
+
+explain
+select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test group by key;
+
+-- This time we should use the cache
+explain
+select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test group by key;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/results/clientpositive/llap/results_cache_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/results_cache_1.q.out b/ql/src/test/results/clientpositive/llap/results_cache_1.q.out
new file mode 100644
index 0000000..ebd2300
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/results_cache_1.q.out
@@ -0,0 +1,584 @@
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 809 Data size: 6472 Basic stats: 
COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  table:
+                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1028
+test.comment="Cache should be used for this query"
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+POSTHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+1028
+test.comment="Cache is disabled, should not be used here."
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 809 Data size: 6472 Basic stats: 
COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  table:
+                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: create database db1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:db1
+POSTHOOK: query: create database db1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:db1
+PREHOOK: query: use db1
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:db1
+POSTHOOK: query: use db1
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:db1
+PREHOOK: query: create table src as select key, value from default.src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:db1
+PREHOOK: Output: db1@src
+POSTHOOK: query: create table src as select key, value from default.src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:db1
+POSTHOOK: Output: db1@src
+POSTHOOK: Lineage: src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ]
+POSTHOOK: Lineage: src.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
+test.comment="Same query string, but different current database. Cache should 
not be used since unqualified tablenames resolve to different tables"
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 87584 Basic stats: 
COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 475 Data size: 83204 Basic stats: 
COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 475 Data size: 83204 Basic stats: 
COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 475 Data size: 83204 Basic 
stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 87584 Basic stats: 
COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 475 Data size: 83204 Basic stats: 
COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 475 Data size: 83204 Basic stats: 
COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 475 Data size: 83204 Basic 
stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 522 Data size: 91524 Basic stats: 
COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+                  table:
+                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      val_0
+0      val_0
+0      val_0
+2      val_2
+test.comment="Union all. Cache should be used now"
+PREHOOK: query: explain
+select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+POSTHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+0      val_0
+0      val_0
+0      val_0
+2      val_2
+PREHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      0
+2      2
+4      4
+5      5
+8      8
+9      9
+test.comment="CTE. Cache should be used now"
+PREHOOK: query: explain
+with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+POSTHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+0      0
+2      2
+4      4
+5      5
+8      8
+9      9
+PREHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+8      val_8
+2      val_2
+4      val_4
+5      val_5
+9      val_9
+test.comment="Intersect/Except. Cache should be used now"
+PREHOOK: query: explain
+with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+POSTHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+8      val_8
+2      val_2
+4      val_4
+5      val_5
+9      val_9
+PREHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1     2       1
+ 1     2       1
+1      2       1
+1      12      1
+test.comment="Semijoin. Cache should be used now"
+PREHOOK: query: explain
+select a, c, count(*)  from (select key as a, c_int+1 as b, sum(c_int) as c 
from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float 
> 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b 
desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on 
cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a 
> 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a, c, count(*)  from (select key as a, c_int+1 as b, sum(c_int) as c 
from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float 
> 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b 
desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on 
cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a 
> 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+POSTHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+ 1     2       1
+ 1     2       1
+1      2       1
+1      12      1

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/results/clientpositive/results_cache_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/results_cache_1.q.out 
b/ql/src/test/results/clientpositive/results_cache_1.q.out
new file mode 100644
index 0000000..e3359d4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/results_cache_1.q.out
@@ -0,0 +1,579 @@
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
+          Group By Operator
+            aggregations: count()
+            mode: hash
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1028
+test.comment="Cache should be used for this query"
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1028
+test.comment="Cache is disabled, should not be used here."
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
+          Group By Operator
+            aggregations: count()
+            mode: hash
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: create database db1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:db1
+POSTHOOK: query: create database db1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:db1
+PREHOOK: query: use db1
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:db1
+POSTHOOK: query: use db1
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:db1
+PREHOOK: query: create table src as select key, value from default.src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:db1
+PREHOOK: Output: db1@src
+POSTHOOK: query: create table src as select key, value from default.src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:db1
+POSTHOOK: Output: db1@src
+POSTHOOK: Lineage: src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ]
+POSTHOOK: Lineage: src.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
+test.comment="Same query string, but different current database. Cache should 
not be used since unqualified tablenames resolve to different tables"
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
+          Group By Operator
+            aggregations: count()
+            mode: hash
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      val_0
+0      val_0
+0      val_0
+2      val_2
+test.comment="Union all. Cache should be used now"
+PREHOOK: query: explain
+select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src where key = 0
+union all
+select * from src where key = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      val_0
+0      val_0
+0      val_0
+2      val_2
+PREHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      0
+2      2
+4      4
+5      5
+8      8
+9      9
+test.comment="CTE. Cache should be used now"
+PREHOOK: query: explain
+with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key from q2 ),
+q2 as ( select key, value from src where key < 10 )
+select * from q1 a, q1 b where a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      0
+2      2
+4      4
+5      5
+8      8
+9      9
+PREHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2      val_2
+4      val_4
+5      val_5
+8      val_8
+9      val_9
+test.comment="Intersect/Except. Cache should be used now"
+PREHOOK: query: explain
+with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select distinct key, value from src ),
+q2 as ( select key, value from src where key < 10 ),
+q3 as ( select key, value from src where key = 0 )
+select * from q1 intersect all select * from q2 except all select * from q3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2      val_2
+4      val_4
+5      val_5
+8      val_8
+9      val_9
+PREHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1     2       1
+ 1     2       1
+1      2       1
+1      12      1
+test.comment="Semijoin. Cache should be used now"
+PREHOOK: query: explain
+select a, c, count(*)  from (select key as a, c_int+1 as b, sum(c_int) as c 
from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float 
> 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b 
desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on 
cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a 
> 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a, c, count(*)  from (select key as a, c_int+1 as b, sum(c_int) as c 
from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or 
cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float 
> 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b 
desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on 
cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a 
> 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, c, count(*)  from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 
or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having 
cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key 
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join 
cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  
having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1     2       1
+ 1     2       1
+1      2       1
+1      12      1

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/results/clientpositive/results_cache_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/results_cache_2.q.out 
b/ql/src/test/results/clientpositive/results_cache_2.q.out
new file mode 100644
index 0000000..ab1b0de
--- /dev/null
+++ b/ql/src/test/results/clientpositive/results_cache_2.q.out
@@ -0,0 +1,176 @@
+PREHOOK: query: explain
+select key, value from src where key=0
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, value from src where key=0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+          Filter Operator
+            predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
+
+PREHOOK: query: select key, value from src where key=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from src where key=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      val_0
+0      val_0
+0      val_0
+test.comment=Query only requires fetch task - should not use results cache
+PREHOOK: query: explain
+select key, value from src where key=0
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, value from src where key=0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+          Filter Operator
+            predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
+
+PREHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+NULL   10
+test.comment=This query should use the cache
+PREHOOK: query: explain
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select c1, count(*)
+from (select current_timestamp c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select c1, count(*)
+from (select current_timestamp c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2012-01-01 01:02:03    10
+test.comment=Queries using non-deterministic functions should not use results 
cache
+PREHOOK: query: explain
+select c1, count(*)
+from (select current_timestamp c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select c1, count(*)
+from (select current_timestamp c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  keys: 2012-01-01 01:02:03.0 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: timestamp)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: timestamp)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column 
stats: NONE
+          Select Operator
+            expressions: 2012-01-01 01:02:03.0 (type: timestamp), _col1 (type: 
bigint)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/results/clientpositive/results_cache_capacity.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/results_cache_capacity.q.out 
b/ql/src/test/results/clientpositive/results_cache_capacity.q.out
new file mode 100644
index 0000000..695d47d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/results_cache_capacity.q.out
@@ -0,0 +1,238 @@
+PREHOOK: query: select key, count(*) from src where key = 0 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from src where key = 0 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      3
+test.comment=Q1 should be cached
+PREHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select key, count(*) from src where key = 2 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from src where key = 2 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2      1
+test.comment=Q2 should now be cached
+PREHOOK: query: explain
+select key, count(*) from src where key = 2 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 2 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+test.comment=Q1 should still be cached
+PREHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+PREHOOK: query: select key, count(*) from src where key = 4 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from src where key = 4 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+4      1
+test.comment=Q3 should now be cached
+PREHOOK: query: explain
+select key, count(*) from src where key = 4 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 4 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+test.comment=Q1 should still be cached
+PREHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 0 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+      Cached Query Result: true
+
+test.comment=Q2 should no longer be in the cache
+PREHOOK: query: explain
+select key, count(*) from src where key = 2 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key = 2 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: (UDFToDouble(key) = 2.0) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE 
Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE 
Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, count(*) from src where key < 10 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from src where key < 10 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0      3
+2      1
+4      1
+5      3
+8      1
+9      1
+PREHOOK: query: explain
+select key, count(*) from src where key < 10 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*) from src where key < 10 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/1733a371/ql/src/test/results/clientpositive/results_cache_lifetime.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/results_cache_lifetime.q.out 
b/ql/src/test/results/clientpositive/results_cache_lifetime.q.out
new file mode 100644
index 0000000..ea5d7e0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/results_cache_lifetime.q.out
@@ -0,0 +1,112 @@
+PREHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1028
+PREHOOK: query: select reflect("java.lang.Thread", 'sleep', cast(2000 as 
bigint))
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select reflect("java.lang.Thread", 'sleep', cast(2000 as 
bigint))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+null
+test.comment="Cached entry should be expired - query should not use cache"
+PREHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from src a join src b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
+          Group By Operator
+            aggregations: count()
+            mode: hash
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

Reply via email to