http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
new file mode 100644
index 0000000..cf20851
--- /dev/null
+++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
@@ -0,0 +1,683 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: 0
+                value expressions: _col1 (type: string)
+                auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), ds (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                tag: 1
+                value expressions: _col1 (type: string), _col2 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+          filter mappings:
+            1 [0, 1]
+          filter predicates:
+            0 
+            1 {(VALUE._col1 = '2008-04-08')}
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0) and (UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17     val_17  17      val_17
+17     val_17  17      val_17
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+19     val_19  19      val_19
+19     val_19  19      val_19
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  tag: 0
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                  tag: 1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
+            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17     val_17  17      val_17
+17     val_17  17      val_17
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+18     val_18  18      val_18
+19     val_19  19      val_19
+19     val_19  19      val_19

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
deleted file mode 100644
index 825e668..0000000
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
+++ /dev/null
@@ -1,70 +0,0 @@
-PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS parquet_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_table
-POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
-PREHOOK: query: SELECT * FROM parquet_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-{"key2":"val2","key1":null}
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: DROP TABLE parquet_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_table
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: DROP TABLE parquet_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_table
-POSTHOOK: Output: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
deleted file mode 100644
index 1462cc2..0000000
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
+++ /dev/null
@@ -1,70 +0,0 @@
-PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS parquet_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_table
-POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
-PREHOOK: query: SELECT * FROM parquet_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-{"key1":null,"key2":"val2"}
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: DROP TABLE parquet_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_table
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: DROP TABLE parquet_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_table
-POSTHOOK: Output: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/parquet_map_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.out b/ql/src/test/results/clientpositive/parquet_map_null.q.out
new file mode 100644
index 0000000..d1357c1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_map_null.q.out
@@ -0,0 +1,68 @@
+PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
+
+DROP TABLE IF EXISTS avro_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
+
+DROP TABLE IF EXISTS avro_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS parquet_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@avro_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_table
+POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@avro_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_table
+POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
+PREHOOK: query: SELECT * FROM parquet_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_table
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM parquet_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_table
+#### A masked pattern was here ####
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+{"key1":null,"key2":"val2"}
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+PREHOOK: query: DROP TABLE avro_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@avro_table
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: DROP TABLE avro_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@avro_table
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: DROP TABLE parquet_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@parquet_table
+PREHOOK: Output: default@parquet_table
+POSTHOOK: query: DROP TABLE parquet_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@parquet_table
+POSTHOOK: Output: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out b/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
deleted file mode 100644
index dda4adc..0000000
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
+++ /dev/null
@@ -1,13 +0,0 @@
-PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT 
STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator 
Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 
5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select 
Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE","children":{"Group By 
Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
COMPLETE","children":{"Reduce Output Operator":{"sort 
order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce 
Operator Tree:":{"Group By 
Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output 
Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: COMPLETE","table:":{"input 
format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output 
format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch
 Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out b/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
deleted file mode 100644
index dda4adc..0000000
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
+++ /dev/null
@@ -1,13 +0,0 @@
-PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT 
STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator 
Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 
5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select 
Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE","children":{"Group By 
Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
COMPLETE","children":{"Reduce Output Operator":{"sort 
order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce 
Operator Tree:":{"Group By 
Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output 
Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: COMPLETE","table:":{"input 
format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output 
format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch
 Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/plan_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.out b/ql/src/test/results/clientpositive/plan_json.q.out
new file mode 100644
index 0000000..98c6626
--- /dev/null
+++ b/ql/src/test/results/clientpositive/plan_json.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
+
+
+EXPLAIN FORMATTED SELECT count(1) FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
+
+
+EXPLAIN FORMATTED SELECT count(1) FROM src
+POSTHOOK: type: QUERY
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT 
STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator 
Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 
5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select 
Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE","children":{"Group By 
Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
COMPLETE","children":{"Reduce Output Operator":{"sort 
order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce 
Operator Tree:":{"Group By 
Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output 
Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: COMPLETE","table:":{"input 
format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output 
format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch
 Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out b/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
deleted file mode 100644
index b3a58d0..0000000
--- a/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
+++ /dev/null
@@ -1,238 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   2       val_2
-0      val_0   2       val_2
-0      val_0   2       val_2
-0      val_0   4       val_4
-0      val_0   4       val_4
-0      val_0   4       val_4
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   8       val_8
-0      val_0   8       val_8
-0      val_0   8       val_8
-0      val_0   9       val_9
-0      val_0   9       val_9
-0      val_0   9       val_9
-2      val_2   0       val_0
-2      val_2   0       val_0
-2      val_2   0       val_0
-2      val_2   2       val_2
-2      val_2   4       val_4
-2      val_2   5       val_5
-2      val_2   5       val_5
-2      val_2   5       val_5
-2      val_2   8       val_8
-2      val_2   9       val_9
-4      val_4   0       val_0
-4      val_4   0       val_0
-4      val_4   0       val_0
-4      val_4   2       val_2
-4      val_4   4       val_4
-4      val_4   5       val_5
-4      val_4   5       val_5
-4      val_4   5       val_5
-4      val_4   8       val_8
-4      val_4   9       val_9
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   2       val_2
-5      val_5   2       val_2
-5      val_5   2       val_2
-5      val_5   4       val_4
-5      val_5   4       val_4
-5      val_5   4       val_4
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   8       val_8
-5      val_5   8       val_8
-5      val_5   8       val_8
-5      val_5   9       val_9
-5      val_5   9       val_9
-5      val_5   9       val_9
-8      val_8   0       val_0
-8      val_8   0       val_0
-8      val_8   0       val_0
-8      val_8   2       val_2
-8      val_8   4       val_4
-8      val_8   5       val_5
-8      val_8   5       val_5
-8      val_8   5       val_5
-8      val_8   8       val_8
-8      val_8   9       val_9
-9      val_9   0       val_0
-9      val_9   0       val_0
-9      val_9   0       val_0
-9      val_9   2       val_2
-9      val_9   4       val_4
-9      val_9   5       val_5
-9      val_9   5       val_5
-9      val_9   5       val_5
-9      val_9   8       val_8
-9      val_9   9       val_9
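
The repeated "cross product" warnings in these baselines are expected: the
JOIN between src1 and src2 carries no ON clause, so Hive plans it as a full
cross product and flags it. As a hedged illustration only (the test keeps
the cross product on purpose), an equi-join variant that would not trigger
the warning:

  SELECT src1.key as k1, src1.value as v1,
         src2.key as k2, src2.value as v2 FROM
    (SELECT * FROM src WHERE src.key < 10) src1
      JOIN
    (SELECT * FROM src WHERE src.key < 10) src2
      ON (src1.key = src2.key)  -- join key present, so no shuffle-join warning
    SORT BY k1, v1, k2, v2;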

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out b/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
deleted file mode 100644
index 7acd108..0000000
--- a/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
+++ /dev/null
@@ -1,238 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   0       val_0
-0      val_0   2       val_2
-0      val_0   2       val_2
-0      val_0   2       val_2
-0      val_0   4       val_4
-0      val_0   4       val_4
-0      val_0   4       val_4
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   5       val_5
-0      val_0   8       val_8
-0      val_0   8       val_8
-0      val_0   8       val_8
-0      val_0   9       val_9
-0      val_0   9       val_9
-0      val_0   9       val_9
-2      val_2   0       val_0
-2      val_2   0       val_0
-2      val_2   0       val_0
-2      val_2   2       val_2
-2      val_2   4       val_4
-2      val_2   5       val_5
-2      val_2   5       val_5
-2      val_2   5       val_5
-2      val_2   8       val_8
-2      val_2   9       val_9
-4      val_4   0       val_0
-4      val_4   0       val_0
-4      val_4   0       val_0
-4      val_4   2       val_2
-4      val_4   4       val_4
-4      val_4   5       val_5
-4      val_4   5       val_5
-4      val_4   5       val_5
-4      val_4   8       val_8
-4      val_4   9       val_9
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   0       val_0
-5      val_5   2       val_2
-5      val_5   2       val_2
-5      val_5   2       val_2
-5      val_5   4       val_4
-5      val_5   4       val_4
-5      val_5   4       val_4
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   5       val_5
-5      val_5   8       val_8
-5      val_5   8       val_8
-5      val_5   8       val_8
-5      val_5   9       val_9
-5      val_5   9       val_9
-5      val_5   9       val_9
-8      val_8   0       val_0
-8      val_8   0       val_0
-8      val_8   0       val_0
-8      val_8   2       val_2
-8      val_8   4       val_4
-8      val_8   5       val_5
-8      val_8   5       val_5
-8      val_8   5       val_5
-8      val_8   8       val_8
-8      val_8   9       val_9
-9      val_9   0       val_0
-9      val_9   0       val_0
-9      val_9   0       val_0
-9      val_9   2       val_2
-9      val_9   4       val_4
-9      val_9   5       val_5
-9      val_9   5       val_5
-9      val_9   5       val_5
-9      val_9   8       val_8
-9      val_9   9       val_9
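
The 100 rows above follow directly from the fixture: src has ten rows with
key < 10 (three 0s, one 2, one 4, three 5s, one 8, one 9), and the cross
product pairs each of those ten rows with every other, which is why key 0
meets key 0 nine times but key 2 only three times. A quick sanity check,
assuming the standard 500-row src test table:

  SELECT count(*) FROM
    (SELECT * FROM src WHERE src.key < 10) src1
      JOIN
    (SELECT * FROM src WHERE src.key < 10) src2;
  -- expected result: 100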

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.out b/ql/src/test/results/clientpositive/spark/join0.q.out
index 56b154f..bc98bb4 100644
--- a/ql/src/test/results/clientpositive/spark/join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/join0.q.out
@@ -1,5 +1,7 @@
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
   (SELECT * FROM src WHERE src.key < 10) src1 
@@ -7,7 +9,9 @@ SELECT src1.key as k1, src1.value as v1,
   (SELECT * FROM src WHERE src.key < 10) src2
   SORT BY k1, v1, k2, v2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
   (SELECT * FROM src WHERE src.key < 10) src1 
@@ -24,7 +28,7 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (SORT, 1)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 4)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -64,9 +68,9 @@ STAGE PLANS:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
-                condition expressions:
-                  0 {VALUE._col0} {VALUE._col1}
-                  1 {VALUE._col0} {VALUE._col1}
+                keys:
+                  0 
+                  1 
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
@@ -83,8 +87,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
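
Three changes land in join0.q.out itself: the query gains a
-- SORT_QUERY_RESULTS directive (the qtest driver sorts result rows before
comparing, so the baseline no longer depends on row order), the Reducer 3
edge becomes a 4-way PARTITION-LEVEL SORT, and the fetch stage now writes
SequenceFile instead of plain text. A minimal sketch, assuming the format
switch is driven by hive.query.result.fileformat (an assumption about the
cause, not something the diff itself confirms):

  -- assumption: pinning the result file format back to TextFile would
  -- reproduce the old Text-based fetch stage in the plan
  set hive.query.result.fileformat=TextFile;
  EXPLAIN
  SELECT src1.key as k1, src1.value as v1,
         src2.key as k2, src2.value as v2 FROM
    (SELECT * FROM src WHERE src.key < 10) src1
      JOIN
    (SELECT * FROM src WHERE src.key < 10) src2
    SORT BY k1, v1, k2, v2;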

http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
deleted file mode 100644
index 3040544..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
+++ /dev/null
@@ -1,252 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 1
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Static Partition Specification: ds=2008-04-08/hr=11/
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          properties:
-                            bucket_count -1
-                            columns key,value
-                            columns.comments 
-                            columns.types string:string
-#### A masked pattern was here ####
-                            name default.list_bucketing_static_part
-                            partition_columns ds/hr
-                            partition_columns.types string:string
-                            serialization.ddl struct list_bucketing_static_part { string key, string value}
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.list_bucketing_static_part
-                      TotalFiles: 1
-                      GatherStats: true
-                      MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name             data_type               comment             
-                
-key                    string                                      
-value                  string                                      
-                
-# Partition Information                 
-# col_name             data_type               comment             
-                
-ds                     string                                      
-hr                     string                                      
-                
-# Detailed Partition Information                
-Partition Value:       [2008-04-08, 11]         
-Database:              default                  
-Table:                 list_bucketing_static_part       
-#### A masked pattern was here ####
-Partition Parameters:           
-       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
-       numFiles                4                   
-       numRows                 500                 
-       rawDataSize             4812                
-       totalSize               5520                
-#### A masked pattern was here ####
-                
-# Storage Information           
-SerDe Library:         org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-InputFormat:           org.apache.hadoop.hive.ql.io.RCFileInputFormat   
-OutputFormat:          org.apache.hadoop.hive.ql.io.RCFileOutputFormat  
-Compressed:            No                       
-Num Buckets:           -1                       
-Bucket Columns:        []                       
-Sort Columns:          []                       
-Stored As SubDirectories:      Yes                      
-Skewed Columns:        [key]                    
-Skewed Values:         [[484], [51], [103]]     
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:        {[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51}
-Storage Desc Params:            
-       serialization.format    1                   
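
For context on this baseline: list_bucketing_static_part is list-bucketed,
so rows with the skewed keys ('484', '51', '103') are written to dedicated
subdirectories, as the Skewed Value to Truncated Path mapping above shows.
A hedged sketch of the read side, assuming the usual pair of settings that
enable skew-directory pruning on such tables:

  set hive.mapred.supports.subdirectories=true;  -- needed for STORED AS DIRECTORIES
  set hive.optimize.listbucketing=true;          -- prune to the matching skew directory
  SELECT key, value FROM list_bucketing_static_part
  WHERE ds = '2008-04-08' AND hr = '11' AND key = '484';  -- can read only .../key=484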
