Modified: hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q (original)
+++ hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q Tue Apr 14 14:47:30 2015
@@ -14,12 +14,12 @@ select count(*) from acid where ds='2008
 insert into table acid partition(ds='2008-04-08') values("foo", "bar");
 select count(*) from acid where ds='2008-04-08';
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08';
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08';
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08';
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08';
 select count(*) from acid where ds='2008-04-08';
 
-explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08');
-update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08');
+explain update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08');
+update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08');
 select count(*) from acid where ds in ('2008-04-08');
 
 delete from acid where key = 'foo' and ds='2008-04-08';
@@ -36,12 +36,12 @@ select count(*) from acid where ds='2008
 insert into table acid partition(ds='2008-04-08') values("foo", "bar");
 select count(*) from acid where ds='2008-04-08';
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08';
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08';
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08';
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08';
 select count(*) from acid where ds='2008-04-08';
 
-explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08');
-update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08');
+explain update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08');
+update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08');
 select count(*) from acid where ds in ('2008-04-08');
 
 delete from acid where key = 'foo' and ds='2008-04-08';
@@ -58,12 +58,12 @@ select count(*) from acid where ds='2008
 insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar");
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
 select count(*) from acid where ds='2008-04-08' and hr>=11;
 
 delete from acid where key = 'foo' and ds='2008-04-08' and hr=11;
@@ -80,12 +80,12 @@ select count(*) from acid where ds='2008
 insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar");
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
 select count(*) from acid where ds='2008-04-08' and hr>=11;
 
 delete from acid where key = 'foo' and ds='2008-04-08' and hr=11;
@@ -103,12 +103,12 @@ select count(*) from acid where ds='2008
 insert into table acid partition(ds='2008-04-08',hr=11) values("foo", "bar");
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11;
 select count(*) from acid where ds='2008-04-08' and hr=11;
 
-explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
-update acid set key = 'foo' where value = 'bar' and ds='2008-04-08' and hr>=11;
+explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
+update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11;
 select count(*) from acid where ds='2008-04-08' and hr>=11;
 
 delete from acid where key = 'foo' and ds='2008-04-08' and hr=11;

Modified: hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_all_types.q
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_all_types.q?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_all_types.q (original)
+++ hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_all_types.q Tue Apr 14 14:47:30 2015
@@ -5,6 +5,7 @@ set hive.enforce.bucketing=true;
 create table acid_uat(ti tinyint,
                  si smallint,
                  i int,
+                 j int,
                  bi bigint,
                  f float,
                  d double,
@@ -20,6 +21,7 @@ insert into table acid_uat
     select ctinyint,
            csmallint,
            cint,
+           cint j,
            cbigint,
            cfloat,
            cdouble,
@@ -37,7 +39,7 @@ select * from acid_uat order by i;
 update acid_uat set
     ti = 1,
     si = 2,
-    i = 3,
+    j = 3,
     bi = 4,
     f = 3.14,
     d = 6.28,

Modified: hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_tmp_table.q?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_tmp_table.q (original)
+++ hive/branches/hbase-metastore/ql/src/test/queries/clientpositive/update_tmp_table.q Tue Apr 14 14:47:30 2015
@@ -8,7 +8,7 @@ insert into table acid_utt select cint,
 
 select a,b from acid_utt order by a;
 
-update acid_utt set b = 'fred' where b = '0ruyd6Y50JpdGRf6HqD';
+update acid_utt set a = 'fred' where b = '0ruyd6Y50JpdGRf6HqD';
 
 select * from acid_utt order by a;
 

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out Tue Apr 14 14:47:30 2015
@@ -18,6 +18,4 @@ PREHOOK: query: alter table aa set serde
 PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
 PREHOOK: Input: default@aa
 PREHOOK: Output: default@aa
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unclosed character class near index 7
-[^\](.*)
-       ^
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. at least one column must be specified for the table

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out Tue Apr 14 14:47:30 2015
@@ -1,10 +1,10 @@
 PREHOOK: query: -- check update without update priv
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+create table auth_noupd(i int, j int) clustered by (j) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@auth_noupd
 POSTHOOK: query: -- check update without update priv
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+create table auth_noupd(i int, j int) clustered by (j) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@auth_noupd

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/annotate_stats_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/annotate_stats_part.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/annotate_stats_part.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/annotate_stats_part.q.out Tue Apr 14 14:47:30 2015
@@ -481,106 +481,70 @@ POSTHOOK: query: -- This is to test filt
 explain select locid from loc_orc where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc
-            Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (locid > 0) (type: boolean)
-              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: locid (type: int)
-                outputColumnNames: _col0
-                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc
+          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Filter Operator
+            predicate: (locid > 0) (type: boolean)
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: locid (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
 
 PREHOOK: query: explain select locid,year from loc_orc where locid>0 and year='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select locid,year from loc_orc where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc
-            Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (locid > 0) (type: boolean)
-              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: locid (type: int), '2001' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc
+          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Filter Operator
+            predicate: (locid > 0) (type: boolean)
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: locid (type: int), '2001' (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
 
 PREHOOK: query: explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc
-            Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (locid > 0) (type: boolean)
-              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: locid (type: int), '2001' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc
+          Statistics: Num rows: 7 Data size: 678 Basic stats: COMPLETE Column stats: COMPLETE
+          Filter Operator
+            predicate: (locid > 0) (type: boolean)
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: locid (type: int), '2001' (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
 

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update.q.out Tue Apr 14 14:47:30 2015
@@ -1,12 +1,12 @@
 PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!)
 
-CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_auth_up
 POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!)
 
-CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_auth_up
@@ -52,11 +52,11 @@ default     t_auth_up                       user1   USER    SELECT  tr
 default        t_auth_up                       user1   USER    UPDATE  true    -1      user1
 default        t_auth_up                       userWIns        USER    SELECT  false   -1      user1
 default        t_auth_up                       userWIns        USER    UPDATE  false   -1      user1
-PREHOOK: query: update t_auth_up set i = 0 where i > 0
+PREHOOK: query: update t_auth_up set j = 0 where i > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t_auth_up
 PREHOOK: Output: default@t_auth_up
-POSTHOOK: query: update t_auth_up set i = 0 where i > 0
+POSTHOOK: query: update t_auth_up set j = 0 where i > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t_auth_up
 POSTHOOK: Output: default@t_auth_up

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update_own_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update_own_table.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update_own_table.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/authorization_update_own_table.q.out Tue Apr 14 14:47:30 2015
@@ -1,16 +1,16 @@
-PREHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: create table auth_noupd(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@auth_noupd
-POSTHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: query: create table auth_noupd(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@auth_noupd
-PREHOOK: query: update auth_noupd set i = 0 where i > 0
+PREHOOK: query: update auth_noupd set j = 0 where i > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@auth_noupd
 PREHOOK: Output: default@auth_noupd
-POSTHOOK: query: update auth_noupd set i = 0 where i > 0
+POSTHOOK: query: update auth_noupd set j = 0 where i > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@auth_noupd
 POSTHOOK: Output: default@auth_noupd

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out Tue Apr 14 14:47:30 2015
@@ -65,9 +65,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 1001
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -81,18 +81,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string)
+                outputColumnNames: _col0, _col2
                 Reduce Output Operator
                   key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string)
+                  value expressions: _col2 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), 'bar' (type: string), '2008-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), VALUE._col1 (type: string), '2008-04-08' (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           File Output Operator
             compressed: false
@@ -117,12 +117,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
 PREHOOK: Output: default@acid@ds=2008-04-08
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds='2008-04-08'
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and ds='2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
@@ -138,9 +138,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 1001
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08')
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08')
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -154,18 +154,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), ds (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string), ds (type: string)
+                outputColumnNames: _col0, _col2, _col3
                 Reduce Output Operator
                   key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string), _col3 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), 'bar' (type: string), VALUE._col2 (type: string)
+          expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           File Output Operator
             compressed: false
@@ -190,12 +190,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08')
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
 PREHOOK: Output: default@acid@ds=2008-04-08
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in ('2008-04-08')
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
@@ -303,9 +303,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 1001
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08'
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08'
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -319,18 +319,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string)
+                outputColumnNames: _col0, _col2
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string)
+                  value expressions: _col2 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), '2008-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), '2008-04-08' (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           File Output Operator
             compressed: false
@@ -355,12 +355,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08'
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
 PREHOOK: Output: default@acid@ds=2008-04-08
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08'
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
@@ -376,9 +376,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 1001
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and ds 
in ('2008-04-08')
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds 
in ('2008-04-08')
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds in ('2008-04-08')
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds in ('2008-04-08')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -392,18 +392,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
ds (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string), 
ds (type: string)
+                outputColumnNames: _col0, _col2, _col3
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string), _col3 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), VALUE._col2 (type: string)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), VALUE._col2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           File Output Operator
             compressed: false
@@ -428,12 +428,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in 
('2008-04-08')
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds in 
('2008-04-08')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
 PREHOOK: Output: default@acid@ds=2008-04-08
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and ds in 
('2008-04-08')
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and ds in 
('2008-04-08')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
@@ -547,9 +547,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -563,18 +563,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string)
+                outputColumnNames: _col0, _col2
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string)
+                  value expressions: _col2 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), '2008-04-08' (type: string), 11 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           File Output Operator
             compressed: false
@@ -600,12 +600,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
@@ -621,9 +621,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -637,18 +637,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
hr (type: int)
-                outputColumnNames: _col0, _col1, _col4
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string), 
hr (type: int)
+                outputColumnNames: _col0, _col2, _col4
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string), _col4 (type: int)
+                  value expressions: _col2 (type: string), _col4 (type: int)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col3 (type: 
int)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), '2008-04-08' (type: string), VALUE._col3 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           File Output Operator
             compressed: false
@@ -674,14 +674,14 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=12
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=12
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
@@ -799,9 +799,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -815,18 +815,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string)
+                outputColumnNames: _col0, _col2
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string)
+                  value expressions: _col2 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), '2008-04-08' (type: string), 11 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           File Output Operator
             compressed: false
@@ -852,12 +852,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
@@ -873,9 +873,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -889,18 +889,18 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
hr (type: int)
-                outputColumnNames: _col0, _col1, _col4
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'bar' (type: string), 
hr (type: int)
+                outputColumnNames: _col0, _col2, _col4
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                   sort order: +
                   Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                  value expressions: _col1 (type: string), _col4 (type: int)
+                  value expressions: _col2 (type: string), _col4 (type: int)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: 
string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col3 (type: 
int)
+          expressions: KEY.reducesinkkey0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
VALUE._col1 (type: string), '2008-04-08' (type: string), VALUE._col3 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           File Output Operator
             compressed: false
@@ -926,14 +926,14 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=12
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=12
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
@@ -1051,9 +1051,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1067,9 +1067,9 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
value (type: string), ds (type: string), hr (type: int)
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), 
'bar' (type: string), ds (type: string), hr (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
@@ -1104,12 +1104,12 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
@@ -1125,9 +1125,9 @@ POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 501
-PREHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
-POSTHOOK: query: explain update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1141,9 +1141,9 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (value = 'bar') (type: boolean)
+              predicate: (key = 'foo') (type: boolean)
               Select Operator
-                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 
value (type: string), ds (type: string), hr (type: int)
+                expressions: ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), 
'bar' (type: string), ds (type: string), hr (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Reduce Output Operator
                   key expressions: _col0 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
@@ -1178,14 +1178,14 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-PREHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+PREHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Input: default@acid@ds=2008-04-08/hr=12
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=11
 PREHOOK: Output: default@acid@ds=2008-04-08/hr=12
-POSTHOOK: query: update acid set key = 'foo' where value = 'bar' and 
ds='2008-04-08' and hr>=11
+POSTHOOK: query: update acid set value = 'bar' where key = 'foo' and 
ds='2008-04-08' and hr>=11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/index_stale_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/index_stale_partitioned.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/index_stale_partitioned.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/index_stale_partitioned.q.out Tue Apr 14 14:47:30 2015
@@ -80,37 +80,25 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key  = 86 AND foo = 'bar'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: temp
-            filterExpr: ((UDFToDouble(key) = 86.0) and (foo = 'bar')) (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(key) = 86.0) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: '86' (type: string), val (type: string), 'bar' (type: string)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: temp
+          filterExpr: ((UDFToDouble(key) = 86.0) and (foo = 'bar')) (type: boolean)
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (UDFToDouble(key) = 86.0) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '86' (type: string), val (type: string), 'bar' (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              ListSink
 
 PREHOOK: query: SELECT * FROM temp WHERE key  = 86 AND foo = 'bar'
 PREHOOK: type: QUERY

Modified: hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input42.q.out
URL: http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input42.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input42.q.out (original)
+++ hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input42.q.out Tue Apr 14 14:47:30 2015
@@ -1187,52 +1187,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (UDFToDouble(key) < 200.0) (type: boolean)
-              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 333 Data size: 3537 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 333 Data size: 3537 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3
-                        columns.types string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: hr=11
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1276,9 +1238,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-#### A masked pattern was here ####
           Partition
-            base file name: hr=12
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1322,15 +1282,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [a]
-        /srcpart/ds=2008-04-08/hr=12 [a]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: a
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+            Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select * from srcpart a where a.ds='2008-04-08' and key < 200
 PREHOOK: type: QUERY
@@ -1759,52 +1724,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (rand(100) < 0.1) (type: boolean)
-              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 333 Data size: 3537 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 333 Data size: 3537 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3
-                        columns.types string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: hr=11
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1848,9 +1775,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-#### A masked pattern was here ####
           Partition
-            base file name: hr=12
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1894,15 +1819,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [a]
-        /srcpart/ds=2008-04-08/hr=12 [a]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: a
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (rand(100) < 0.1) (type: boolean)
+            Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select * from srcpart a where a.ds='2008-04-08' and rand(100) 
< 0.1
 PREHOOK: type: QUERY

Modified: 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input_part9.q.out
URL: 
http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input_part9.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input_part9.q.out
 (original)
+++ 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/input_part9.q.out
 Tue Apr 14 14:47:30 2015
@@ -38,52 +38,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: x
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3
-                        columns.types string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: hr=11
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -127,9 +89,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-#### A masked pattern was here ####
           Partition
-            base file name: hr=12
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -173,15 +133,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.srcpart
             name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [x]
-        /srcpart/ds=2008-04-08/hr=12 [x]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: x
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: key is not null (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string), 
'2008-04-08' (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: SELECT x.* FROM SRCPART x WHERE key IS NOT NULL AND ds = 
'2008-04-08'
 PREHOOK: type: QUERY

Modified: 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
URL: 
http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
 (original)
+++ 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
 Tue Apr 14 14:47:30 2015
@@ -433,52 +433,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_dynamic_part
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (key = '484') (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: '484' (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1
-                        columns.types string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: key=484
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -522,14 +484,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.list_bucketing_dynamic_part
             name: default.list_bucketing_dynamic_part
-      Truncated Path -> Alias:
-        /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484 
[list_bucketing_dynamic_part]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: list_bucketing_dynamic_part
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (key = '484') (type: boolean)
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key, value from list_bucketing_dynamic_part where 
ds='2008-04-08' and hr='11' and key = "484"
 PREHOOK: type: QUERY

Modified: 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
URL: 
http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
 (original)
+++ 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
 Tue Apr 14 14:47:30 2015
@@ -305,52 +305,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_static_part
-            Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (value = 'val_466') (type: boolean)
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: key (type: string), 'val_466' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2406 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 250 Data size: 2406 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1
-                        columns.types string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: value=val_466
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -394,14 +356,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.list_bucketing_static_part
             name: default.list_bucketing_static_part
-      Truncated Path -> Alias:
-        /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 
[list_bucketing_static_part]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: list_bucketing_static_part
+          Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (value = 'val_466') (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: key (type: string), 'val_466' (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key, value from list_bucketing_static_part where 
ds='2008-04-08' and hr='11' and value = "val_466"
 PREHOOK: type: QUERY

Modified: 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
URL: 
http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
 (original)
+++ 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
 Tue Apr 14 14:47:30 2015
@@ -318,52 +318,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '466') and (col4 = 'val_466')) (type: 
boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '466' (type: string), col3 
(type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '11' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types 
string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: col4=val_466
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -407,14 +369,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.list_bucketing_mul_col
             name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        /list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466 
[list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '466' (type: string), col3 
(type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select * from list_bucketing_mul_col 
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
@@ -476,52 +444,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '382') and (col4 = 'val_382')) (type: 
boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '382' (type: string), col3 
(type: string), 'val_382' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '11' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types 
string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -565,14 +495,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.list_bucketing_mul_col
             name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        
/list_bucketing_mul_col/ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
 [list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '382') and (col4 = 'val_382')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '382' (type: string), col3 
(type: string), 'val_382' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select * from list_bucketing_mul_col 
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"

Modified: 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
URL: 
http://svn.apache.org/viewvc/hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out?rev=1673437&r1=1673436&r2=1673437&view=diff
==============================================================================
--- 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
 (original)
+++ 
hive/branches/hbase-metastore/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
 Tue Apr 14 14:47:30 2015
@@ -318,52 +318,14 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '466') and (col4 = 'val_466')) (type: 
boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '466' (type: string), col3 
(type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '2013-01-23+18:00:99' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: 
COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types 
string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels 
true
-                        serialization.format 1
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: col4=val_466
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -407,14 +369,20 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.list_bucketing_mul_col
             name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        
/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466
 [list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE 
Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '466' (type: string), col3 
(type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' 
(type: string), '2013-01-23+18:00:99' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE 
Column stats: NONE
+              ListSink
 
 PREHOOK: query: select * from list_bucketing_mul_col 
 where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = 
"val_466"

