hive git commit: HIVE-14084: Branch-1: HIVE-13985 backport to branch-1 introduced regression (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-06-23 prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-1 e8d71a22c -> 46c847968


HIVE-14084: Branch-1: HIVE-13985 backport to branch-1 introduced regression 
(Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/46c84796
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/46c84796
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/46c84796

Branch: refs/heads/branch-1
Commit: 46c8479685a2a7ab9a77ff54e12f11bfaaebd84a
Parents: e8d71a2
Author: Prasanth Jayachandran 
Authored: Thu Jun 23 15:17:00 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Thu Jun 23 15:17:00 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/46c84796/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 1b7f6d6..30a3f6a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -455,9 +455,8 @@ public class ReaderImpl implements Reader {
   int bufferSize) throws IOException {
 bb.position(footerAbsPos);
 bb.limit(footerAbsPos + footerSize);
-InputStream instream = InStream.create("footer",
+CodedInputStream in = InStream.createCodedInputStream("footer",
 Lists.newArrayList(new BufferChunk(bb, 0)), footerSize, codec, bufferSize);
-CodedInputStream in = CodedInputStream.newInstance(instream);
 return OrcProto.Footer.parseFrom(in);
   }
 
@@ -465,9 +464,8 @@ public class ReaderImpl implements Reader {
   int metadataSize, CompressionCodec codec, int bufferSize) throws IOException {
 bb.position(metadataAbsPos);
 bb.limit(metadataAbsPos + metadataSize);
-InputStream inputStream = InStream.create("metadata",
+CodedInputStream in = InStream.createCodedInputStream("metadata",
 Lists.newArrayList(new BufferChunk(bb, 0)), metadataSize, codec, bufferSize);
-CodedInputStream in = CodedInputStream.newInstance(inputStream);
 return OrcProto.Metadata.parseFrom(in);
   }
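The fix is a one-line substitution in each method: rather than wrapping the InStream in a default protobuf CodedInputStream, the reader now asks InStream.createCodedInputStream for one that is already configured for ORC footer parsing. A plausible reading (this is what the analogous HIVE-11592 change did) is that the helper raises protobuf's default 64 MB message-size limit so oversized footer and metadata sections still parse. A minimal sketch of such a helper under that assumption; the class name and limit constant below are illustrative, not the branch-1 source:

import com.google.protobuf.CodedInputStream;
import java.io.InputStream;

final class CodedStreamFactory {
  // Assumed cap for illustration; protobuf's default limit is 64 MB,
  // which very large ORC footers can exceed.
  private static final int PROTOBUF_MESSAGE_MAX_LIMIT = 1024 << 20; // 1 GB

  static CodedInputStream create(InputStream footerStream) {
    CodedInputStream in = CodedInputStream.newInstance(footerStream);
    // Without this call, parsing a message larger than the default limit fails.
    in.setSizeLimit(PROTOBUF_MESSAGE_MAX_LIMIT);
    return in;
  }
}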
 



hive git commit: HIVE-14028: Insert overwrite does not work in HBase tables: stats is not updated (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-06-23 pxiong
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 8caf81eda -> cf127f7af


HIVE-14028: Insert overwrite does not work in HBase tables: stats is not updated (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cf127f7a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cf127f7a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cf127f7a

Branch: refs/heads/branch-2.1
Commit: cf127f7afe6a04e00a4ff188021cfbd6ae692a1e
Parents: 8caf81e
Author: Pengcheng Xiong 
Authored: Thu Jun 23 10:25:11 2016 -0700
Committer: Pengcheng Xiong 
Committed: Thu Jun 23 10:26:53 2016 -0700

----------------------------------------------------------------------
 .../src/test/queries/positive/hbasestats.q  |  44 +++
 .../test/results/positive/hbase_queries.q.out   |  26 +-
 .../hbase_single_sourced_multi_insert.q.out |  39 +-
 .../src/test/results/positive/hbasestats.q.out  | 389 +++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  22 +-
 5 files changed, 500 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cf127f7a/hbase-handler/src/test/queries/positive/hbasestats.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbasestats.q 
b/hbase-handler/src/test/queries/positive/hbasestats.q
new file mode 100644
index 0000000..52e11c9
--- /dev/null
+++ b/hbase-handler/src/test/queries/positive/hbasestats.q
@@ -0,0 +1,44 @@
+DROP TABLE users;
+
+CREATE TABLE users(key string, state string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+WITH SERDEPROPERTIES (
+"hbase.columns.mapping" = "info:state,info:country,info:country_id"
+);
+
+desc formatted users;
+
+explain INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 FROM src;
+
+INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+select count(*) from users;
+
+set hive.compute.query.using.stats=true;
+
+select count(*) from users;
+
+INSERT into TABLE users SELECT 'user2', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+select count(*) from users;
+
+analyze table users compute statistics;
+
+desc formatted users;
+
+explain select count(*) from users;
+
+select count(*) from users;
+
+INSERT into TABLE users SELECT 'user3', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+explain select count(*) from users;
+
+select count(*) from users;
+
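For context on what the fix does: the plan diffs below add an "Alter Table ... drop props" stage that clears COLUMN_STATS_ACCURATE before the overwrite runs, so a later count(*) with hive.compute.query.using.stats=true can no longer be answered from stale row counts. A hedged sketch of building that kind of DDL descriptor; AlterTableDesc and StatsSetupConst are real Hive classes, but the exact wiring into the insert plan is an assumption here:

import java.util.HashMap;

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;

final class DropStatsPropSketch {
  static AlterTableDesc dropAccurateStats(String qualifiedTableName) {
    HashMap<String, String> propsToDrop = new HashMap<String, String>();
    propsToDrop.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); // value unused for a drop
    AlterTableDesc desc = new AlterTableDesc(AlterTableDesc.AlterTableTypes.DROPPROPS);
    desc.setOldName(qualifiedTableName); // e.g. "default.users"
    desc.setProps(propsToDrop);
    return desc;
  }
}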

http://git-wip-us.apache.org/repos/asf/hive/blob/cf127f7a/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out 
b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 8aa5f84..d0ce57a 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -40,9 +40,18 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT *
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
+  Stage-1 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
+  Alter Table Operator:
+Alter Table
+  type: drop props
+  old name: default.hbase_table_1
+  properties:
+COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-1
 Map Reduce
   Map Operator Tree:
   TableScan
@@ -483,11 +492,20 @@ JOIN
 ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-0
+  Alter Table Operator:
+Alter Table
+  type: drop props
+  old name: default.hbase_table_3
+  properties:
+COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-2
 Map Reduce
   Map Operator Tree:
   TableScan
@@ -522,7 +540,7 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-0
+  Stage: Stage-1
 Map Reduce
   Map Operator Tree:
   TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/cf127f7a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git 
a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
 
b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index 38c51b5..e487a0b 100644
--- 

hive git commit: HIVE-14028: Insert overwrite does not work in HBase tables: stats is not updated (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-06-23 pxiong
Repository: hive
Updated Branches:
  refs/heads/master 5b82e5e9f -> 3bc615f82


HIVE-14028: Insert overwrite does not work in HBase tables: stats is not updated (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3bc615f8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3bc615f8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3bc615f8

Branch: refs/heads/master
Commit: 3bc615f827e0246b3ae4ab70d0fb8b360aa25424
Parents: 5b82e5e
Author: Pengcheng Xiong 
Authored: Thu Jun 23 10:25:11 2016 -0700
Committer: Pengcheng Xiong 
Committed: Thu Jun 23 10:25:11 2016 -0700

----------------------------------------------------------------------
 .../src/test/queries/positive/hbasestats.q  |  44 +++
 .../test/results/positive/hbase_queries.q.out   |  26 +-
 .../hbase_single_sourced_multi_insert.q.out |  39 +-
 .../src/test/results/positive/hbasestats.q.out  | 389 +++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  22 +-
 5 files changed, 500 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3bc615f8/hbase-handler/src/test/queries/positive/hbasestats.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbasestats.q 
b/hbase-handler/src/test/queries/positive/hbasestats.q
new file mode 100644
index 0000000..52e11c9
--- /dev/null
+++ b/hbase-handler/src/test/queries/positive/hbasestats.q
@@ -0,0 +1,44 @@
+DROP TABLE users;
+
+CREATE TABLE users(key string, state string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+WITH SERDEPROPERTIES (
+"hbase.columns.mapping" = "info:state,info:country,info:country_id"
+);
+
+desc formatted users;
+
+explain INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 FROM src;
+
+INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+select count(*) from users;
+
+set hive.compute.query.using.stats=true;
+
+select count(*) from users;
+
+INSERT into TABLE users SELECT 'user2', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+select count(*) from users;
+
+analyze table users compute statistics;
+
+desc formatted users;
+
+explain select count(*) from users;
+
+select count(*) from users;
+
+INSERT into TABLE users SELECT 'user3', 'IA', 'USA', 0 FROM src;
+
+desc formatted users;
+
+explain select count(*) from users;
+
+select count(*) from users;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/3bc615f8/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out 
b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index a99f561..d5c1cfa 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -40,9 +40,18 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT *
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
+  Stage-1 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
+  Alter Table Operator:
+Alter Table
+  type: drop props
+  old name: default.hbase_table_1
+  properties:
+COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-1
 Map Reduce
   Map Operator Tree:
   TableScan
@@ -483,11 +492,20 @@ JOIN
 ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-0
+  Alter Table Operator:
+Alter Table
+  type: drop props
+  old name: default.hbase_table_3
+  properties:
+COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-2
 Map Reduce
   Map Operator Tree:
   TableScan
@@ -522,7 +540,7 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-0
+  Stage: Stage-1
 Map Reduce
   Map Operator Tree:
   TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/3bc615f8/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git 
a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
 
b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index 38c51b5..e487a0b 100644
--- 

hive git commit: HIVE-14076: Vectorization is not supported for datatype:VOID error while inserting data into specific columns (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-06-23 jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 5cd6bb954 -> 5b82e5e9f


HIVE-14076: Vectorization is not supported for datatype:VOID error while 
inserting data into specific columns (Jesus Camacho Rodriguez, reviewed by 
Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5b82e5e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5b82e5e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5b82e5e9

Branch: refs/heads/master
Commit: 5b82e5e9f2982f64e78efe69bea7bb28128bdb8a
Parents: 5cd6bb9
Author: Jesus Camacho Rodriguez 
Authored: Thu Jun 23 08:15:41 2016 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Jun 23 09:48:57 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  3 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 12 ++---
 .../acid_vectorization_missing_cols.q   | 21 
 .../results/clientpositive/cbo_rp_insert.q.out  |  2 +-
 .../insert_into_with_schema.q.out   | 18 +++
 .../insert_into_with_schema2.q.out  |  4 +-
 .../tez/acid_vectorization_missing_cols.q.out   | 56 
 7 files changed, 97 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5b82e5e9/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index e2de63e..8ef978e 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -412,7 +412,8 @@ minitez.query.files.shared=acid_globallimit.q,\
   union_type_chk.q
 
 
-minitez.query.files=bucket_map_join_tez1.q,\
+minitez.query.files=acid_vectorization_missing_cols.q,\
+  bucket_map_join_tez1.q,\
   smb_cache.q,\
   bucket_map_join_tez2.q,\
   constprog_dpp.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/5b82e5e9/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 774cc2b..53f3b05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -4282,12 +4282,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 "No table/partition found in QB metadata for dest='" + dest + "'"));
 }
 ArrayList<ExprNodeDesc> new_col_list = new ArrayList<ExprNodeDesc>();
-ArrayList<ColumnInfo> newSchema = new ArrayList<ColumnInfo>();
 colListPos = 0;
 List<FieldSchema> targetTableCols = target != null ? target.getCols() : partition.getCols();
 List<String> targetTableColNames = new ArrayList<String>();
+List<TypeInfo> targetTableColTypes = new ArrayList<TypeInfo>();
 for(FieldSchema fs : targetTableCols) {
   targetTableColNames.add(fs.getName());
+  targetTableColTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
 }
 Map<String, String> partSpec = qb.getMetaData().getPartSpecForAlias(dest);
 if(partSpec != null) {
@@ -4296,13 +4297,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   for(Map.Entry<String, String> partKeyVal : partSpec.entrySet()) {
 if (partKeyVal.getValue() == null) {
   targetTableColNames.add(partKeyVal.getKey());//these must be after non-partition cols
+  targetTableColTypes.add(TypeInfoFactory.stringTypeInfo);
 }
   }
 }
 RowResolver newOutputRR = new RowResolver();
 //now make the select produce <regular columns>,<dynamic partition columns> with
 //where missing columns are NULL-filled
-for(String f : targetTableColNames) {
+for (int i = 0; i < targetTableColNames.size(); i++) {
+  String f = targetTableColNames.get(i);
   if(targetCol2Projection.containsKey(f)) {
 //put existing column in new list to make sure it is in the right position
 new_col_list.add(targetCol2Projection.get(f));
@@ -4312,10 +4315,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
   else {
 //add new 'synthetic' columns for projections not provided by Select
-TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
-CommonToken t = new CommonToken(HiveParser.TOK_NULL);
-t.setText("TOK_NULL");
-ExprNodeDesc exp = genExprNodeDesc(new ASTNode(t), inputRR, tcCtx);
+ExprNodeDesc exp = new ExprNodeConstantDesc(targetTableColTypes.get(i), null);
 new_col_list.add(exp);
 final String tableAlias = null;//this column doesn't come from any table
 ColumnInfo colInfo = new 
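The substance of the change: a synthetic column for a SELECT target the query did not provide used to be built from a TOK_NULL token, which type-checks to VOID and cannot be vectorized; it is now a null constant that carries the target column's real type. A small standalone illustration using the same Hive classes the patch uses (running them outside the analyzer like this is purely for demonstration):

import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

final class TypedNullSketch {
  public static void main(String[] args) {
    // Resolve the target column's type from its metastore type string,
    // exactly as the patch does for each target table column.
    TypeInfo colType = TypeInfoUtils.getTypeInfoFromTypeString("int");
    // A NULL constant that still reports type "int" rather than VOID.
    ExprNodeDesc synthetic = new ExprNodeConstantDesc(colType, null);
    System.out.println(synthetic.getTypeInfo()); // prints "int"
  }
}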

hive git commit: HIVE-14076: Vectorization is not supported for datatype:VOID error while inserting data into specific columns (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-06-23 jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 ac67fa647 -> 8caf81eda


HIVE-14076: Vectorization is not supported for datatype:VOID error while 
inserting data into specific columns (Jesus Camacho Rodriguez, reviewed by 
Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8caf81ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8caf81ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8caf81ed

Branch: refs/heads/branch-2.1
Commit: 8caf81eda744709094992890150aefe45aa669ba
Parents: ac67fa6
Author: Jesus Camacho Rodriguez 
Authored: Thu Jun 23 08:10:27 2016 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Jun 23 09:49:57 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  3 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 12 ++---
 .../acid_vectorization_missing_cols.q   | 21 
 .../results/clientpositive/cbo_rp_insert.q.out  |  2 +-
 .../insert_into_with_schema.q.out   | 18 +++
 .../insert_into_with_schema2.q.out  |  4 +-
 .../tez/acid_vectorization_missing_cols.q.out   | 56 
 7 files changed, 97 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8caf81ed/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 0076e2f..d0cc0b7 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -408,7 +408,8 @@ minitez.query.files.shared=acid_globallimit.q,\
   union_type_chk.q
 
 
-minitez.query.files=bucket_map_join_tez1.q,\
+minitez.query.files=acid_vectorization_missing_cols.q,\
+  bucket_map_join_tez1.q,\
   smb_cache.q,\
   bucket_map_join_tez2.q,\
   constprog_dpp.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/8caf81ed/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 774cc2b..53f3b05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -4282,12 +4282,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 "No table/partition found in QB metadata for dest='" + dest + "'"));
 }
 ArrayList<ExprNodeDesc> new_col_list = new ArrayList<ExprNodeDesc>();
-ArrayList<ColumnInfo> newSchema = new ArrayList<ColumnInfo>();
 colListPos = 0;
 List<FieldSchema> targetTableCols = target != null ? target.getCols() : partition.getCols();
 List<String> targetTableColNames = new ArrayList<String>();
+List<TypeInfo> targetTableColTypes = new ArrayList<TypeInfo>();
 for(FieldSchema fs : targetTableCols) {
   targetTableColNames.add(fs.getName());
+  targetTableColTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
 }
 Map<String, String> partSpec = qb.getMetaData().getPartSpecForAlias(dest);
 if(partSpec != null) {
@@ -4296,13 +4297,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   for(Map.Entry<String, String> partKeyVal : partSpec.entrySet()) {
 if (partKeyVal.getValue() == null) {
   targetTableColNames.add(partKeyVal.getKey());//these must be after non-partition cols
+  targetTableColTypes.add(TypeInfoFactory.stringTypeInfo);
 }
   }
 }
 RowResolver newOutputRR = new RowResolver();
 //now make the select produce <regular columns>,<dynamic partition columns> with
 //where missing columns are NULL-filled
-for(String f : targetTableColNames) {
+for (int i = 0; i < targetTableColNames.size(); i++) {
+  String f = targetTableColNames.get(i);
   if(targetCol2Projection.containsKey(f)) {
 //put existing column in new list to make sure it is in the right position
 new_col_list.add(targetCol2Projection.get(f));
@@ -4312,10 +4315,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
   else {
 //add new 'synthetic' columns for projections not provided by Select
-TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
-CommonToken t = new CommonToken(HiveParser.TOK_NULL);
-t.setText("TOK_NULL");
-ExprNodeDesc exp = genExprNodeDesc(new ASTNode(t), inputRR, tcCtx);
+ExprNodeDesc exp = new ExprNodeConstantDesc(targetTableColTypes.get(i), null);
 new_col_list.add(exp);
 final String tableAlias = null;//this column doesn't come from any table
 ColumnInfo colInfo = new 

hive git commit: HIVE-14021: When converting to CNF, fail if the expression exceeds a threshold (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-06-23 jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 f5a598cba -> ac67fa647


HIVE-14021: When converting to CNF, fail if the expression exceeds a threshold 
(Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ac67fa64
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ac67fa64
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ac67fa64

Branch: refs/heads/branch-2.1
Commit: ac67fa647e22ec7ddcbe289967c7350366d156f6
Parents: f5a598c
Author: Jesus Camacho Rodriguez 
Authored: Thu Jun 23 07:50:25 2016 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Jun 23 07:50:25 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   3 +
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  | 155 +++
 .../rules/HivePointLookupOptimizerRule.java |  31 ++--
 .../calcite/rules/HivePreFilteringRule.java |  25 +--
 .../hadoop/hive/ql/parse/CalcitePlanner.java|   8 +-
 .../optimizer/calcite/TestCBOMaxNumToCNF.java   |  83 ++
 6 files changed, 274 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ac67fa64/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7af818a..eea65e4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -990,6 +990,9 @@ public class HiveConf extends Configuration {
 
 // CBO related
 HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
+HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if" +
+"the expression exceeds this threshold; the threshold is expressed in terms of number of nodes (leaves and" +
+"interior nodes). -1 to not set up a threshold."),
 HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
 HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on"
  + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),

http://git-wip-us.apache.org/repos/asf/hive/blob/ac67fa64/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
index d466378..87fc1b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
@@ -38,6 +38,7 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ControlFlowException;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 import org.slf4j.Logger;
@@ -51,6 +52,160 @@ public class HiveRexUtil {
 
  protected static final Logger LOG = LoggerFactory.getLogger(HiveRexUtil.class);
 
+
+  /** Converts an expression to conjunctive normal form (CNF).
+   *
+   * The following expression is in CNF:
+   *
+   * (a OR b) AND (c OR d)
+   *
+   * The following expression is not in CNF:
+   *
+   * (a AND b) OR c
+   *
+   * but can be converted to CNF:
+   *
+   * (a OR c) AND (b OR c)
+   *
+   * The following expression is not in CNF:
+   *
+   * NOT (a OR NOT b)
+   *
+   * but can be converted to CNF by applying de Morgan's theorem:
+   *
+   * NOT a AND b
+   *
+   * Expressions not involving AND, OR or NOT at the top level are in CNF.
+   */
+  public static RexNode toCnf(RexBuilder rexBuilder, RexNode rex) {
+return new CnfHelper(rexBuilder).toCnf(rex);
+  }
+
+  public static RexNode toCnf(RexBuilder rexBuilder, int maxCNFNodeCount, RexNode rex) {
+return new CnfHelper(rexBuilder, maxCNFNodeCount).toCnf(rex);
+  }
+
+  /** Helps {@link org.apache.calcite.rex.RexUtil#toCnf}. */
+  private static class CnfHelper {
+final RexBuilder rexBuilder;
+int currentCount;
+final int maxNodeCount;
+
+private CnfHelper(RexBuilder rexBuilder) {
+  this(rexBuilder, Integer.MAX_VALUE);
+}
+
+private CnfHelper(RexBuilder rexBuilder, int maxNodeCount) {
+  this.rexBuilder = rexBuilder;
+  this.maxNodeCount = maxNodeCount;
+}
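The message is truncated above, but the mechanism is already visible: CnfHelper tracks a currentCount against a maxNodeCount, and the newly imported ControlFlowException suggests the conversion bails out once the count passes the limit so the caller can keep the original expression. A self-contained sketch of that guard under those assumptions (illustrative names, not the HiveRexUtil source):

final class CnfGuardSketch {
  static final class CnfOverflowException extends RuntimeException { }

  private final int maxNodeCount; // -1 in the config means "no limit"
  private int currentCount;

  CnfGuardSketch(int maxNodeCount) {
    this.maxNodeCount = maxNodeCount < 0 ? Integer.MAX_VALUE : maxNodeCount;
  }

  // Called for each node materialized while expanding toward CNF.
  void noteNode() {
    if (++currentCount > maxNodeCount) {
      // Caller catches this and returns the untransformed expression,
      // avoiding the exponential blow-up CNF conversion can cause.
      throw new CnfOverflowException();
    }
  }
}

A caller would wrap the recursive conversion in try/catch and fall back to the input RexNode on overflow; hive.cbo.cnf.maxnodes defaults to -1, which disables the limit.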

hive git commit: HIVE-14021: When converting to CNF, fail if the expression exceeds a threshold (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-06-23 jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 5dfe23efa -> 5cd6bb954


HIVE-14021: When converting to CNF, fail if the expression exceeds a threshold 
(Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5cd6bb95
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5cd6bb95
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5cd6bb95

Branch: refs/heads/master
Commit: 5cd6bb9540d7857851b6042f4b30d413c7324a98
Parents: 5dfe23e
Author: Jesus Camacho Rodriguez 
Authored: Wed Jun 22 07:51:33 2016 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Jun 23 07:47:04 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   3 +
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  | 155 +++
 .../rules/HivePointLookupOptimizerRule.java |  31 ++--
 .../calcite/rules/HivePreFilteringRule.java |  25 +--
 .../hadoop/hive/ql/parse/CalcitePlanner.java|   8 +-
 .../optimizer/calcite/TestCBOMaxNumToCNF.java   |  83 ++
 6 files changed, 274 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5cd6bb95/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index faf9088..1d1306f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -990,6 +990,9 @@ public class HiveConf extends Configuration {
 
 // CBO related
 HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
+HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if" +
+"the expression exceeds this threshold; the threshold is expressed in terms of number of nodes (leaves and" +
+"interior nodes). -1 to not set up a threshold."),
 HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
 HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on"
  + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),

http://git-wip-us.apache.org/repos/asf/hive/blob/5cd6bb95/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
index d466378..87fc1b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
@@ -38,6 +38,7 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ControlFlowException;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 import org.slf4j.Logger;
@@ -51,6 +52,160 @@ public class HiveRexUtil {
 
  protected static final Logger LOG = LoggerFactory.getLogger(HiveRexUtil.class);
 
+
+  /** Converts an expression to conjunctive normal form (CNF).
+   *
+   * The following expression is in CNF:
+   *
+   * (a OR b) AND (c OR d)
+   *
+   * The following expression is not in CNF:
+   *
+   * (a AND b) OR c
+   *
+   * but can be converted to CNF:
+   *
+   * (a OR c) AND (b OR c)
+   *
+   * The following expression is not in CNF:
+   *
+   * NOT (a OR NOT b)
+   *
+   * but can be converted to CNF by applying de Morgan's theorem:
+   *
+   * NOT a AND b
+   *
+   * Expressions not involving AND, OR or NOT at the top level are in CNF.
+   */
+  public static RexNode toCnf(RexBuilder rexBuilder, RexNode rex) {
+return new CnfHelper(rexBuilder).toCnf(rex);
+  }
+
+  public static RexNode toCnf(RexBuilder rexBuilder, int maxCNFNodeCount, RexNode rex) {
+return new CnfHelper(rexBuilder, maxCNFNodeCount).toCnf(rex);
+  }
+
+  /** Helps {@link org.apache.calcite.rex.RexUtil#toCnf}. */
+  private static class CnfHelper {
+final RexBuilder rexBuilder;
+int currentCount;
+final int maxNodeCount;
+
+private CnfHelper(RexBuilder rexBuilder) {
+  this(rexBuilder, Integer.MAX_VALUE);
+}
+
+private CnfHelper(RexBuilder rexBuilder, int maxNodeCount) {
+  this.rexBuilder = rexBuilder;
+  this.maxNodeCount = maxNodeCount;
+}

hive git commit: HIVE-7443: Fix HiveConnection to communicate with Kerberized Hive JDBC server and alternative JDKs (Yu Gao & Aihua Xu, reviewed by Chaoyu Tang)

2016-06-23 aihuaxu
Repository: hive
Updated Branches:
  refs/heads/master b7e8d0c9c -> 5dfe23efa


HIVE-7443: Fix HiveConnection to communicate with Kerberized Hive JDBC server 
and alternative JDKs (Yu Gao & Aihua Xu, reviewed by Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5dfe23ef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5dfe23ef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5dfe23ef

Branch: refs/heads/master
Commit: 5dfe23efa964466585cdd50f26ccb054dd3a5859
Parents: b7e8d0c
Author: Aihua Xu 
Authored: Wed Jun 8 11:50:47 2016 -0400
Committer: Aihua Xu 
Committed: Thu Jun 23 09:34:32 2016 -0400

----------------------------------------------------------------------
 .../hive/thrift/HadoopThriftAuthBridge.java | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5dfe23ef/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
----------------------------------------------------------------------
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
index 86eb46d..d420d09 100644
--- 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
+++ 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
@@ -179,9 +179,9 @@ public abstract class HadoopThriftAuthBridge {
 
 public TTransport createClientTransport(
 String principalConfig, String host,
-String methodStr, String tokenStrForm, TTransport underlyingTransport,
-Map<String, String> saslProps) throws IOException {
-  AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr);
+String methodStr, String tokenStrForm, final TTransport underlyingTransport,
+final Map<String, String> saslProps) throws IOException {
+  final AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr);
 
   TTransport saslTransport = null;
   switch (method) {
@@ -198,21 +198,27 @@ public abstract class HadoopThriftAuthBridge {
 
   case KERBEROS:
 String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host);
-String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
+final String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
 if (names.length != 3) {
   throw new IOException(
   "Kerberos principal name does NOT have the expected hostname part: "
   + serverPrincipal);
 }
 try {
-  saslTransport = new TSaslClientTransport(
-  method.getMechanismName(),
-  null,
-  names[0], names[1],
-  saslProps, null,
-  underlyingTransport);
-  return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
-} catch (SaslException se) {
+  return UserGroupInformation.getCurrentUser().doAs(
+  new PrivilegedExceptionAction<TUGIAssumingTransport>() {
+@Override
+public TUGIAssumingTransport run() throws IOException {
+  TTransport saslTransport = new TSaslClientTransport(
+method.getMechanismName(),
+null,
+names[0], names[1],
+saslProps, null,
+underlyingTransport);
+  return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
+}
+  });
+} catch (InterruptedException | SaslException se) {
   throw new IOException("Could not instantiate SASL transport", se);
 }
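Why the doAs wrapper matters: on alternative JDKs (the IBM JDK in particular), the Kerberos credentials obtained at login live in the JAAS Subject rather than in any thread-default context, so the GSSAPI SASL client must be constructed while that Subject is bound. A minimal sketch of the pattern against the real Hadoop UserGroupInformation API; the body of run() is a stand-in for the transport construction:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

final class DoAsSketch {
  static String connectAsCurrentUser() throws IOException, InterruptedException {
    // Everything inside run() executes with the current user's JAAS Subject
    // bound, so Kerberos credential lookups succeed regardless of JDK vendor.
    return UserGroupInformation.getCurrentUser().doAs(
        new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            return "transport opened as " + UserGroupInformation.getCurrentUser();
          }
        });
  }
}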
 



[2/2] hive git commit: HIVE-13872: Vectorization: Fix cross-product reduce sink serialization (Matt McCline, reviewed by Gopal Vijayaraghavan)

2016-06-23 mmccline
HIVE-13872: Vectorization: Fix cross-product reduce sink serialization (Matt 
McCline, reviewed by Gopal Vijayaraghavan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b7e8d0c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b7e8d0c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b7e8d0c9

Branch: refs/heads/master
Commit: b7e8d0c9c40cd3541aa89228e20372e1fa863c3a
Parents: 5783ab8
Author: Matt McCline 
Authored: Thu Jun 23 03:40:53 2016 -0700
Committer: Matt McCline 
Committed: Thu Jun 23 03:40:53 2016 -0700

----------------------------------------------------------------------
 data/files/customer_demographics.txt| 200 +
 .../test/resources/testconfiguration.properties |   2 +
 .../hive/llap/io/api/impl/LlapInputFormat.java  |   7 +-
 .../org/apache/orc/impl/TreeReaderFactory.java  |   9 +-
 .../hive/ql/exec/vector/VectorAssignRow.java|  24 +-
 .../ql/exec/vector/VectorDeserializeRow.java|  38 +--
 .../hive/ql/exec/vector/VectorExtractRow.java   |   4 +-
 .../hive/ql/exec/vector/VectorMapOperator.java  | 108 +++
 .../ql/exec/vector/VectorizedRowBatchCtx.java   | 144 --
 .../hadoop/hive/ql/io/NullRowsInputFormat.java  |  10 +-
 .../ql/io/orc/VectorizedOrcInputFormat.java |   5 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |  84 --
 .../clientpositive/vector_include_no_sel.q  |  78 +
 .../llap/vector_include_no_sel.q.out| 286 +++
 .../tez/vector_include_no_sel.q.out | 284 ++
 .../clientpositive/vector_include_no_sel.q.out  | 282 ++
 16 files changed, 1355 insertions(+), 210 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b7e8d0c9/data/files/customer_demographics.txt
----------------------------------------------------------------------
diff --git a/data/files/customer_demographics.txt 
b/data/files/customer_demographics.txt
new file mode 100644
index 0000000..90ab999
--- /dev/null
+++ b/data/files/customer_demographics.txt
@@ -0,0 +1,200 @@
+1|M|M|Primary|500|Good|0|0|0|
+2|F|M|Primary|500|Good|0|0|0|
+3|M|S|Primary|500|Good|0|0|0|
+4|F|S|Primary|500|Good|0|0|0|
+5|M|D|Primary|500|Good|0|0|0|
+6|F|D|Primary|500|Good|0|0|0|
+7|M|W|Primary|500|Good|0|0|0|
+8|F|W|Primary|500|Good|0|0|0|
+9|M|U|Primary|500|Good|0|0|0|
+10|F|U|Primary|500|Good|0|0|0|
+11|M|M|Secondary|500|Good|0|0|0|
+12|F|M|Secondary|500|Good|0|0|0|
+13|M|S|Secondary|500|Good|0|0|0|
+14|F|S|Secondary|500|Good|0|0|0|
+15|M|D|Secondary|500|Good|0|0|0|
+16|F|D|Secondary|500|Good|0|0|0|
+17|M|W|Secondary|500|Good|0|0|0|
+18|F|W|Secondary|500|Good|0|0|0|
+19|M|U|Secondary|500|Good|0|0|0|
+20|F|U|Secondary|500|Good|0|0|0|
+21|M|M|College|500|Good|0|0|0|
+22|F|M|College|500|Good|0|0|0|
+23|M|S|College|500|Good|0|0|0|
+24|F|S|College|500|Good|0|0|0|
+25|M|D|College|500|Good|0|0|0|
+26|F|D|College|500|Good|0|0|0|
+27|M|W|College|500|Good|0|0|0|
+28|F|W|College|500|Good|0|0|0|
+29|M|U|College|500|Good|0|0|0|
+30|F|U|College|500|Good|0|0|0|
+31|M|M|2 yr Degree|500|Good|0|0|0|
+32|F|M|2 yr Degree|500|Good|0|0|0|
+33|M|S|2 yr Degree|500|Good|0|0|0|
+34|F|S|2 yr Degree|500|Good|0|0|0|
+35|M|D|2 yr Degree|500|Good|0|0|0|
+36|F|D|2 yr Degree|500|Good|0|0|0|
+37|M|W|2 yr Degree|500|Good|0|0|0|
+38|F|W|2 yr Degree|500|Good|0|0|0|
+39|M|U|2 yr Degree|500|Good|0|0|0|
+40|F|U|2 yr Degree|500|Good|0|0|0|
+41|M|M|4 yr Degree|500|Good|0|0|0|
+42|F|M|4 yr Degree|500|Good|0|0|0|
+43|M|S|4 yr Degree|500|Good|0|0|0|
+44|F|S|4 yr Degree|500|Good|0|0|0|
+45|M|D|4 yr Degree|500|Good|0|0|0|
+46|F|D|4 yr Degree|500|Good|0|0|0|
+47|M|W|4 yr Degree|500|Good|0|0|0|
+48|F|W|4 yr Degree|500|Good|0|0|0|
+49|M|U|4 yr Degree|500|Good|0|0|0|
+50|F|U|4 yr Degree|500|Good|0|0|0|
+51|M|M|Advanced Degree|500|Good|0|0|0|
+52|F|M|Advanced Degree|500|Good|0|0|0|
+53|M|S|Advanced Degree|500|Good|0|0|0|
+54|F|S|Advanced Degree|500|Good|0|0|0|
+55|M|D|Advanced Degree|500|Good|0|0|0|
+56|F|D|Advanced Degree|500|Good|0|0|0|
+57|M|W|Advanced Degree|500|Good|0|0|0|
+58|F|W|Advanced Degree|500|Good|0|0|0|
+59|M|U|Advanced Degree|500|Good|0|0|0|
+60|F|U|Advanced Degree|500|Good|0|0|0|
+61|M|M|Unknown|500|Good|0|0|0|
+62|F|M|Unknown|500|Good|0|0|0|
+63|M|S|Unknown|500|Good|0|0|0|
+64|F|S|Unknown|500|Good|0|0|0|
+65|M|D|Unknown|500|Good|0|0|0|
+66|F|D|Unknown|500|Good|0|0|0|
+67|M|W|Unknown|500|Good|0|0|0|
+68|F|W|Unknown|500|Good|0|0|0|
+69|M|U|Unknown|500|Good|0|0|0|
+70|F|U|Unknown|500|Good|0|0|0|
+71|M|M|Primary|1000|Good|0|0|0|
+72|F|M|Primary|1000|Good|0|0|0|
+73|M|S|Primary|1000|Good|0|0|0|
+74|F|S|Primary|1000|Good|0|0|0|
+75|M|D|Primary|1000|Good|0|0|0|
+76|F|D|Primary|1000|Good|0|0|0|
+77|M|W|Primary|1000|Good|0|0|0|
+78|F|W|Primary|1000|Good|0|0|0|
+79|M|U|Primary|1000|Good|0|0|0|
+80|F|U|Primary|1000|Good|0|0|0|
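The [1/2] message below carries the test for this fix; its header comments describe the failure: with no SELECT between the TableScan and the ReduceSink, the input format materialized only the "included" columns of each vectorized batch, and the ReduceSink then dereferenced an unfilled column and threw an NPE. A self-contained toy model of that failure mode; plain arrays stand in for Hive's ColumnVectors and everything here is illustrative:

final class IncludedColumnsSketch {
  static long[][] readBatch(boolean[] included, int batchSize) {
    long[][] columns = new long[included.length][];
    for (int c = 0; c < included.length; c++) {
      if (included[c]) {
        columns[c] = new long[batchSize]; // materialized by the input format
      }                                   // excluded columns stay null
    }
    return columns;
  }

  public static void main(String[] args) {
    long[][] batch = readBatch(new boolean[] {true, false, true}, 1024);
    System.out.println(batch[0].length); // included column: fine
    System.out.println(batch[1].length); // unfilled column: NullPointerException,
                                         // the crash mode the patch guards against
  }
}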

[1/2] hive git commit: HIVE-13872: Vectorization: Fix cross-product reduce sink serialization (Matt McCline, reviewed by Gopal Vijayaraghavan)

2016-06-23 mmccline
Repository: hive
Updated Branches:
  refs/heads/master 5783ab858 -> b7e8d0c9c


http://git-wip-us.apache.org/repos/asf/hive/blob/b7e8d0c9/ql/src/test/results/clientpositive/tez/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_include_no_sel.q.out 
b/ql/src/test/results/clientpositive/tez/vector_include_no_sel.q.out
new file mode 100644
index 0000000..be991b2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_include_no_sel.q.out
@@ -0,0 +1,284 @@
+PREHOOK: query: -- HIVE-13872
+-- Looking for TableScan immediately followed by ReduceSink (no intervening SEL operator).
+-- This caused problems for Vectorizer not eliminating columns which are not included.
+-- The input file format didn't fill in those vectorized columns and thus caused NPE in
+-- ReduceSink.
+-- Only a problem when NOT CBO because of CBO rule-based transforms.
+--
+-- Using a cross-product.
+
+create table store_sales_txt
+(
+ss_sold_date_sk       int,
+ss_sold_time_sk       int,
+ss_item_sk            int,
+ss_customer_sk        int,
+ss_cdemo_sk           int,
+ss_hdemo_sk           int,
+ss_addr_sk            int,
+ss_store_sk           int,
+ss_promo_sk           int,
+ss_ticket_number      int,
+ss_quantity           int,
+ss_wholesale_cost     float,
+ss_list_price         float,
+ss_sales_price        float,
+ss_ext_discount_amt   float,
+ss_ext_sales_price    float,
+ss_ext_wholesale_cost float,
+ss_ext_list_price     float,
+ss_ext_tax            float,
+ss_coupon_amt         float,
+ss_net_paid           float,
+ss_net_paid_inc_tax   float,
+ss_net_profit         float
+)
+row format delimited fields terminated by '|'
+stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@store_sales_txt
+POSTHOOK: query: -- HIVE-13872
+-- Looking for TableScan immediately followed by ReduceSink (no intervening SEL operator).
+-- This caused problems for Vectorizer not eliminating columns which are not included.
+-- The input file format didn't fill in those vectorized columns and thus caused NPE in
+-- ReduceSink.
+-- Only a problem when NOT CBO because of CBO rule-based transforms.
+--
+-- Using a cross-product.
+
+create table store_sales_txt
+(
+ss_sold_date_sk       int,
+ss_sold_time_sk       int,
+ss_item_sk            int,
+ss_customer_sk        int,
+ss_cdemo_sk           int,
+ss_hdemo_sk           int,
+ss_addr_sk            int,
+ss_store_sk           int,
+ss_promo_sk           int,
+ss_ticket_number      int,
+ss_quantity           int,
+ss_wholesale_cost     float,
+ss_list_price         float,
+ss_sales_price        float,
+ss_ext_discount_amt   float,
+ss_ext_sales_price    float,
+ss_ext_wholesale_cost float,
+ss_ext_list_price     float,
+ss_ext_tax            float,
+ss_coupon_amt         float,
+ss_net_paid           float,
+ss_net_paid_inc_tax   float,
+ss_net_profit         float
+)
+row format delimited fields terminated by '|'
+stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@store_sales_txt
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@store_sales_txt
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@store_sales_txt
+PREHOOK: query: create table store_sales stored as orc as select * from store_sales_txt
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@store_sales_txt
+PREHOOK: Output: database:default
+PREHOOK: Output: default@store_sales
+POSTHOOK: query: create table store_sales stored as orc as select * from store_sales_txt
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@store_sales_txt
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@store_sales
+POSTHOOK: Lineage: store_sales.ss_addr_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store_sales.ss_cdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store_sales.ss_coupon_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_coupon_amt, type:float, comment:null), ]
+POSTHOOK: Lineage: store_sales.ss_customer_sk SIMPLE