[2/2] hive git commit: HIVE-20291: Allow HiveStreamingConnection to receive a WriteId (Jaume Marhuenda reviewed by Prasanth Jayachandran)

2018-10-11 Thread prasanthj
HIVE-20291: Allow HiveStreamingConnection to receive a WriteId (Jaume Marhuenda reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bdbd3bcf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bdbd3bcf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bdbd3bcf

Branch: refs/heads/master
Commit: bdbd3bcffac9f7fe1d3babb45eb40547b1499bb5
Parents: 7c4d48e
Author: Jaume Marhuenda 
Authored: Thu Oct 11 16:22:22 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu Oct 11 16:23:34 2018 -0700

----------------------------------------------------------------------
 .../hive/streaming/AbstractRecordWriter.java    |  65 +-
 .../streaming/AbstractStreamingTransaction.java | 156 +
 .../apache/hive/streaming/ConnectionStats.java  |  38 +-
 .../hive/streaming/HiveStreamingConnection.java | 697 ++-
 .../hive/streaming/InvalidTransactionState.java |   5 +-
 .../apache/hive/streaming/PartitionInfo.java    |   1 +
 .../org/apache/hive/streaming/RecordWriter.java |  38 +-
 .../hive/streaming/StreamingConnection.java     |  43 ++
 .../hive/streaming/StreamingTransaction.java    | 113 +++
 .../apache/hive/streaming/TransactionBatch.java | 430
 .../apache/hive/streaming/TransactionError.java |   7 +-
 .../streaming/UnManagedSingleTransaction.java   | 135
 .../org/apache/hive/streaming/package-info.java |  22 +
 .../java/org/apache/hive/streaming/package.html |   3 +-
 .../apache/hive/streaming/TestStreaming.java    | 433 ++--
 15 files changed, 1616 insertions(+), 570 deletions(-)
----------------------------------------------------------------------
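
For orientation, a condensed usage sketch of the feature this commit adds, assembled from the new test TestStreaming#testConnectionWithWriteId quoted further below. The database/table names, the HiveConf and the written record are placeholders; the builder calls (withWriteId, withStatementId, withTableObject) and the accessors (getTable, getCurrentWriteId) are the ones this patch introduces.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hive.streaming.HiveStreamingConnection;
import org.apache.hive.streaming.StrictDelimitedInputWriter;

public class WriteIdSharingSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();

    // Primary connection: opens the transaction and therefore owns the write id.
    HiveStreamingConnection owner = HiveStreamingConnection.newBuilder()
        .withDatabase("default")
        .withTable("writeidconnection")
        .withRecordWriter(StrictDelimitedInputWriter.newBuilder()
            .withFieldDelimiter(',').build())
        .withHiveConf(conf)
        .connect();
    owner.beginTransaction();

    Table tableObject = owner.getTable();
    Long writeId = owner.getCurrentWriteId();

    // Secondary connection: reuses the owner's write id instead of opening
    // its own transaction; the statement id keeps its delta files distinct.
    HiveStreamingConnection secondary = HiveStreamingConnection.newBuilder()
        .withDatabase("default")
        .withTable("writeidconnection")
        .withRecordWriter(StrictDelimitedInputWriter.newBuilder()
            .withFieldDelimiter(',').build())
        .withHiveConf(conf)
        .withWriteId(writeId)
        .withStatementId(1)
        .withTableObject(tableObject)
        .connect();

    secondary.beginTransaction();
    secondary.write("a1,b2".getBytes());
    // Assumption: committing a write-id-backed connection only finalizes its
    // own writes; the ACID commit itself remains the owner's responsibility.
    secondary.commitTransaction();
    secondary.close();

    owner.commitTransaction();
    owner.close();
  }
}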


http://git-wip-us.apache.org/repos/asf/hive/blob/bdbd3bcf/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
----------------------------------------------------------------------
diff --git a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index 9e90d36..88a7d82 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -46,9 +46,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.LlapUtil;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -66,6 +68,7 @@ public abstract class AbstractRecordWriter implements RecordWriter {
   private static final Logger LOG = LoggerFactory.getLogger(AbstractRecordWriter.class.getName());
 
   private static final String DEFAULT_LINE_DELIMITER_PATTERN = "[\r\n]";
+  private Integer statementId;
   protected HiveConf conf;
   protected StreamingConnection conn;
   protected Table table;
@@ -128,13 +131,21 @@ public abstract class AbstractRecordWriter implements RecordWriter {
   }
 
   @Override
-  public void init(StreamingConnection conn, long minWriteId, long maxWriteId) throws StreamingException {
+  public void init(StreamingConnection conn, long minWriteId, long maxWriteId)
+  throws StreamingException {
+init(conn, minWriteId, maxWriteId, -1);
+  }
+
+  @Override
+  public void init(StreamingConnection conn, long minWriteId, long maxWriteId,
+  int statementId) throws StreamingException {
 if (conn == null) {
   throw new StreamingException("Streaming connection cannot be null during record writer initialization");
 }
 this.conn = conn;
 this.curBatchMinWriteId = minWriteId;
 this.curBatchMaxWriteId = maxWriteId;
+this.statementId = statementId;
 this.conf = conn.getHiveConf();
this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
 this.table = conn.getTable();
@@ -431,6 +442,7 @@ public abstract class AbstractRecordWriter implements RecordWriter {
   int bucket = getBucket(encodedRow);
  List<String> partitionValues = getPartitionValues(encodedRow);
   getRecordUpdater(partitionValues, bucket).insert(writeId, encodedRow);
+
  // ingest size bytes gets resetted on flush() whereas connection stats is not
   conn.getConnectionStats().incrementRecordsWritten();
   conn.getConnectionStats().incrementRecordsSize(record.length);
@@ -492,10 +504,53 @@ public abstract class AbstractRecordWriter implements RecordWriter {
 .tableProperties(tblProperties)
 .minimumWriteId(minWriteId)
 .maximumWriteId(maxWriteID)
-   

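The init() hunk above keeps the pre-existing two-argument entry point as a thin delegator that passes -1 to the new overload as a "no statement id" sentinel. A minimal standalone sketch of that backward-compatibility pattern, using a hypothetical Writer interface rather than the actual Hive types:

// Hypothetical illustration only; in the patch the delegation lives in
// AbstractRecordWriter and the overloads in the RecordWriter interface.
interface Writer {
  // Old signature: existing callers keep working unchanged.
  default void init(long minWriteId, long maxWriteId) {
    init(minWriteId, maxWriteId, -1); // -1 = no statement id supplied
  }

  // New signature: the statement id later distinguishes delta directories
  // written under the same write id.
  void init(long minWriteId, long maxWriteId, int statementId);
}
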
[1/2] hive git commit: HIVE-20291: Allow HiveStreamingConnection to receive a WriteId (Jaume Marhuenda reviewed by Prasanth Jayachandran)

2018-10-11 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master 7c4d48ec2 -> bdbd3bcff


http://git-wip-us.apache.org/repos/asf/hive/blob/bdbd3bcf/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
index 8b5e508..1c9e43f 100644
--- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
+++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -60,13 +61,13 @@ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.LockType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
-import org.apache.hadoop.hive.metastore.api.TxnState;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
@@ -82,6 +83,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
 import org.apache.hadoop.hive.ql.io.orc.Reader;
 import org.apache.hadoop.hive.ql.io.orc.RecordReader;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.txn.compactor.Worker;
@@ -150,7 +152,8 @@ public class TestStreaming {
   if (file.canExecute()) {
 mod |= 0111;
   }
-  return new FileStatus(file.length(), file.isDirectory(), 1, 1024,
+  return new FileStatus(file.length(), file.isDirectory(),
+  1, 1024,
 file.lastModified(), file.lastModified(),
 FsPermission.createImmutable(mod), "owen", "users", path);
 }
@@ -419,6 +422,123 @@ public class TestStreaming {
   }
 
   @Test
+  public void testGetDeltaPath() throws Exception {
+StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
+.withFieldDelimiter(',')
+.build();
+HiveStreamingConnection connection = HiveStreamingConnection.newBuilder()
+.withDatabase(dbName)
+.withTable(tblName)
+.withRecordWriter(writer)
+.withHiveConf(conf)
+.connect();
+Path path = connection.getDeltaFileLocation(partitionVals, 0,
+5L, 5L, 9);
+Assert.assertTrue(path.toString().endsWith("testing.db/alerts/continent"
++ "=Asia/country=India/delta_0000005_0000005_0009/bucket_00000"));
+  }
+
+  @Test
+  public void testConnectionWithWriteId() throws Exception {
+queryTable(driver, "drop table if exists default.writeidconnection");
+queryTable(driver, "create table default.writeidconnection (a string, b 
string) stored as orc " +
+"TBLPROPERTIES('transactional'='true')");
+queryTable(driver, "insert into default.writeidconnection 
values('a0','bar')");
+
+List<String> rs = queryTable(driver, "select * from default.writeidconnection");
+Assert.assertEquals(1, rs.size());
+Assert.assertEquals("a0\tbar", rs.get(0));
+
+StrictDelimitedInputWriter writerT = StrictDelimitedInputWriter.newBuilder()
+.withFieldDelimiter(',')
+.build();
+HiveStreamingConnection transactionConnection = HiveStreamingConnection.newBuilder()
+.withDatabase("Default")
+.withTable("writeidconnection")
+.withRecordWriter(writerT)
+.withHiveConf(conf)
+.connect();
+transactionConnection.beginTransaction();
+
+Table tObject = transactionConnection.getTable();
+Long writeId = transactionConnection.getCurrentWriteId();
+
+Assert.assertNotNull(tObject);
+Assert.assertNotNull(writeId);
+
+StrictDelimitedInputWriter writerOne = StrictDelimitedInputWriter.newBuilder()
+.withFieldDelimiter(',')
+.build();
+HiveStreamingConnection connectionOne = HiveStreamingConnection.newBuilder()
+.withDatabase("Default")
+.withTable("writeidconnection")
+.withRecordWriter(writerOne)
+.withHiveConf(conf)
+.withWriteId(writeId)
+.withStatementId(1)
+.withTableObject(tObject)

hive git commit: HIVE-20729: TestJdbcWithMiniLlapArrow.testKillQuery fail frequently (Jesus Camacho Rodriguez, reviewed by Sergey Shelukhin)

2018-10-11 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 1a7dbe06c -> 7c4d48ec2


HIVE-20729: TestJdbcWithMiniLlapArrow.testKillQuery fail frequently (Jesus Camacho Rodriguez, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7c4d48ec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7c4d48ec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7c4d48ec

Branch: refs/heads/master
Commit: 7c4d48ec2835303a7c8d45eec6caac9ecd08c17b
Parents: 1a7dbe0
Author: Jesus Camacho Rodriguez 
Authored: Thu Oct 11 11:42:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Oct 11 11:42:44 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7c4d48ec/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
index 2e7c21f..e125ba3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
@@ -43,11 +43,16 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * TestJdbcWithMiniLlap for Arrow format
  */
 public class TestJdbcWithMiniLlapArrow extends BaseJdbcWithMiniLlap {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(TestJdbcWithMiniLlapArrow.class);
+
   private static MiniHS2 miniHS2 = null;
   private static final String tableName = "testJdbcMinihs2Tbl";
   private static String dataFileDir;
@@ -350,12 +355,13 @@ public class TestJdbcWithMiniLlapArrow extends BaseJdbcWithMiniLlap {
   stmt.close();
   con2.close();
   con.close();
+  // We check the result
+  assertNotNull("tExecute", tExecuteHolder.throwable);
+  assertNull("tCancel", tKillHolder.throwable);
 } catch (Exception e) {
   // ignore error
+  LOG.error("Exception in testKillQuery", e);
 }
-
-assertNotNull("tExecute", tExecuteHolder.throwable);
-assertNull("tCancel", tKillHolder.throwable);
   }
 
 }
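
Note that the relocated assertions still fail the test when they trip: assertNotNull/assertNull throw AssertionError, which extends Error rather than Exception, so the catch (Exception e) that now logs incidental errors from the query/close steps cannot swallow a real assertion failure. A tiny self-contained illustration of that distinction:

public class AssertionErrorEscapesCatch {
  public static void main(String[] args) {
    try {
      throw new AssertionError("JUnit assertion failures are Errors");
    } catch (Exception e) {
      // Never reached: AssertionError does not extend Exception,
      // so it propagates past this handler and aborts the run.
      System.out.println("swallowed: " + e);
    }
  }
}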



hive git commit: HIVE-20727: Disable flaky test: stat_estimate_related_col.q (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)

2018-10-11 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 37c7fd783 -> 1a7dbe06c


HIVE-20727: Disable flaky test: stat_estimate_related_col.q (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a7dbe06
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a7dbe06
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a7dbe06

Branch: refs/heads/master
Commit: 1a7dbe06c7c13e822852e9b0ca619cfd989448f7
Parents: 37c7fd7
Author: Jesus Camacho Rodriguez 
Authored: Thu Oct 11 10:10:48 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu Oct 11 10:10:48 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1a7dbe06/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 11f87f4..4110afc 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -60,6 +60,7 @@ public class CliConfigs {
 
 excludeQuery("fouter_join_ppr.q"); // Disabled in HIVE-19509
 excludeQuery("udaf_histogram_numeric.q"); // disabled in HIVE-20715
+excludeQuery("stat_estimate_related_col.q"); // disabled in HIVE-20727
 
 setResultsDir("ql/src/test/results/clientpositive");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");