hive git commit: HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)

2016-06-08 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 1bc13cb36 -> 185d9c572


HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/185d9c57
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/185d9c57
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/185d9c57

Branch: refs/heads/branch-2.1
Commit: 185d9c57269cc6b673aa3950a8909b7157cd0197
Parents: 1bc13cb
Author: Wei Zheng 
Authored: Wed Jun 8 13:26:14 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 8 13:26:14 2016 -0700

--
 .../hadoop/hive/common/StringableMap.java   | 80 
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  2 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java | 54 -
 .../hive/ql/txn/compactor/CompactorMR.java  |  2 +-
 .../hive/ql/txn/compactor/TestWorker.java   |  2 +-
 5 files changed, 83 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/185d9c57/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/StringableMap.java b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
new file mode 100644
index 0000000..8a93c0f
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * A utility class that can convert a HashMap of Properties into a colon separated string,
+ * and can take the same format of string and convert it to a HashMap of Properties.
+ */
+public class StringableMap extends HashMap<String, String> {
+
+  public StringableMap(String s) {
+    String[] parts = s.split(":", 2);
+    // read that many chars
+    int numElements = Integer.parseInt(parts[0]);
+    s = parts[1];
+    for (int i = 0; i < numElements; i++) {
+      parts = s.split(":", 2);
+      int len = Integer.parseInt(parts[0]);
+      String key = null;
+      if (len > 0) key = parts[1].substring(0, len);
+      parts = parts[1].substring(len).split(":", 2);
+      len = Integer.parseInt(parts[0]);
+      String value = null;
+      if (len > 0) value = parts[1].substring(0, len);
+      s = parts[1].substring(len);
+      put(key, value);
+    }
+  }
+
+  public StringableMap(Map<String, String> m) {
+    super(m);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder buf = new StringBuilder();
+    buf.append(size());
+    buf.append(':');
+    if (size() > 0) {
+      for (Map.Entry<String, String> entry : entrySet()) {
+        int length = (entry.getKey() == null) ? 0 : entry.getKey().length();
+        buf.append(entry.getKey() == null ? 0 : length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getKey());
+        length = (entry.getValue() == null) ? 0 : entry.getValue().length();
+        buf.append(length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getValue());
+      }
+    }
+    return buf.toString();
+  }
+
+  public Properties toProperties() {
+    Properties props = new Properties();
+    props.putAll(this);
+    return props;
+  }
+}
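
For illustration, a minimal usage sketch of the encoding that the class above produces and parses (the key and value are made up for the example):

    StringableMap m = new StringableMap(new HashMap<String, String>());
    m.put("orc.compress.size", "8192");
    // One entry encodes as "<count>:<key-len>:<key><value-len>:<value>",
    // with no separator between entries:
    String encoded = m.toString();              // "1:17:orc.compress.size4:8192"
    StringableMap decoded = new StringableMap(encoded);
    assert decoded.equals(m);                   // the string round-trips
    Properties props = decoded.toProperties();  // same pairs as a Properties object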

http://git-wip-us.apache.org/repos/asf/hive/blob/185d9c57/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index dd7054b..7a89a0c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -37,9 +37,9 @@ import 

hive git commit: HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)

2016-06-08 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 52c7c04b0 -> df3746565


HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/df374656
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/df374656
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/df374656

Branch: refs/heads/branch-1
Commit: df37465651858fd3502b01044b237d074a5125c4
Parents: 52c7c04
Author: Wei Zheng 
Authored: Wed Jun 8 13:52:45 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 8 13:52:45 2016 -0700

--
 .../hadoop/hive/common/StringableMap.java   | 80 
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  2 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java | 54 -
 .../hive/ql/txn/compactor/CompactorMR.java  |  2 +-
 .../hive/ql/txn/compactor/TestWorker.java   |  2 +-
 5 files changed, 83 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/df374656/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/StringableMap.java b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
new file mode 100644
index 0000000..8a93c0f
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * A utility class that can convert a HashMap of Properties into a colon separated string,
+ * and can take the same format of string and convert it to a HashMap of Properties.
+ */
+public class StringableMap extends HashMap<String, String> {
+
+  public StringableMap(String s) {
+    String[] parts = s.split(":", 2);
+    // read that many chars
+    int numElements = Integer.parseInt(parts[0]);
+    s = parts[1];
+    for (int i = 0; i < numElements; i++) {
+      parts = s.split(":", 2);
+      int len = Integer.parseInt(parts[0]);
+      String key = null;
+      if (len > 0) key = parts[1].substring(0, len);
+      parts = parts[1].substring(len).split(":", 2);
+      len = Integer.parseInt(parts[0]);
+      String value = null;
+      if (len > 0) value = parts[1].substring(0, len);
+      s = parts[1].substring(len);
+      put(key, value);
+    }
+  }
+
+  public StringableMap(Map<String, String> m) {
+    super(m);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder buf = new StringBuilder();
+    buf.append(size());
+    buf.append(':');
+    if (size() > 0) {
+      for (Map.Entry<String, String> entry : entrySet()) {
+        int length = (entry.getKey() == null) ? 0 : entry.getKey().length();
+        buf.append(entry.getKey() == null ? 0 : length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getKey());
+        length = (entry.getValue() == null) ? 0 : entry.getValue().length();
+        buf.append(length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getValue());
+      }
+    }
+    return buf.toString();
+  }
+
+  public Properties toProperties() {
+    Properties props = new Properties();
+    props.putAll(this);
+    return props;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/df374656/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 129abef..0902121 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -34,13 +34,13 @@ import 

hive git commit: HIVE-13563 : Hive Streaming does not honor orc.compress.size and orc.stripe.size table properties (Wei Zheng, reviewed by Prasanth Jayachandran)

2016-06-09 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master c9c0c5d1d -> f67c862e3


HIVE-13563 : Hive Streaming does not honor orc.compress.size and orc.stripe.size table properties (Wei Zheng, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f67c862e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f67c862e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f67c862e

Branch: refs/heads/master
Commit: f67c862e30cc8c3d230853c688c5c70d30d207f3
Parents: c9c0c5d
Author: Wei Zheng 
Authored: Thu Jun 9 10:12:19 2016 -0700
Committer: Wei Zheng 
Committed: Thu Jun 9 10:12:19 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 +
 orc/src/java/org/apache/orc/OrcConf.java|  2 +
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java | 42 +++-
 .../hive/ql/io/orc/TestOrcRecordUpdater.java|  4 ++
 .../clientpositive/tez/acid_globallimit.q.out   |  6 +--
 5 files changed, 42 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f67c862e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index bb0ca3a..fe69ffa 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1206,6 +1206,8 @@ public class HiveConf extends Configuration {
         "to use dictionary or not will be retained thereafter."),
     HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024,
         "Define the default ORC buffer size, in bytes."),
+    HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
+        "delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
     HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true,
         "Define the default block padding, which pads stripes to the HDFS block boundaries."),
     HIVE_ORC_BLOCK_PADDING_TOLERANCE("hive.exec.orc.block.padding.tolerance", 0.05f,

http://git-wip-us.apache.org/repos/asf/hive/blob/f67c862e/orc/src/java/org/apache/orc/OrcConf.java
--
diff --git a/orc/src/java/org/apache/orc/OrcConf.java b/orc/src/java/org/apache/orc/OrcConf.java
index 6fcbb72..357318d 100644
--- a/orc/src/java/org/apache/orc/OrcConf.java
+++ b/orc/src/java/org/apache/orc/OrcConf.java
@@ -40,6 +40,8 @@ public enum OrcConf {
       " number of rows n index entry represents.)"),
   BUFFER_SIZE("orc.compress.size", "hive.exec.orc.default.buffer.size",
       256 * 1024, "Define the default ORC buffer size, in bytes."),
+  BASE_DELTA_RATIO("orc.base.delta.ratio", "hive.exec.orc.base.delta.ratio", 8,
+      "The ratio of base writer and delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
   BLOCK_PADDING("orc.block.padding", "hive.exec.orc.default.block.padding",
       true,
       "Define whether stripes should be padded to the HDFS block boundaries."),

http://git-wip-us.apache.org/repos/asf/hive/blob/f67c862e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 4bf2403..e577961 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -27,6 +27,7 @@ import java.util.List;
 
 import org.apache.orc.impl.AcidStats;
 import org.apache.orc.impl.OrcAcidUtils;
+import org.apache.orc.OrcConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -204,19 +205,38 @@ public class OrcRecordUpdater implements RecordUpdater {
       flushLengths = null;
     }
     OrcFile.WriterOptions writerOptions = null;
-    if (options instanceof OrcOptions) {
-      writerOptions = ((OrcOptions) options).getOrcOptions();
-    }
-    if (writerOptions == null) {
-      writerOptions = OrcFile.writerOptions(options.getTableProperties(),
-          options.getConfiguration());
-    }
-    writerOptions.fileSystem(fs).callback(indexBuilder);
-    if (!options.isWritingBase()) {
+    // If writing delta dirs, we need to make a clone of original options, to avoid polluting it for
+    // the base writer
+    if (options.isWritingBase()) {
+      if (options instanceof OrcOptions) {
+        writerOptions = ((OrcOptions) options).getOrcOptions();
+      }
+

hive git commit: HIVE-13563 : Hive Streaming does not honor orc.compress.size and orc.stripe.size table properties (Wei Zheng, reviewed by Prasanth Jayachandran)

2016-06-09 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 df3746565 -> 39decb0bf


HIVE-13563 : Hive Streaming does not honor orc.compress.size and orc.stripe.size table properties (Wei Zheng, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/39decb0b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/39decb0b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/39decb0b

Branch: refs/heads/branch-1
Commit: 39decb0bfce8a2848fd293c5fc932f610896d31d
Parents: df37465
Author: Wei Zheng 
Authored: Thu Jun 9 10:12:19 2016 -0700
Committer: Wei Zheng 
Committed: Thu Jun 9 11:26:00 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 +
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |  7 
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java | 40 ++--
 .../hive/ql/io/orc/TestOrcRecordUpdater.java|  4 ++
 4 files changed, 42 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/39decb0b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c63c2ca..e760ed5 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1019,6 +1019,8 @@ public class HiveConf extends Configuration {
         "to use dictionary or not will be retained thereafter."),
     HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024,
         "Define the default ORC buffer size, in bytes."),
+    HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
+        "delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
     HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true,
         "Define the default block padding, which pads stripes to the HDFS block boundaries."),
     HIVE_ORC_BLOCK_PADDING_TOLERANCE("hive.exec.orc.block.padding.tolerance", 0.05f,

http://git-wip-us.apache.org/repos/asf/hive/blob/39decb0b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
index c9ba713..3ca0a6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
@@ -491,6 +491,13 @@ public final class OrcFile {
       return this;
     }
 
+    public int getBufferSize() {
+      return bufferSizeValue;
+    }
+
+    public long getStripeSize() {
+      return stripeSizeValue;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/39decb0b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index efe5293..693ffd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -249,19 +250,36 @@ public class OrcRecordUpdater implements RecordUpdater {
       flushLengths = null;
     }
     OrcFile.WriterOptions writerOptions = null;
-    if (options instanceof OrcOptions) {
-      writerOptions = ((OrcOptions) options).getOrcOptions();
-    }
-    if (writerOptions == null) {
-      writerOptions = OrcFile.writerOptions(options.getTableProperties(),
-          options.getConfiguration());
-    }
-    writerOptions.fileSystem(fs).callback(indexBuilder);
-    if (!options.isWritingBase()) {
+    // If writing delta dirs, we need to make a clone of original options, to avoid polluting it for
+    // the base writer
+    if (options.isWritingBase()) {
+      if (options instanceof OrcOptions) {
+        writerOptions = ((OrcOptions) options).getOrcOptions();
+      }
+      if (writerOptions == null) {
+        writerOptions = OrcFile.writerOptions(options.getTableProperties(),
+            options.getConfiguration());
+      }
+    } else {  // delta
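
Where the hunk above breaks off, the delta branch begins. A rough sketch of how the base/delta ratio could be applied there (an assumption built from HIVE_ORC_BASE_DELTA_RATIO and the WriterOptions getters added above, not the committed code):

    // Delta writers get proportionally smaller stripes and buffers than base writers.
    int ratio = HiveConf.getIntVar(options.getConfiguration(),
        HiveConf.ConfVars.HIVE_ORC_BASE_DELTA_RATIO);
    writerOptions.stripeSize(writerOptions.getStripeSize() / ratio);
    writerOptions.bufferSize(writerOptions.getBufferSize() / ratio);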

hive git commit: HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 53e01e4a5 -> d43938ca1


HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d43938ca
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d43938ca
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d43938ca

Branch: refs/heads/master
Commit: d43938ca127a849e50e8eaddd6313d409cea6770
Parents: 53e01e4
Author: Wei Zheng 
Authored: Wed Jun 15 10:19:30 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:19:30 2016 -0700

--
 .../hive/ql/txn/compactor/CompactorMR.java  |   7 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 308 ++-
 2 files changed, 310 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d43938ca/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 71e69d5..6caca98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -254,10 +254,9 @@ public class CompactorMR {
       }
     }
 
-    if (parsedDeltas.size() == 0) {
-      // Seriously, no deltas?  Can't compact that.
-      LOG.error(  "No delta files found to compact in " + sd.getLocation());
-      //couldn't someone want to run a Major compaction to convert old table to ACID?
+    if (parsedDeltas.size() == 0 && dir.getOriginalFiles() == null) {
+      // Skip compaction if there's no delta files AND there's no original files
+      LOG.error("No delta files or original files found to compact in " + sd.getLocation());
       return;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d43938ca/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index a1bd0fb..e76c925 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -270,10 +270,15 @@ public class TestTxnCommands2 {
 
   /**
    * Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Insert a row to ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
    * @throws Exception
    */
   @Test
-  public void testNonAcidToAcidConversionAndMajorCompaction() throws Exception {
+  public void testNonAcidToAcidConversion1() throws Exception {
     FileSystem fs = FileSystem.get(hiveConf);
     FileStatus[] status;
 
@@ -394,6 +399,307 @@ public class TestTxnCommands2 {
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
   }
 
+  /**
+   * Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Update the existing row in ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
+   * @throws Exception
+   */
+  @Test
+  public void testNonAcidToAcidConversion2() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert a row to Non-ACID table
+    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2)");
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 original bucket files in the location (000000_0 and 000001_0)
+    Assert.assertEquals(BUCKET_COUNT, status.length);
+    for (int i = 0; i < status.length; i++) {
+      Assert.assertTrue(status[i].getPath().getName().matches("00000[01]_0"));
+    }
+    List<String> rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
+    int [][] resultData = new int[][] {{1, 2}};
+    Assert.assertEquals(stringifyValues(resultData), rs);
+    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+    int resultCount = 1;
+    Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
+
+    // 2. Convert NONACIDORCTBL to ACID table
+    runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true')");
+    status = fs.listStatus(new
hive git commit: HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 3eb16ebec -> 45c1775e1


HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/45c1775e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/45c1775e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/45c1775e

Branch: refs/heads/branch-2.1
Commit: 45c1775e1b32234d576e7a474a372d9fa9326053
Parents: 3eb16eb
Author: Wei Zheng 
Authored: Tue Jun 14 15:30:56 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:27:48 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 45 ++--
 1 file changed, 22 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/45c1775e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 9988eec..5b6f20c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.lockmgr;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,7 +83,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   private static ScheduledExecutorService heartbeatExecutorService = null;
   private ScheduledFuture<?> heartbeatTask = null;
   private Runnable shutdownRunner = null;
-  static final int SHUTDOWN_HOOK_PRIORITY = 0;
+  private static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   DbTxnManager() {
     shutdownRunner = new Runnable() {
@@ -161,10 +160,11 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     getLockManager();
 
     boolean atLeastOneLock = false;
+    queryId = plan.getQueryId();
 
-    LockRequestBuilder rqstBuilder = new LockRequestBuilder(plan.getQueryId());
+    LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
     //link queryId to txnId
-    LOG.info("Setting lock request transaction to " + JavaUtils.txnIdToString(txnId) + " for queryId=" + plan.getQueryId());
+    LOG.info("Setting lock request transaction to " + JavaUtils.txnIdToString(txnId) + " for queryId=" + queryId);
     rqstBuilder.setTransactionId(txnId)
         .setUser(username);
 
@@ -304,7 +304,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     // Make sure we need locks.  It's possible there's nothing to lock in
     // this operation.
     if (!atLeastOneLock) {
-      LOG.debug("No locks needed for queryId" + plan.getQueryId());
+      LOG.debug("No locks needed for queryId" + queryId);
       return null;
     }
 
@@ -312,7 +312,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     if(isTxnOpen()) {
       statementId++;
     }
-    LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), isBlocking, locks);
+    LockState lockState = lockMgr.lock(rqstBuilder.build(), queryId, isBlocking, locks);
     ctx.setHiveLocks(locks);
     return lockState;
   }
@@ -324,15 +324,13 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     return t;
   }
   /**
-   * This is for testing only.
+   * This is for testing only. Normally client should call {@link #acquireLocks(QueryPlan, Context, String, boolean)}
    * @param delay time to delay for first heartbeat
-   * @return null if no locks were needed
    */
   @VisibleForTesting
   void acquireLocksWithHeartbeatDelay(QueryPlan plan, Context ctx, String username, long delay) throws LockException {
     acquireLocks(plan, ctx, username, true);
     ctx.setHeartbeater(startHeartbeat(delay));
-    queryId = plan.getQueryId();
   }
 
 
@@ -439,24 +437,25 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     }
   }
 
-  private Heartbeater startHeartbeat() throws LockException {
-    return startHeartbeat(0);
-  }
-
   /**
-   *  This is for testing only.  Normally client should call {@link #startHeartbeat()}
-   *  Make the heartbeater start before an initial delay period.
-   *  @param delay time to delay before first execution, in milliseconds
-   *  @return heartbeater
+   * Start the heartbeater threadpool and return the task.
+   * @param initialDelay time to delay before first execution, in milliseconds
+   * @return heartbeater
    */
-  Heartbeater startHeartbeat(long delay) throws
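
For context, a minimal sketch of what an initial-delay heartbeat schedule looks like on a ScheduledExecutorService (the Heartbeater constructor arguments here are hypothetical, and this is not the committed method body):

    // First execution fires after initialDelay; later ones every heartbeatInterval.
    ScheduledFuture<?> task = heartbeatExecutorService.scheduleAtFixedRate(
        new Heartbeater(txnMgr, conf),          // hypothetical constructor args
        initialDelay, heartbeatInterval, TimeUnit.MILLISECONDS);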

hive git commit: HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)

2016-06-08 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master f4707b1f1 -> edc5974a1


HIVE-13972 : Resolve class dependency issue introduced by HIVE-13354 (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/edc5974a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/edc5974a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/edc5974a

Branch: refs/heads/master
Commit: edc5974a1e87dc140850c6be09c4979c2ccae881
Parents: f4707b1
Author: Wei Zheng 
Authored: Wed Jun 8 13:25:20 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 8 13:25:20 2016 -0700

--
 .../hadoop/hive/common/StringableMap.java   | 80 
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  2 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java | 54 -
 .../hive/ql/txn/compactor/CompactorMR.java  |  2 +-
 .../hive/ql/txn/compactor/TestWorker.java   |  2 +-
 5 files changed, 83 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/edc5974a/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/StringableMap.java b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
new file mode 100644
index 0000000..8a93c0f
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/StringableMap.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * A utility class that can convert a HashMap of Properties into a colon separated string,
+ * and can take the same format of string and convert it to a HashMap of Properties.
+ */
+public class StringableMap extends HashMap<String, String> {
+
+  public StringableMap(String s) {
+    String[] parts = s.split(":", 2);
+    // read that many chars
+    int numElements = Integer.parseInt(parts[0]);
+    s = parts[1];
+    for (int i = 0; i < numElements; i++) {
+      parts = s.split(":", 2);
+      int len = Integer.parseInt(parts[0]);
+      String key = null;
+      if (len > 0) key = parts[1].substring(0, len);
+      parts = parts[1].substring(len).split(":", 2);
+      len = Integer.parseInt(parts[0]);
+      String value = null;
+      if (len > 0) value = parts[1].substring(0, len);
+      s = parts[1].substring(len);
+      put(key, value);
+    }
+  }
+
+  public StringableMap(Map<String, String> m) {
+    super(m);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder buf = new StringBuilder();
+    buf.append(size());
+    buf.append(':');
+    if (size() > 0) {
+      for (Map.Entry<String, String> entry : entrySet()) {
+        int length = (entry.getKey() == null) ? 0 : entry.getKey().length();
+        buf.append(entry.getKey() == null ? 0 : length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getKey());
+        length = (entry.getValue() == null) ? 0 : entry.getValue().length();
+        buf.append(length);
+        buf.append(':');
+        if (length > 0) buf.append(entry.getValue());
+      }
+    }
+    return buf.toString();
+  }
+
+  public Properties toProperties() {
+    Properties props = new Properties();
+    props.putAll(this);
+    return props;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/edc5974a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index dd7054b..7a89a0c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -37,9 +37,9 @@ import 

[3/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 3c9e038..2d82c92 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -10792,14 +10792,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size560 = 0;
-$_etype563 = 0;
-$xfer += $input->readListBegin($_etype563, $_size560);
-for ($_i564 = 0; $_i564 < $_size560; ++$_i564)
+$_size569 = 0;
+$_etype572 = 0;
+$xfer += $input->readListBegin($_etype572, $_size569);
+for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
 {
-  $elem565 = null;
-  $xfer += $input->readString($elem565);
-  $this->success []= $elem565;
+  $elem574 = null;
+  $xfer += $input->readString($elem574);
+  $this->success []= $elem574;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -10835,9 +10835,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter566)
+  foreach ($this->success as $iter575)
   {
-$xfer += $output->writeString($iter566);
+$xfer += $output->writeString($iter575);
   }
 }
 $output->writeListEnd();
@@ -10968,14 +10968,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size567 = 0;
-$_etype570 = 0;
-$xfer += $input->readListBegin($_etype570, $_size567);
-for ($_i571 = 0; $_i571 < $_size567; ++$_i571)
+$_size576 = 0;
+$_etype579 = 0;
+$xfer += $input->readListBegin($_etype579, $_size576);
+for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
 {
-  $elem572 = null;
-  $xfer += $input->readString($elem572);
-  $this->success []= $elem572;
+  $elem581 = null;
+  $xfer += $input->readString($elem581);
+  $this->success []= $elem581;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11011,9 +11011,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter573)
+  foreach ($this->success as $iter582)
   {
-$xfer += $output->writeString($iter573);
+$xfer += $output->writeString($iter582);
   }
 }
 $output->writeListEnd();
@@ -12014,18 +12014,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size574 = 0;
-$_ktype575 = 0;
-$_vtype576 = 0;
-$xfer += $input->readMapBegin($_ktype575, $_vtype576, $_size574);
-for ($_i578 = 0; $_i578 < $_size574; ++$_i578)
+$_size583 = 0;
+$_ktype584 = 0;
+$_vtype585 = 0;
+$xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583);
+for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
 {
-  $key579 = '';
-  $val580 = new \metastore\Type();
-  $xfer += $input->readString($key579);
-  $val580 = new \metastore\Type();
-  $xfer += $val580->read($input);
-  $this->success[$key579] = $val580;
+  $key588 = '';
+  $val589 = new \metastore\Type();
+  $xfer += $input->readString($key588);
+  $val589 = new \metastore\Type();
+  $xfer += $val589->read($input);
+  $this->success[$key588] = $val589;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -12061,10 +12061,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
        $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
 {
-  foreach ($this->success as $kiter581 => $viter582)
+  foreach ($this->success as $kiter590 => $viter591)
   {
-$xfer += $output->writeString($kiter581);
-$xfer += $viter582->write($output);
+$xfer += $output->writeString($kiter590);
+$xfer += $viter591->write($output);
  

[6/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index f8ca7cd..79460a8 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -14413,6 +14413,11 @@ void CompactionRequest::__set_runas(const std::string& val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map<std::string, std::string> & val) {
+  this->properties = val;
+__isset.properties = true;
+}
+
 uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -14479,6 +14484,29 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size603;
+::apache::thrift::protocol::TType _ktype604;
+::apache::thrift::protocol::TType _vtype605;
+xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603);
+uint32_t _i607;
+for (_i607 = 0; _i607 < _size603; ++_i607)
+{
+  std::string _key608;
+  xfer += iprot->readString(_key608);
+  std::string& _val609 = this->properties[_key608];
+  xfer += iprot->readString(_val609);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -14524,6 +14552,20 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeString(this->runas);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+    xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6);
+    {
+      xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
+      std::map<std::string, std::string> ::const_iterator _iter610;
+      for (_iter610 = this->properties.begin(); _iter610 != this->properties.end(); ++_iter610)
+      {
+        xfer += oprot->writeString(_iter610->first);
+        xfer += oprot->writeString(_iter610->second);
+      }
+      xfer += oprot->writeMapEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -14536,24 +14578,27 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other603) {
-  dbname = other603.dbname;
-  tablename = other603.tablename;
-  partitionname = other603.partitionname;
-  type = other603.type;
-  runas = other603.runas;
-  __isset = other603.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other604) {
-  dbname = other604.dbname;
-  tablename = other604.tablename;
-  partitionname = other604.partitionname;
-  type = other604.type;
-  runas = other604.runas;
-  __isset = other604.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other611) {
+  dbname = other611.dbname;
+  tablename = other611.tablename;
+  partitionname = other611.partitionname;
+  type = other611.type;
+  runas = other611.runas;
+  properties = other611.properties;
+  __isset = other611.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other612) {
+  dbname = other612.dbname;
+  tablename = other612.tablename;
+  partitionname = other612.partitionname;
+  type = other612.type;
+  runas = other612.runas;
+  properties = other612.properties;
+  __isset = other612.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -14564,6 +14609,7 @@ void CompactionRequest::printTo(std::ostream& out) const {
   out << ", " << "partitionname="; (__isset.partitionname ? (out << to_string(partitionname)) : (out << "<null>"));
   out << ", " << "type=" << to_string(type);
   out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out << "<null>"));
+  out << ", " << "properties="; (__isset.properties ? (out << to_string(properties)) : (out << "<null>"));
   out << ")";
 }
 
@@ -14616,11 +14662,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
   (void) b;
 }
 

[4/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 13a8b71..cb5dec9 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -28842,13 +28842,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list632 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list632.size);
-                  String _elem633;
-                  for (int _i634 = 0; _i634 < _list632.size; ++_i634)
+                  org.apache.thrift.protocol.TList _list642 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list642.size);
+                  String _elem643;
+                  for (int _i644 = 0; _i644 < _list642.size; ++_i644)
                   {
-                    _elem633 = iprot.readString();
-                    struct.success.add(_elem633);
+                    _elem643 = iprot.readString();
+                    struct.success.add(_elem643);
                   }
                   iprot.readListEnd();
                 }
@@ -28883,9 +28883,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter635 : struct.success)
+            for (String _iter645 : struct.success)
             {
-              oprot.writeString(_iter635);
+              oprot.writeString(_iter645);
             }
             oprot.writeListEnd();
           }
@@ -28924,9 +28924,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter636 : struct.success)
+for (String _iter646 : struct.success)
 {
-  oprot.writeString(_iter636);
+  oprot.writeString(_iter646);
 }
   }
 }
@@ -28941,13 +28941,13 @@ public class ThriftHiveMetastore {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list637 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list637.size);
-            String _elem638;
-            for (int _i639 = 0; _i639 < _list637.size; ++_i639)
+            org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list647.size);
+            String _elem648;
+            for (int _i649 = 0; _i649 < _list647.size; ++_i649)
             {
-              _elem638 = iprot.readString();
-              struct.success.add(_elem638);
+              _elem648 = iprot.readString();
+              struct.success.add(_elem648);
             }
           }
           struct.setSuccessIsSet(true);
@@ -29601,13 +29601,13 @@ public class ThriftHiveMetastore {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list640 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list640.size);
-                  String _elem641;
-                  for (int _i642 = 0; _i642 < _list640.size; ++_i642)
+                  org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list650.size);
+                  String _elem651;
+                  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
                   {
-                    _elem641 = iprot.readString();
-                    struct.success.add(_elem641);
+                    _elem651 = iprot.readString();
+                    struct.success.add(_elem651);
                   }
                   iprot.readListEnd();
                 }
@@ -29642,9 +29642,9 @@ public class ThriftHiveMetastore {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for

[5/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
--
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index e028ecb..d3fc92a 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -43,6 +43,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase, SchemeFactory> schemes = 
new HashMap();
   static {
@@ -55,6 +56,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase properties; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -66,7 +68,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase byName = new HashMap();
 
@@ -91,6 +94,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -145,6 +150,10 @@ public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest>
+    if (other.isSetProperties()) {
+      Map<String,String> __this__properties = new HashMap<String,String>(other.properties);
+      this.properties = __this__properties;
+    }
   }
 
   public CompactionRequest deepCopy() {
@@ -195,6 +208,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase();
+}
+this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+    return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+    this.properties = properties;
+  }
+
+  public void unsetProperties() {
+    this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and false otherwise */
+  public boolean isSetProperties() {
+    return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+    if (!value) {
+      this.properties = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
 switch (field) {
 case DBNAME:
@@ -362,6 +410,14 @@ public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest>
+      case PROPERTIES:
+        if (value == null) {
+          unsetProperties();
+        } else {
+          setProperties((Map<String,String>)value);
+        }
+        break;
+
     }
   }
 
@@ -382,6 +438,9 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map524.size);
+String _key525;
+String _val526;
+for (int _i527 = 0; _i527 < _map524.size; ++_i527)
+{
+  _key525 = iprot.readString();
+  _val526 = iprot.readString();
+  struct.properties.put(_key525, _val526);
+}
+iprot.readMapEnd();
+  }
+  struct.setPropertiesIsSet(true);
+} else { 
+  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+}
+break;
   default:
 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
 }
@@ -758,6 +873,21 @@ public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest>
+      if (struct.properties != null) {
+        if (struct.isSetProperties()) {
+          oprot.writeFieldBegin(PROPERTIES_FIELD_DESC);
+          {
+            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size()));
+            for (Map.Entry<String, String> _iter528 : struct.properties.entrySet())
+            {
+              oprot.writeString(_iter528.getKey());
+              oprot.writeString(_iter528.getValue());
+            }
+            oprot.writeMapEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -785,13 +915,26 @@ public class CompactionRequest implements 
org.apache.thrift.TBase _iter529 : 
struct.properties.entrySet())
+  {
+oprot.writeString(_iter529.getKey());
+oprot.writeString(_iter529.getValue());
+  }
+}
+  }
 }
 
 @Override
@@ -803,7 +946,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map530.size);
+  String _key531;
+  String _val532;
+  for (int _i533 = 0; _i533 < _map530.size; ++_i533)
+  {
+_key531 = iprot.readString();
+_val532 = iprot.readString();
+struct.properties.put(_key531, _val532);
+  }
+}
+struct.setPropertiesIsSet(true);
+  }
 }
   }
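
A minimal client-side sketch of populating the new optional field (the database, table, and property key/value are made up, and submitting the request through the metastore client is left out):

    CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MAJOR);
    // Per-request compaction options ride along in the properties map.
    rqst.putToProperties("compactor.mapreduce.map.memory.mb", "2048");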
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
--
diff --git 

[7/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 298384c..5a35a50 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size773;
-::apache::thrift::protocol::TType _etype776;
-xfer += iprot->readListBegin(_etype776, _size773);
-this->success.resize(_size773);
-uint32_t _i777;
-for (_i777 = 0; _i777 < _size773; ++_i777)
+uint32_t _size781;
+::apache::thrift::protocol::TType _etype784;
+xfer += iprot->readListBegin(_etype784, _size781);
+this->success.resize(_size781);
+uint32_t _i785;
+for (_i785 = 0; _i785 < _size781; ++_i785)
 {
-  xfer += iprot->readString(this->success[_i777]);
+  xfer += iprot->readString(this->success[_i785]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter778;
-      for (_iter778 = this->success.begin(); _iter778 != this->success.end(); ++_iter778)
+      std::vector<std::string> ::const_iterator _iter786;
+      for (_iter786 = this->success.begin(); _iter786 != this->success.end(); ++_iter786)
       {
-        xfer += oprot->writeString((*_iter778));
+        xfer += oprot->writeString((*_iter786));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size779;
-::apache::thrift::protocol::TType _etype782;
-xfer += iprot->readListBegin(_etype782, _size779);
-(*(this->success)).resize(_size779);
-uint32_t _i783;
-for (_i783 = 0; _i783 < _size779; ++_i783)
+uint32_t _size787;
+::apache::thrift::protocol::TType _etype790;
+xfer += iprot->readListBegin(_etype790, _size787);
+(*(this->success)).resize(_size787);
+uint32_t _i791;
+for (_i791 = 0; _i791 < _size787; ++_i791)
 {
-  xfer += iprot->readString((*(this->success))[_i783]);
+  xfer += iprot->readString((*(this->success))[_i791]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size784;
-::apache::thrift::protocol::TType _etype787;
-xfer += iprot->readListBegin(_etype787, _size784);
-this->success.resize(_size784);
-uint32_t _i788;
-for (_i788 = 0; _i788 < _size784; ++_i788)
+uint32_t _size792;
+::apache::thrift::protocol::TType _etype795;
+xfer += iprot->readListBegin(_etype795, _size792);
+this->success.resize(_size792);
+uint32_t _i796;
+for (_i796 = 0; _i796 < _size792; ++_i796)
 {
-  xfer += iprot->readString(this->success[_i788]);
+  xfer += iprot->readString(this->success[_i796]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter789;
-      for (_iter789 = this->success.begin(); _iter789 != this->success.end(); ++_iter789)
+      std::vector<std::string> ::const_iterator _iter797;
+      for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797)
       {
-        xfer += oprot->writeString((*_iter789));
+        xfer += oprot->writeString((*_iter797));
       }
       xfer += oprot->writeListEnd();
     }
@@ 

[2/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 5aef35c..f505208 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -14373,6 +14373,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -14397,6 +14401,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -14415,6 +14431,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -14472,6 +14491,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size465 = 0;
+$_ktype466 = 0;
+$_vtype467 = 0;
+$xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465);
+for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+{
+  $key470 = '';
+  $val471 = '';
+  $xfer += $input->readString($key470);
+  $xfer += $input->readString($val471);
+  $this->properties[$key470] = $val471;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -14510,6 +14549,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+    if ($this->properties !== null) {
+      if (!is_array($this->properties)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+      {
+        $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties));
+        {
+          foreach ($this->properties as $kiter472 => $viter473)
+          {
+            $xfer += $output->writeString($kiter472);
+            $xfer += $output->writeString($viter473);
+          }
+        }
+        $output->writeMapEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -14946,15 +15003,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size465 = 0;
-$_etype468 = 0;
-$xfer += $input->readListBegin($_etype468, $_size465);
-for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+$_size474 = 0;
+$_etype477 = 0;
+$xfer += $input->readListBegin($_etype477, $_size474);
+for ($_i478 = 0; $_i478 < $_size474; ++$_i478)
 {
-  $elem470 = null;
-  $elem470 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem470->read($input);
-  $this->compacts []= $elem470;
+  $elem479 = null;
+  $elem479 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem479->read($input);
+  $this->compacts []= $elem479;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -14982,9 +15039,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter471)
+  foreach ($this->compacts as $iter480)
   {
-$xfer += $iter471->write($output);
+$xfer += $iter480->write($output);
   }
 }
 $output->writeListEnd();
@@ -15113,14 +15170,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size472 = 0;
-$_etype475 = 0;
-$xfer += $input->readListBegin($_etype475, $_size472);
- 
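A note on compatibility for the CompactionRequest change above: the generated PHP reader handles the new map under `case 6`, and any reader generated before this patch falls into its `default:` branch and skips the field, so old and new clients interoperate without a protocol bump. A minimal Java sketch of that skip discipline, hand-written for illustration only (the struct layout and field ids below are a toy, not the generated metastore classes):

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TField;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TProtocolUtil;
import org.apache.thrift.protocol.TType;

// Toy reader: field ids we do not recognize are skipped, not treated as errors.
// This is why adding optional field 6 (properties) stays wire-compatible.
public class SkipUnknownFieldSketch {
  public static void readIgnoringUnknown(TProtocol iprot) throws TException {
    iprot.readStructBegin();
    while (true) {
      TField field = iprot.readFieldBegin();
      if (field.type == TType.STOP) {
        break; // end of struct
      }
      if (field.id == 1 && field.type == TType.STRING) {
        String dbname = iprot.readString(); // a field this reader knows
      } else {
        TProtocolUtil.skip(iprot, field.type); // e.g. field 6 on an old reader
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
  }
}

The skip is type-driven: the writer records each field's wire type, so a reader can step over values it has no schema for, including nested maps.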

[8/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c57a5961
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c57a5961
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c57a5961

Branch: refs/heads/master
Commit: c57a59611fa168ee38c6ee0ee60b1d6c4994f9f8
Parents: 793681c
Author: Wei Zheng 
Authored: Fri May 27 11:20:14 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 11:20:14 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  161 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/hive-txn-schema-2.1.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../derby/upgrade-2.0.0-to-2.1.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../upgrade/mssql/hive-schema-2.1.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/hive-txn-schema-2.1.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/hive-txn-schema-2.1.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../postgres/hive-txn-schema-2.1.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2020 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  980 
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java   |   32 +-
 .../api/GetFileMetadataByExprResult.java|   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java|   44 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2188 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1232 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  315 +--
 .../hive_metastore/ThriftHiveMetastore.py   |  830 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  197 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   56 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 65 files changed, 4780 insertions(+), 4145 deletions(-)
--
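For readers skimming the stat list: the user-visible surface of this patch is twofold — a HiveQL clause (the HiveParser.g and DDLSemanticAnalyzer.java entries) and a metastore-client overload (the IMetaStoreClient.java and HiveMetaStoreClient.java entries), both feeding the new CompactionRequest.properties map. A hedged sketch of the two call sites; the exact clause spelling, property keys, and overload signature are inferred from the stat list above, not verified against the patch:

import java.util.HashMap;
import java.util.Map;

// Hedged sketch only. Both entry points are assumptions drawn from the
// diffstat (HiveParser.g, DDLSemanticAnalyzer.java, IMetaStoreClient.java).
public class RequestCompactionSketch {

  // Assumed DDL form wired up by the parser change:
  static final String DDL =
      "ALTER TABLE acid_tbl PARTITION (ds='2016-05-27') COMPACT 'major' "
          + "WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='2048')";

  // Assumed client form carrying the same map in CompactionRequest.properties:
  public static void requestCompaction() {
    Map<String, String> props = new HashMap<String, String>();
    props.put("compactor.mapreduce.map.memory.mb", "2048"); // assumed key
    // client.compact("default", "acid_tbl", "ds=2016-05-27",
    //     CompactionType.MAJOR, props);  // assumed overload added here
  }
}

Either path ends in the same place: the map travels as Thrift field 6 and lands in the CQ_TBLPROPERTIES column created by the schema-upgrade scripts listed above.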


http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git 

[1/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 793681c76 -> c57a59611


http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4db9680..8d88cd7 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -9984,6 +9984,7 @@ class CompactionRequest:
- partitionname
- type
- runas
+   - properties
   """
 
   thrift_spec = (
@@ -9993,14 +9994,16 @@ class CompactionRequest:
 (3, TType.STRING, 'partitionname', None, None, ), # 3
 (4, TType.I32, 'type', None, None, ), # 4
 (5, TType.STRING, 'runas', None, None, ), # 5
+(6, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, ), # 6
   )
 
-  def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None,):
+  def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None,):
 self.dbname = dbname
 self.tablename = tablename
 self.partitionname = partitionname
 self.type = type
 self.runas = runas
+self.properties = properties
 
   def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -10036,6 +10039,17 @@ class CompactionRequest:
   self.runas = iprot.readString()
 else:
   iprot.skip(ftype)
+  elif fid == 6:
+if ftype == TType.MAP:
+  self.properties = {}
+  (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
+  for _i466 in xrange(_size462):
+_key467 = iprot.readString()
+_val468 = iprot.readString()
+self.properties[_key467] = _val468
+  iprot.readMapEnd()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -10066,6 +10080,14 @@ class CompactionRequest:
   oprot.writeFieldBegin('runas', TType.STRING, 5)
   oprot.writeString(self.runas)
   oprot.writeFieldEnd()
+if self.properties is not None:
+  oprot.writeFieldBegin('properties', TType.MAP, 6)
+  oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
+  for kiter469,viter470 in self.properties.items():
+oprot.writeString(kiter469)
+oprot.writeString(viter470)
+  oprot.writeMapEnd()
+  oprot.writeFieldEnd()
 oprot.writeFieldStop()
 oprot.writeStructEnd()
 
@@ -10086,6 +10108,7 @@ class CompactionRequest:
 value = (value * 31) ^ hash(self.partitionname)
 value = (value * 31) ^ hash(self.type)
 value = (value * 31) ^ hash(self.runas)
+value = (value * 31) ^ hash(self.properties)
 return value
 
   def __repr__(self):
@@ -10387,11 +10410,11 @@ class ShowCompactResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.compacts = []
-  (_etype465, _size462) = iprot.readListBegin()
-  for _i466 in xrange(_size462):
-_elem467 = ShowCompactResponseElement()
-_elem467.read(iprot)
-self.compacts.append(_elem467)
+  (_etype474, _size471) = iprot.readListBegin()
+  for _i475 in xrange(_size471):
+_elem476 = ShowCompactResponseElement()
+_elem476.read(iprot)
+self.compacts.append(_elem476)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10408,8 +10431,8 @@ class ShowCompactResponse:
 if self.compacts is not None:
   oprot.writeFieldBegin('compacts', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-  for iter468 in self.compacts:
-iter468.write(oprot)
+  for iter477 in self.compacts:
+iter477.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -10490,10 +10513,10 @@ class AddDynamicPartitions:
   elif fid == 4:
 if ftype == TType.LIST:
   self.partitionnames = []
-  (_etype472, _size469) = iprot.readListBegin()
-  for _i473 in xrange(_size469):
-_elem474 = iprot.readString()
-self.partitionnames.append(_elem474)
+  (_etype481, _size478) = iprot.readListBegin()
+  for _i482 in xrange(_size478):
+_elem483 = iprot.readString()
+self.partitionnames.append(_elem483)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10527,8 +10550,8 @@ class AddDynamicPartitions:
 if self.partitionnames is not None:
   oprot.writeFieldBegin('partitionnames', TType.LIST, 4)
   oprot.writeListBegin(TType.STRING, len(self.partitionnames))
-   

[1/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 abaf88248 -> 6e0504d9a


http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index f8798b7..0601a29 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -169,7 +169,7 @@ class CompactionTxnHandler extends TxnHandler {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
 String s = "select cq_id, cq_database, cq_table, cq_partition, " +
-  "cq_type from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
+  "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
 LOG.debug("Going to execute query <" + s + ">");
 rs = stmt.executeQuery(s);
 if (!rs.next()) {
@@ -185,6 +185,7 @@ class CompactionTxnHandler extends TxnHandler {
   info.tableName = rs.getString(3);
   info.partName = rs.getString(4);
   info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+  info.properties = rs.getString(6);
   // Now, update this record as being worked on by this worker.
   long now = getDbTime(dbConn);
s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
@@ -329,7 +330,7 @@ class CompactionTxnHandler extends TxnHandler {
   try {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
-rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + info.id);
+rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + info.id);
 if(rs.next()) {
   info = CompactionInfo.loadFullFromCompactionQueue(rs);
 }
@@ -345,7 +346,7 @@ class CompactionTxnHandler extends TxnHandler {
   LOG.debug("Going to rollback");
   dbConn.rollback();
 }
-pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
 info.state = SUCCEEDED_STATE;
CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
 updCount = pStmt.executeUpdate();
@@ -838,7 +839,7 @@ class CompactionTxnHandler extends TxnHandler {
   try {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
-rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
+rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
 if(rs.next()) {
   ci = CompactionInfo.loadFullFromCompactionQueue(rs);
   String s = "delete from COMPACTION_QUEUE where cq_id = " + ci.id;
@@ -866,7 +867,7 @@ class CompactionTxnHandler extends TxnHandler {
 }
 close(rs, stmt, null);
 
-pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
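The pattern in the hunks above is purely positional JDBC: CQ_TBLPROPERTIES becomes the sixth selected column of the worker query (hence rs.getString(6) into info.properties), the seventh of the full-row query, and a fourteenth bind parameter in the COMPLETED_COMPACTIONS insert. A stand-alone sketch of the read side; the Derby URL and the 'i' state literal are stand-ins for the metastore's connection pooling and its INITIATED_STATE constant:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch only: assumes a COMPACTION_QUEUE table already exists; the real
// metastore manages its own connections and state constants.
public class ReadCompactionQueueSketch {
  public static void main(String[] args) throws Exception {
    try (Connection dbConn = DriverManager.getConnection("jdbc:derby:memory:ms;create=true");
         Statement stmt = dbConn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "select cq_id, cq_database, cq_table, cq_partition, "
                 + "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = 'i'")) {
      while (rs.next()) {
        long id = rs.getLong(1);
        String tblProperties = rs.getString(6); // the column added by this patch
        System.out.println("compaction " + id + " properties: " + tblProperties);
      }
    }
  }
}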
 

[5/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 9e2e883..f2a715a 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -12834,6 +12834,11 @@ void CompactionRequest::__set_runas(const std::string& val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map<std::string, std::string> & val) {
+  this->properties = val;
+__isset.properties = true;
+}
+
uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -12900,6 +12905,29 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size561;
+::apache::thrift::protocol::TType _ktype562;
+::apache::thrift::protocol::TType _vtype563;
+xfer += iprot->readMapBegin(_ktype562, _vtype563, _size561);
+uint32_t _i565;
+for (_i565 = 0; _i565 < _size561; ++_i565)
+{
+  std::string _key566;
+  xfer += iprot->readString(_key566);
+  std::string& _val567 = this->properties[_key566];
+  xfer += iprot->readString(_val567);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -12945,6 +12973,20 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
 xfer += oprot->writeString(this->runas);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6);
+{
+  xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
+  std::map<std::string, std::string> ::const_iterator _iter568;
+  for (_iter568 = this->properties.begin(); _iter568 != this->properties.end(); ++_iter568)
+  {
+xfer += oprot->writeString(_iter568->first);
+xfer += oprot->writeString(_iter568->second);
+  }
+  xfer += oprot->writeMapEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -12957,24 +12999,27 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other561) {
-  dbname = other561.dbname;
-  tablename = other561.tablename;
-  partitionname = other561.partitionname;
-  type = other561.type;
-  runas = other561.runas;
-  __isset = other561.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other562) {
-  dbname = other562.dbname;
-  tablename = other562.tablename;
-  partitionname = other562.partitionname;
-  type = other562.type;
-  runas = other562.runas;
-  __isset = other562.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other569) {
+  dbname = other569.dbname;
+  tablename = other569.tablename;
+  partitionname = other569.partitionname;
+  type = other569.type;
+  runas = other569.runas;
+  properties = other569.properties;
+  __isset = other569.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other570) {
+  dbname = other570.dbname;
+  tablename = other570.tablename;
+  partitionname = other570.partitionname;
+  type = other570.type;
+  runas = other570.runas;
+  properties = other570.properties;
+  __isset = other570.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -12985,6 +13030,7 @@ void CompactionRequest::printTo(std::ostream& out) const {
  out << ", " << "partitionname="; (__isset.partitionname ? (out << to_string(partitionname)) : (out << "<null>"));
  out << ", " << "type=" << to_string(type);
  out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out << "<null>"));
+  out << ", " << "properties="; (__isset.properties ? (out << to_string(properties)) : (out << "<null>"));
   out << ")";
 }
 
@@ -13037,11 +13083,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
   (void) b;
 }
 

[6/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 028c647..c2a208f 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size659;
-::apache::thrift::protocol::TType _etype662;
-xfer += iprot->readListBegin(_etype662, _size659);
-this->success.resize(_size659);
-uint32_t _i663;
-for (_i663 = 0; _i663 < _size659; ++_i663)
+uint32_t _size667;
+::apache::thrift::protocol::TType _etype670;
+xfer += iprot->readListBegin(_etype670, _size667);
+this->success.resize(_size667);
+uint32_t _i671;
+for (_i671 = 0; _i671 < _size667; ++_i671)
 {
-  xfer += iprot->readString(this->success[_i663]);
+  xfer += iprot->readString(this->success[_i671]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter664;
-  for (_iter664 = this->success.begin(); _iter664 != this->success.end(); ++_iter664)
+  std::vector<std::string> ::const_iterator _iter672;
+  for (_iter672 = this->success.begin(); _iter672 != this->success.end(); ++_iter672)
   {
-xfer += oprot->writeString((*_iter664));
+xfer += oprot->writeString((*_iter672));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size665;
-::apache::thrift::protocol::TType _etype668;
-xfer += iprot->readListBegin(_etype668, _size665);
-(*(this->success)).resize(_size665);
-uint32_t _i669;
-for (_i669 = 0; _i669 < _size665; ++_i669)
+uint32_t _size673;
+::apache::thrift::protocol::TType _etype676;
+xfer += iprot->readListBegin(_etype676, _size673);
+(*(this->success)).resize(_size673);
+uint32_t _i677;
+for (_i677 = 0; _i677 < _size673; ++_i677)
 {
-  xfer += iprot->readString((*(this->success))[_i669]);
+  xfer += iprot->readString((*(this->success))[_i677]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size670;
-::apache::thrift::protocol::TType _etype673;
-xfer += iprot->readListBegin(_etype673, _size670);
-this->success.resize(_size670);
-uint32_t _i674;
-for (_i674 = 0; _i674 < _size670; ++_i674)
+uint32_t _size678;
+::apache::thrift::protocol::TType _etype681;
+xfer += iprot->readListBegin(_etype681, _size678);
+this->success.resize(_size678);
+uint32_t _i682;
+for (_i682 = 0; _i682 < _size678; ++_i682)
 {
-  xfer += iprot->readString(this->success[_i674]);
+  xfer += iprot->readString(this->success[_i682]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter675;
-  for (_iter675 = this->success.begin(); _iter675 != this->success.end(); ++_iter675)
+  std::vector<std::string> ::const_iterator _iter683;
+  for (_iter683 = this->success.begin(); _iter683 != this->success.end(); ++_iter683)
   {
-xfer += oprot->writeString((*_iter675));
+xfer += oprot->writeString((*_iter683));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[2/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 045864a..925eea4 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -12797,6 +12797,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -12821,6 +12825,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -12839,6 +12855,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -12896,6 +12915,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size437 = 0;
+$_ktype438 = 0;
+$_vtype439 = 0;
+$xfer += $input->readMapBegin($_ktype438, $_vtype439, $_size437);
+for ($_i441 = 0; $_i441 < $_size437; ++$_i441)
+{
+  $key442 = '';
+  $val443 = '';
+  $xfer += $input->readString($key442);
+  $xfer += $input->readString($val443);
+  $this->properties[$key442] = $val443;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -12934,6 +12973,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->properties !== null) {
+  if (!is_array($this->properties)) {
+throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+  {
+$output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties));
+{
+  foreach ($this->properties as $kiter444 => $viter445)
+  {
+$xfer += $output->writeString($kiter444);
+$xfer += $output->writeString($viter445);
+  }
+}
+$output->writeMapEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -13370,15 +13427,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size437 = 0;
-$_etype440 = 0;
-$xfer += $input->readListBegin($_etype440, $_size437);
-for ($_i441 = 0; $_i441 < $_size437; ++$_i441)
+$_size446 = 0;
+$_etype449 = 0;
+$xfer += $input->readListBegin($_etype449, $_size446);
+for ($_i450 = 0; $_i450 < $_size446; ++$_i450)
 {
-  $elem442 = null;
-  $elem442 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem442->read($input);
-  $this->compacts []= $elem442;
+  $elem451 = null;
+  $elem451 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem451->read($input);
+  $this->compacts []= $elem451;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -13406,9 +13463,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter443)
+  foreach ($this->compacts as $iter452)
   {
-$xfer += $iter443->write($output);
+$xfer += $iter452->write($output);
   }
 }
 $output->writeListEnd();
@@ -13537,14 +13594,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size444 = 0;
-$_etype447 = 0;
-$xfer += $input->readListBegin($_etype447, $_size444);
- 

[7/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e0504d9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e0504d9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e0504d9

Branch: refs/heads/branch-1
Commit: 6e0504d9a8552471f7137a513abe2a0a15c124de
Parents: abaf882
Author: Wei Zheng 
Authored: Fri May 27 15:13:53 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 15:13:53 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  164 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 1814 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  640 +++---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 1948 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1098 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  171 +-
 .../hive_metastore/ThriftHiveMetastore.py   |  738 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  103 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   54 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 49 files changed, 4016 insertions(+), 3393 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 37bbab8..9c8bcc1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -14,6 +14,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
@@ -34,6 +35,7 @@ import 

[4/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index e836154..957a256 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -25539,13 +25539,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list540 = iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list540.size);
-  String _elem541;
-  for (int _i542 = 0; _i542 < _list540.size; ++_i542)
+  org.apache.thrift.protocol.TList _list550 = iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list550.size);
+  String _elem551;
+  for (int _i552 = 0; _i552 < _list550.size; ++_i552)
   {
-_elem541 = iprot.readString();
-struct.success.add(_elem541);
+_elem551 = iprot.readString();
+struct.success.add(_elem551);
   }
   iprot.readListEnd();
 }
@@ -25580,9 +25580,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-for (String _iter543 : struct.success)
+for (String _iter553 : struct.success)
 {
-  oprot.writeString(_iter543);
+  oprot.writeString(_iter553);
 }
 oprot.writeListEnd();
   }
@@ -25621,9 +25621,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter544 : struct.success)
+for (String _iter554 : struct.success)
 {
-  oprot.writeString(_iter544);
+  oprot.writeString(_iter554);
 }
   }
 }
@@ -25638,13 +25638,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-struct.success = new ArrayList<String>(_list545.size);
-String _elem546;
-for (int _i547 = 0; _i547 < _list545.size; ++_i547)
+org.apache.thrift.protocol.TList _list555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+struct.success = new ArrayList<String>(_list555.size);
+String _elem556;
+for (int _i557 = 0; _i557 < _list555.size; ++_i557)
 {
-  _elem546 = iprot.readString();
-  struct.success.add(_elem546);
+  _elem556 = iprot.readString();
+  struct.success.add(_elem556);
 }
   }
   struct.setSuccessIsSet(true);
@@ -26298,13 +26298,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list548 = iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list548.size);
-  String _elem549;
-  for (int _i550 = 0; _i550 < _list548.size; ++_i550)
+  org.apache.thrift.protocol.TList _list558 = iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list558.size);
+  String _elem559;
+  for (int _i560 = 0; _i560 < _list558.size; ++_i560)
   {
-_elem549 = iprot.readString();
-struct.success.add(_elem549);
+_elem559 = iprot.readString();
+struct.success.add(_elem559);
   }
   iprot.readListEnd();
 }
@@ -26339,9 +26339,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-for 

[3/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 438e368..c85150d 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -9428,14 +9428,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size479 = 0;
-$_etype482 = 0;
-$xfer += $input->readListBegin($_etype482, $_size479);
-for ($_i483 = 0; $_i483 < $_size479; ++$_i483)
+$_size488 = 0;
+$_etype491 = 0;
+$xfer += $input->readListBegin($_etype491, $_size488);
+for ($_i492 = 0; $_i492 < $_size488; ++$_i492)
 {
-  $elem484 = null;
-  $xfer += $input->readString($elem484);
-  $this->success []= $elem484;
+  $elem493 = null;
+  $xfer += $input->readString($elem493);
+  $this->success []= $elem493;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -9471,9 +9471,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter485)
+  foreach ($this->success as $iter494)
   {
-$xfer += $output->writeString($iter485);
+$xfer += $output->writeString($iter494);
   }
 }
 $output->writeListEnd();
@@ -9604,14 +9604,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size486 = 0;
-$_etype489 = 0;
-$xfer += $input->readListBegin($_etype489, $_size486);
-for ($_i490 = 0; $_i490 < $_size486; ++$_i490)
+$_size495 = 0;
+$_etype498 = 0;
+$xfer += $input->readListBegin($_etype498, $_size495);
+for ($_i499 = 0; $_i499 < $_size495; ++$_i499)
 {
-  $elem491 = null;
-  $xfer += $input->readString($elem491);
-  $this->success []= $elem491;
+  $elem500 = null;
+  $xfer += $input->readString($elem500);
+  $this->success []= $elem500;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -9647,9 +9647,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter492)
+  foreach ($this->success as $iter501)
   {
-$xfer += $output->writeString($iter492);
+$xfer += $output->writeString($iter501);
   }
 }
 $output->writeListEnd();
@@ -10650,18 +10650,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size493 = 0;
-$_ktype494 = 0;
-$_vtype495 = 0;
-$xfer += $input->readMapBegin($_ktype494, $_vtype495, $_size493);
-for ($_i497 = 0; $_i497 < $_size493; ++$_i497)
+$_size502 = 0;
+$_ktype503 = 0;
+$_vtype504 = 0;
+$xfer += $input->readMapBegin($_ktype503, $_vtype504, $_size502);
+for ($_i506 = 0; $_i506 < $_size502; ++$_i506)
 {
-  $key498 = '';
-  $val499 = new \metastore\Type();
-  $xfer += $input->readString($key498);
-  $val499 = new \metastore\Type();
-  $xfer += $val499->read($input);
-  $this->success[$key498] = $val499;
+  $key507 = '';
+  $val508 = new \metastore\Type();
+  $xfer += $input->readString($key507);
+  $val508 = new \metastore\Type();
+  $xfer += $val508->read($input);
+  $this->success[$key507] = $val508;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -10697,10 +10697,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
$output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
 {
-  foreach ($this->success as $kiter500 => $viter501)
+  foreach ($this->success as $kiter509 => $viter510)
   {
-$xfer += $output->writeString($kiter500);
-$xfer += $viter501->write($output);
+$xfer += $output->writeString($kiter509);
+$xfer += $viter510->write($output);
   }
 

[2/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 5aef35c..f505208 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -14373,6 +14373,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -14397,6 +14401,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -14415,6 +14431,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -14472,6 +14491,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size465 = 0;
+$_ktype466 = 0;
+$_vtype467 = 0;
+$xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465);
+for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+{
+  $key470 = '';
+  $val471 = '';
+  $xfer += $input->readString($key470);
+  $xfer += $input->readString($val471);
+  $this->properties[$key470] = $val471;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -14510,6 +14549,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->properties !== null) {
+  if (!is_array($this->properties)) {
+throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+  {
+$output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties));
+{
+  foreach ($this->properties as $kiter472 => $viter473)
+  {
+$xfer += $output->writeString($kiter472);
+$xfer += $output->writeString($viter473);
+  }
+}
+$output->writeMapEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -14946,15 +15003,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size465 = 0;
-$_etype468 = 0;
-$xfer += $input->readListBegin($_etype468, $_size465);
-for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+$_size474 = 0;
+$_etype477 = 0;
+$xfer += $input->readListBegin($_etype477, $_size474);
+for ($_i478 = 0; $_i478 < $_size474; ++$_i478)
 {
-  $elem470 = null;
-  $elem470 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem470->read($input);
-  $this->compacts []= $elem470;
+  $elem479 = null;
+  $elem479 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem479->read($input);
+  $this->compacts []= $elem479;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -14982,9 +15039,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter471)
+  foreach ($this->compacts as $iter480)
   {
-$xfer += $iter471->write($output);
+$xfer += $iter480->write($output);
   }
 }
 $output->writeListEnd();
@@ -15113,14 +15170,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size472 = 0;
-$_etype475 = 0;
-$xfer += $input->readListBegin($_etype475, $_size472);
- 

[4/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 13a8b71..cb5dec9 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -28842,13 +28842,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list632 = iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list632.size);
-  String _elem633;
-  for (int _i634 = 0; _i634 < _list632.size; ++_i634)
+  org.apache.thrift.protocol.TList _list642 = iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list642.size);
+  String _elem643;
+  for (int _i644 = 0; _i644 < _list642.size; ++_i644)
   {
-_elem633 = iprot.readString();
-struct.success.add(_elem633);
+_elem643 = iprot.readString();
+struct.success.add(_elem643);
   }
   iprot.readListEnd();
 }
@@ -28883,9 +28883,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-for (String _iter635 : struct.success)
+for (String _iter645 : struct.success)
 {
-  oprot.writeString(_iter635);
+  oprot.writeString(_iter645);
 }
 oprot.writeListEnd();
   }
@@ -28924,9 +28924,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter636 : struct.success)
+for (String _iter646 : struct.success)
 {
-  oprot.writeString(_iter636);
+  oprot.writeString(_iter646);
 }
   }
 }
@@ -28941,13 +28941,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list637 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-struct.success = new ArrayList<String>(_list637.size);
-String _elem638;
-for (int _i639 = 0; _i639 < _list637.size; ++_i639)
+org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+struct.success = new ArrayList<String>(_list647.size);
+String _elem648;
+for (int _i649 = 0; _i649 < _list647.size; ++_i649)
 {
-  _elem638 = iprot.readString();
-  struct.success.add(_elem638);
+  _elem648 = iprot.readString();
+  struct.success.add(_elem648);
 }
   }
   struct.setSuccessIsSet(true);
@@ -29601,13 +29601,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list640 = iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list640.size);
-  String _elem641;
-  for (int _i642 = 0; _i642 < _list640.size; ++_i642)
+  org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list650.size);
+  String _elem651;
+  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
   {
-_elem641 = iprot.readString();
-struct.success.add(_elem641);
+_elem651 = iprot.readString();
+struct.success.add(_elem651);
   }
   iprot.readListEnd();
 }
@@ -29642,9 +29642,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-for 

[3/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 3c9e038..2d82c92 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -10792,14 +10792,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size560 = 0;
-$_etype563 = 0;
-$xfer += $input->readListBegin($_etype563, $_size560);
-for ($_i564 = 0; $_i564 < $_size560; ++$_i564)
+$_size569 = 0;
+$_etype572 = 0;
+$xfer += $input->readListBegin($_etype572, $_size569);
+for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
 {
-  $elem565 = null;
-  $xfer += $input->readString($elem565);
-  $this->success []= $elem565;
+  $elem574 = null;
+  $xfer += $input->readString($elem574);
+  $this->success []= $elem574;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -10835,9 +10835,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter566)
+  foreach ($this->success as $iter575)
   {
-$xfer += $output->writeString($iter566);
+$xfer += $output->writeString($iter575);
   }
 }
 $output->writeListEnd();
@@ -10968,14 +10968,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size567 = 0;
-$_etype570 = 0;
-$xfer += $input->readListBegin($_etype570, $_size567);
-for ($_i571 = 0; $_i571 < $_size567; ++$_i571)
+$_size576 = 0;
+$_etype579 = 0;
+$xfer += $input->readListBegin($_etype579, $_size576);
+for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
 {
-  $elem572 = null;
-  $xfer += $input->readString($elem572);
-  $this->success []= $elem572;
+  $elem581 = null;
+  $xfer += $input->readString($elem581);
+  $this->success []= $elem581;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11011,9 +11011,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter573)
+  foreach ($this->success as $iter582)
   {
-$xfer += $output->writeString($iter573);
+$xfer += $output->writeString($iter582);
   }
 }
 $output->writeListEnd();
@@ -12014,18 +12014,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size574 = 0;
-$_ktype575 = 0;
-$_vtype576 = 0;
-$xfer += $input->readMapBegin($_ktype575, $_vtype576, $_size574);
-for ($_i578 = 0; $_i578 < $_size574; ++$_i578)
+$_size583 = 0;
+$_ktype584 = 0;
+$_vtype585 = 0;
+$xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583);
+for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
 {
-  $key579 = '';
-  $val580 = new \metastore\Type();
-  $xfer += $input->readString($key579);
-  $val580 = new \metastore\Type();
-  $xfer += $val580->read($input);
-  $this->success[$key579] = $val580;
+  $key588 = '';
+  $val589 = new \metastore\Type();
+  $xfer += $input->readString($key588);
+  $val589 = new \metastore\Type();
+  $xfer += $val589->read($input);
+  $this->success[$key588] = $val589;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -12061,10 +12061,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
$output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
 {
-  foreach ($this->success as $kiter581 => $viter582)
+  foreach ($this->success as $kiter590 => $viter591)
   {
-$xfer += $output->writeString($kiter581);
-$xfer += $viter582->write($output);
+$xfer += $output->writeString($kiter590);
+$xfer += $viter591->write($output);
  

[5/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
--
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index e028ecb..d3fc92a 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -43,6 +43,7 @@ public class CompactionRequest implements org.apache.thrift.TBase
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +56,7 @@ public class CompactionRequest implements org.apache.thrift.TBase
+  private Map<String,String> properties; // optional
 
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -66,7 +68,8 @@ public class CompactionRequest implements org.apache.thrift.TBase
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -91,6 +94,8 @@ public class CompactionRequest implements org.apache.thrift.TBase
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -145,6 +150,10 @@ public class CompactionRequest implements org.apache.thrift.TBase
+    if (other.isSetProperties()) {
+      Map<String,String> __this__properties = new HashMap<String,String>(other.properties);
+      this.properties = __this__properties;
+    }
   }
 
   public CompactionRequest deepCopy() {
@@ -195,6 +208,7 @@ public class CompactionRequest implements org.apache.thrift.TBase
+  public void putToProperties(String key, String val) {
+    if (this.properties == null) {
+      this.properties = new HashMap<String,String>();
+    }
+    this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+this.properties = properties;
+  }
+
+  public void unsetProperties() {
+this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and false otherwise */
+  public boolean isSetProperties() {
+return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+if (!value) {
+  this.properties = null;
+}
+  }
+
   public void setFieldValue(_Fields field, Object value) {
 switch (field) {
 case DBNAME:
@@ -362,6 +410,14 @@ public class CompactionRequest implements org.apache.thrift.TBase
+setProperties((Map<String,String>)value);
+  }
+  break;
+
 }
   }
 
@@ -382,6 +438,9 @@ public class CompactionRequest implements org.apache.thrift.TBase
+org.apache.thrift.protocol.TMap _map524 = iprot.readMapBegin();
+struct.properties = new HashMap<String,String>(2*_map524.size);
+String _key525;
+String _val526;
+for (int _i527 = 0; _i527 < _map524.size; ++_i527)
+{
+  _key525 = iprot.readString();
+  _val526 = iprot.readString();
+  struct.properties.put(_key525, _val526);
+}
+iprot.readMapEnd();
+  }
+  struct.setPropertiesIsSet(true);
+} else { 
+  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+}
+break;
   default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 }
@@ -758,6 +873,21 @@ public class CompactionRequest implements org.apache.thrift.TBase
+for (Map.Entry<String, String> _iter528 : struct.properties.entrySet())
+{
+  oprot.writeString(_iter528.getKey());
+  oprot.writeString(_iter528.getValue());
+}
+oprot.writeMapEnd();
+  }
+  oprot.writeFieldEnd();
+}
+  }
   oprot.writeFieldStop();
   oprot.writeStructEnd();
 }
@@ -785,13 +915,26 @@ public class CompactionRequest implements org.apache.thrift.TBase
+  for (Map.Entry<String, String> _iter529 : struct.properties.entrySet())
+  {
+oprot.writeString(_iter529.getKey());
+oprot.writeString(_iter529.getValue());
+  }
+}
+  }
 }
 
 @Override
@@ -803,7 +946,7 @@ public class CompactionRequest implements org.apache.thrift.TBase
+  org.apache.thrift.protocol.TMap _map530 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+  struct.properties = new HashMap<String,String>(2*_map530.size);
+  String _key531;
+  String _val532;
+  for (int _i533 = 0; _i533 < _map530.size; ++_i533)
+  {
+_key531 = iprot.readString();
+_val532 = iprot.readString();
+struct.properties.put(_key531, _val532);
+  }
+}
+struct.setPropertiesIsSet(true);
+  }
 }
   }
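Pulling the generated pieces above together, the caller-facing API is plain map plumbing. A minimal sketch using only the accessors visible in this hunk (putToProperties, setProperties, isSetProperties, getProperties); the three-argument constructor is assumed to be the struct's required-fields constructor (dbname, tablename, type):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionType;

public class CompactionRequestPropertiesSketch {
  public static void main(String[] args) {
    CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MAJOR);

    // One entry at a time; putToProperties creates the map lazily.
    rqst.putToProperties("compactor.mapreduce.map.memory.mb", "2048"); // example key, assumed

    // Or wholesale; the field is optional, so it may simply stay unset.
    Map<String, String> props = new HashMap<String, String>();
    props.put("tblprops.orc.compress.size", "8192"); // example key, assumed prefix
    rqst.setProperties(props);

    if (rqst.isSetProperties()) {
      System.out.println("sending " + rqst.getProperties().size() + " compaction properties");
    }
  }
}

Because properties is optional, write() only emits field 6 when the map is set, which keeps requests from older clients byte-identical to before.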
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
--
diff --git 

[6/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index f8ca7cd..79460a8 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -14413,6 +14413,11 @@ void CompactionRequest::__set_runas(const std::string& 
val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map<std::string, std::string> & val) {
+  this->properties = val;
+  __isset.properties = true;
+}
+
 uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) 
{
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -14479,6 +14484,29 @@ uint32_t 
CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size603;
+::apache::thrift::protocol::TType _ktype604;
+::apache::thrift::protocol::TType _vtype605;
+xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603);
+uint32_t _i607;
+for (_i607 = 0; _i607 < _size603; ++_i607)
+{
+  std::string _key608;
+  xfer += iprot->readString(_key608);
+  std::string& _val609 = this->properties[_key608];
+  xfer += iprot->readString(_val609);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -14524,6 +14552,20 @@ uint32_t 
CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
 xfer += oprot->writeString(this->runas);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+xfer += oprot->writeFieldBegin("properties", 
::apache::thrift::protocol::T_MAP, 6);
+{
+  xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
+  std::map<std::string, std::string> ::const_iterator _iter610;
+  for (_iter610 = this->properties.begin(); _iter610 != 
this->properties.end(); ++_iter610)
+  {
+xfer += oprot->writeString(_iter610->first);
+xfer += oprot->writeString(_iter610->second);
+  }
+  xfer += oprot->writeMapEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -14536,24 +14578,27 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other603) {
-  dbname = other603.dbname;
-  tablename = other603.tablename;
-  partitionname = other603.partitionname;
-  type = other603.type;
-  runas = other603.runas;
-  __isset = other603.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other604) {
-  dbname = other604.dbname;
-  tablename = other604.tablename;
-  partitionname = other604.partitionname;
-  type = other604.type;
-  runas = other604.runas;
-  __isset = other604.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other611) {
+  dbname = other611.dbname;
+  tablename = other611.tablename;
+  partitionname = other611.partitionname;
+  type = other611.type;
+  runas = other611.runas;
+  properties = other611.properties;
+  __isset = other611.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other612) {
+  dbname = other612.dbname;
+  tablename = other612.tablename;
+  partitionname = other612.partitionname;
+  type = other612.type;
+  runas = other612.runas;
+  properties = other612.properties;
+  __isset = other612.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -14564,6 +14609,7 @@ void CompactionRequest::printTo(std::ostream& out) 
const {
   out << ", " << "partitionname="; (__isset.partitionname ? (out << to_string(partitionname)) : (out << "<null>"));
   out << ", " << "type=" << to_string(type);
   out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out << "<null>"));
+  out << ", " << "properties="; (__isset.properties ? (out << to_string(properties)) : (out << "<null>"));
   out << ")";
 }
 
@@ -14616,11 +14662,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
   (void) b;
 }
 

[8/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per 
request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e276929d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e276929d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e276929d

Branch: refs/heads/branch-2.1
Commit: e276929df46cca5acb3b392fed4d72313748c8f7
Parents: 4b55335
Author: Wei Zheng 
Authored: Fri May 27 15:16:01 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 15:16:01 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  161 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/hive-txn-schema-2.1.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../derby/upgrade-2.0.0-to-2.1.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../upgrade/mssql/hive-schema-2.1.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/hive-txn-schema-2.1.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/hive-txn-schema-2.1.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../postgres/hive-txn-schema-2.1.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2020 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  980 
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java   |   32 +-
 .../api/GetFileMetadataByExprResult.java|   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java|   44 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2188 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1232 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  315 +--
 .../hive_metastore/ThriftHiveMetastore.py   |  830 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  197 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   56 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 65 files changed, 4780 insertions(+), 4145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git 
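The diffstat shows where the user-facing pieces of this change live: HiveParser.g and DDLSemanticAnalyzer for the new ALTER TABLE ... COMPACT clause, AlterTableSimpleDesc to carry the map, and Initiator/Worker/CompactorMR to apply it. A hedged JDBC sketch of how the two levels of options could be used; the property key and the WITH OVERWRITE TBLPROPERTIES wording are assumptions about this feature, not read from the truncated diff:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CompactionOptionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // Per table: options stored in TBLPROPERTIES travel with the table.
      stmt.execute("ALTER TABLE acid_tbl SET TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='2048')");
      // Per request: options that apply to one compaction run only.
      stmt.execute("ALTER TABLE acid_tbl COMPACT 'major' WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='3072')");
    }
  }
}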

[1/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 4b553358b -> e276929df


http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4db9680..8d88cd7 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -9984,6 +9984,7 @@ class CompactionRequest:
- partitionname
- type
- runas
+   - properties
   """
 
   thrift_spec = (
@@ -9993,14 +9994,16 @@ class CompactionRequest:
 (3, TType.STRING, 'partitionname', None, None, ), # 3
 (4, TType.I32, 'type', None, None, ), # 4
 (5, TType.STRING, 'runas', None, None, ), # 5
+(6, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, 
), # 6
   )
 
-  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None,):
+  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None, properties=None,):
 self.dbname = dbname
 self.tablename = tablename
 self.partitionname = partitionname
 self.type = type
 self.runas = runas
+self.properties = properties
 
   def read(self, iprot):
 if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
@@ -10036,6 +10039,17 @@ class CompactionRequest:
   self.runas = iprot.readString()
 else:
   iprot.skip(ftype)
+  elif fid == 6:
+if ftype == TType.MAP:
+  self.properties = {}
+  (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
+  for _i466 in xrange(_size462):
+_key467 = iprot.readString()
+_val468 = iprot.readString()
+self.properties[_key467] = _val468
+  iprot.readMapEnd()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -10066,6 +10080,14 @@ class CompactionRequest:
   oprot.writeFieldBegin('runas', TType.STRING, 5)
   oprot.writeString(self.runas)
   oprot.writeFieldEnd()
+if self.properties is not None:
+  oprot.writeFieldBegin('properties', TType.MAP, 6)
+  oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
+  for kiter469,viter470 in self.properties.items():
+oprot.writeString(kiter469)
+oprot.writeString(viter470)
+  oprot.writeMapEnd()
+  oprot.writeFieldEnd()
 oprot.writeFieldStop()
 oprot.writeStructEnd()
 
@@ -10086,6 +10108,7 @@ class CompactionRequest:
 value = (value * 31) ^ hash(self.partitionname)
 value = (value * 31) ^ hash(self.type)
 value = (value * 31) ^ hash(self.runas)
+value = (value * 31) ^ hash(self.properties)
 return value
 
   def __repr__(self):
@@ -10387,11 +10410,11 @@ class ShowCompactResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.compacts = []
-  (_etype465, _size462) = iprot.readListBegin()
-  for _i466 in xrange(_size462):
-_elem467 = ShowCompactResponseElement()
-_elem467.read(iprot)
-self.compacts.append(_elem467)
+  (_etype474, _size471) = iprot.readListBegin()
+  for _i475 in xrange(_size471):
+_elem476 = ShowCompactResponseElement()
+_elem476.read(iprot)
+self.compacts.append(_elem476)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10408,8 +10431,8 @@ class ShowCompactResponse:
 if self.compacts is not None:
   oprot.writeFieldBegin('compacts', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-  for iter468 in self.compacts:
-iter468.write(oprot)
+  for iter477 in self.compacts:
+iter477.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -10490,10 +10513,10 @@ class AddDynamicPartitions:
   elif fid == 4:
 if ftype == TType.LIST:
   self.partitionnames = []
-  (_etype472, _size469) = iprot.readListBegin()
-  for _i473 in xrange(_size469):
-_elem474 = iprot.readString()
-self.partitionnames.append(_elem474)
+  (_etype481, _size478) = iprot.readListBegin()
+  for _i482 in xrange(_size478):
+_elem483 = iprot.readString()
+self.partitionnames.append(_elem483)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10527,8 +10550,8 @@ class AddDynamicPartitions:
 if self.partitionnames is not None:
   oprot.writeFieldBegin('partitionnames', TType.LIST, 4)
   oprot.writeListBegin(TType.STRING, len(self.partitionnames))

[7/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 298384c..5a35a50 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size773;
-::apache::thrift::protocol::TType _etype776;
-xfer += iprot->readListBegin(_etype776, _size773);
-this->success.resize(_size773);
-uint32_t _i777;
-for (_i777 = 0; _i777 < _size773; ++_i777)
+uint32_t _size781;
+::apache::thrift::protocol::TType _etype784;
+xfer += iprot->readListBegin(_etype784, _size781);
+this->success.resize(_size781);
+uint32_t _i785;
+for (_i785 = 0; _i785 < _size781; ++_i785)
 {
-  xfer += iprot->readString(this->success[_i777]);
+  xfer += iprot->readString(this->success[_i785]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter778;
-  for (_iter778 = this->success.begin(); _iter778 != this->success.end(); ++_iter778)
+  std::vector<std::string> ::const_iterator _iter786;
+  for (_iter786 = this->success.begin(); _iter786 != this->success.end(); ++_iter786)
   {
-xfer += oprot->writeString((*_iter778));
+xfer += oprot->writeString((*_iter786));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size779;
-::apache::thrift::protocol::TType _etype782;
-xfer += iprot->readListBegin(_etype782, _size779);
-(*(this->success)).resize(_size779);
-uint32_t _i783;
-for (_i783 = 0; _i783 < _size779; ++_i783)
+uint32_t _size787;
+::apache::thrift::protocol::TType _etype790;
+xfer += iprot->readListBegin(_etype790, _size787);
+(*(this->success)).resize(_size787);
+uint32_t _i791;
+for (_i791 = 0; _i791 < _size787; ++_i791)
 {
-  xfer += iprot->readString((*(this->success))[_i783]);
+  xfer += iprot->readString((*(this->success))[_i791]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size784;
-::apache::thrift::protocol::TType _etype787;
-xfer += iprot->readListBegin(_etype787, _size784);
-this->success.resize(_size784);
-uint32_t _i788;
-for (_i788 = 0; _i788 < _size784; ++_i788)
+uint32_t _size792;
+::apache::thrift::protocol::TType _etype795;
+xfer += iprot->readListBegin(_etype795, _size792);
+this->success.resize(_size792);
+uint32_t _i796;
+for (_i796 = 0; _i796 < _size792; ++_i796)
 {
-  xfer += iprot->readString(this->success[_i788]);
+  xfer += iprot->readString(this->success[_i796]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter789;
-  for (_iter789 = this->success.begin(); _iter789 != this->success.end(); ++_iter789)
+  std::vector<std::string> ::const_iterator _iter797;
+  for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797)
   {
-xfer += oprot->writeString((*_iter789));
+xfer += oprot->writeString((*_iter797));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

hive git commit: HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)

2016-06-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 78bedc8e2 -> 1e2e68816


HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1e2e6881
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1e2e6881
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1e2e6881

Branch: refs/heads/branch-1
Commit: 1e2e68816b884dcd0f6dc2d0de8601bf04e63c52
Parents: 78bedc8
Author: Wei Zheng 
Authored: Tue Jun 14 15:30:56 2016 -0700
Committer: Wei Zheng 
Committed: Tue Jun 14 15:32:37 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 44 ++--
 1 file changed, 22 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1e2e6881/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 5a7ed17..717f631 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -83,7 +83,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   private static ScheduledExecutorService heartbeatExecutorService = null;
  private ScheduledFuture<?> heartbeatTask = null;
   private Runnable shutdownRunner = null;
-  static final int SHUTDOWN_HOOK_PRIORITY = 0;
+  private static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   DbTxnManager() {
 shutdownRunner = new Runnable() {
@@ -160,10 +160,11 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 getLockManager();
 
 boolean atLeastOneLock = false;
+queryId = plan.getQueryId();
 
-LockRequestBuilder rqstBuilder = new LockRequestBuilder(plan.getQueryId());
+LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
 //link queryId to txnId
-LOG.info("Setting lock request transaction to " + 
JavaUtils.txnIdToString(txnId) + " for queryId=" + plan.getQueryId());
+LOG.info("Setting lock request transaction to " + 
JavaUtils.txnIdToString(txnId) + " for queryId=" + queryId);
 rqstBuilder.setTransactionId(txnId)
 .setUser(username);
 
@@ -303,7 +304,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 // Make sure we need locks.  It's possible there's nothing to lock in
 // this operation.
 if (!atLeastOneLock) {
-  LOG.debug("No locks needed for queryId" + plan.getQueryId());
+  LOG.debug("No locks needed for queryId" + queryId);
   return null;
 }
 
@@ -311,7 +312,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 if(isTxnOpen()) {
   statementId++;
 }
-LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), 
isBlocking, locks);
+LockState lockState = lockMgr.lock(rqstBuilder.build(), queryId, 
isBlocking, locks);
 ctx.setHiveLocks(locks);
 return lockState;
   }
@@ -323,15 +324,13 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 return t;
   }
   /**
-   * This is for testing only.
+   * This is for testing only. Normally client should call {@link 
#acquireLocks(QueryPlan, Context, String, boolean)}
* @param delay time to delay for first heartbeat
-   * @return null if no locks were needed
*/
   @VisibleForTesting
   void acquireLocksWithHeartbeatDelay(QueryPlan plan, Context ctx, String 
username, long delay) throws LockException {
 acquireLocks(plan, ctx, username, true);
 ctx.setHeartbeater(startHeartbeat(delay));
-queryId = plan.getQueryId();
   }
   
   @Override
@@ -437,24 +436,25 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 }
   }
 
-  private Heartbeater startHeartbeat() throws LockException {
-return startHeartbeat(0);
-  }
-
   /**
-   *  This is for testing only.  Normally client should call {@link 
#startHeartbeat()}
-   *  Make the heartbeater start before an initial delay period.
-   *  @param delay time to delay before first execution, in milliseconds
-   *  @return heartbeater
+   * Start the heartbeater threadpool and return the task.
+   * @param initialDelay time to delay before first execution, in milliseconds
+   * @return heartbeater
*/
-  Heartbeater startHeartbeat(long delay) throws LockException {
+  private Heartbeater startHeartbeat(long initialDelay) throws LockException {
 long heartbeatInterval = getHeartbeatInterval(conf);
 assert heartbeatInterval > 0;
 Heartbeater heartbeater = new Heartbeater(this, conf);
+// For negative testing purpose..
+if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && 

hive git commit: HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)

2016-06-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 942f731c7 -> 40a15536a


HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/40a15536
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/40a15536
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/40a15536

Branch: refs/heads/master
Commit: 40a15536a37893940a4ecfc06c8eda46cd5d50ea
Parents: 942f731
Author: Wei Zheng 
Authored: Tue Jun 14 15:30:56 2016 -0700
Committer: Wei Zheng 
Committed: Tue Jun 14 15:30:56 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 45 ++--
 1 file changed, 22 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/40a15536/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 9988eec..5b6f20c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.lockmgr;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,7 +83,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   private static ScheduledExecutorService heartbeatExecutorService = null;
  private ScheduledFuture<?> heartbeatTask = null;
   private Runnable shutdownRunner = null;
-  static final int SHUTDOWN_HOOK_PRIORITY = 0;
+  private static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   DbTxnManager() {
 shutdownRunner = new Runnable() {
@@ -161,10 +160,11 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 getLockManager();
 
 boolean atLeastOneLock = false;
+queryId = plan.getQueryId();
 
-LockRequestBuilder rqstBuilder = new LockRequestBuilder(plan.getQueryId());
+LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
 //link queryId to txnId
-LOG.info("Setting lock request transaction to " + 
JavaUtils.txnIdToString(txnId) + " for queryId=" + plan.getQueryId());
+LOG.info("Setting lock request transaction to " + 
JavaUtils.txnIdToString(txnId) + " for queryId=" + queryId);
 rqstBuilder.setTransactionId(txnId)
 .setUser(username);
 
@@ -304,7 +304,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 // Make sure we need locks.  It's possible there's nothing to lock in
 // this operation.
 if (!atLeastOneLock) {
-  LOG.debug("No locks needed for queryId" + plan.getQueryId());
+  LOG.debug("No locks needed for queryId" + queryId);
   return null;
 }
 
@@ -312,7 +312,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 if(isTxnOpen()) {
   statementId++;
 }
-LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), 
isBlocking, locks);
+LockState lockState = lockMgr.lock(rqstBuilder.build(), queryId, 
isBlocking, locks);
 ctx.setHiveLocks(locks);
 return lockState;
   }
@@ -324,15 +324,13 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 return t;
   }
   /**
-   * This is for testing only.
+   * This is for testing only. Normally client should call {@link 
#acquireLocks(QueryPlan, Context, String, boolean)}
* @param delay time to delay for first heartbeat
-   * @return null if no locks were needed
*/
   @VisibleForTesting
   void acquireLocksWithHeartbeatDelay(QueryPlan plan, Context ctx, String 
username, long delay) throws LockException {
 acquireLocks(plan, ctx, username, true);
 ctx.setHeartbeater(startHeartbeat(delay));
-queryId = plan.getQueryId();
   }
 
 
@@ -439,24 +437,25 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 }
   }
 
-  private Heartbeater startHeartbeat() throws LockException {
-return startHeartbeat(0);
-  }
-
   /**
-   *  This is for testing only.  Normally client should call {@link 
#startHeartbeat()}
-   *  Make the heartbeater start before an initial delay period.
-   *  @param delay time to delay before first execution, in milliseconds
-   *  @return heartbeater
+   * Start the heartbeater threadpool and return the task.
+   * @param initialDelay time to delay before first execution, in milliseconds
+   * @return heartbeater
*/
-  Heartbeater startHeartbeat(long delay) throws LockException 
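The change is easiest to see in the signature above: the heartbeat used to be scheduled with no initial delay, firing immediately after lock acquisition when nothing can possibly have timed out yet. A standalone sketch of the scheduling pattern, not the actual DbTxnManager code; the interval value is arbitrary (Hive derives it from the transaction timeout):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class HeartbeatDelaySketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    final long intervalMs = 1000L;          // placeholder for the configured heartbeat interval
    final long initialDelayMs = intervalMs; // the fix: skip the redundant immediate beat

    Runnable heartbeater = new Runnable() {
      @Override
      public void run() {
        System.out.println("heartbeat at " + System.currentTimeMillis());
      }
    };

    // First run after initialDelayMs, then every intervalMs thereafter.
    ScheduledFuture<?> task = pool.scheduleAtFixedRate(heartbeater, initialDelayMs, intervalMs, TimeUnit.MILLISECONDS);

    Thread.sleep(3500);  // observe a few beats, then stop
    task.cancel(false);
    pool.shutdown();
  }
}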

hive git commit: HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 1e8a31e8f -> ff55d0a67


HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed 
by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff55d0a6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff55d0a6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff55d0a6

Branch: refs/heads/master
Commit: ff55d0a67e59c15b5ccfbdf1317bfd60cf057a30
Parents: 1e8a31e
Author: Wei 
Authored: Thu Mar 10 13:39:13 2016 -0800
Committer: Wei 
Committed: Thu Mar 10 13:39:13 2016 -0800

--
 .../hadoop/hive/metastore/TestHiveMetaStore.java   |  1 +
 .../metastore/TransactionalValidationListener.java | 11 +++
 .../test/queries/clientnegative/alter_external_acid.q  |  9 +
 .../test/queries/clientnegative/create_external_acid.q |  6 ++
 .../results/clientnegative/alter_external_acid.q.out   | 13 +
 .../results/clientnegative/create_external_acid.q.out  |  5 +
 6 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index a55c186..5da4165 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2944,6 +2944,7 @@ public abstract class TestHiveMetaStore extends TestCase {
 
 tbl.setSd(sd);
 tbl.setLastAccessTime(lastAccessTime);
+tbl.setTableType(TableType.MANAGED_TABLE.toString());
 
 client.createTable(tbl);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index 96158f8..3e74675 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -86,6 +86,12 @@ final class TransactionalValidationListener extends 
MetaStorePreEventListener {
 throw new MetaException("The table must be bucketed and stored using 
an ACID compliant" +
 " format (such as ORC)");
   }
+
+  if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) 
{
+throw new MetaException(newTable.getDbName() + "." + 
newTable.getTableName() +
+" cannot be declared transactional because it's an external 
table");
+  }
+
   return;
 }
 Table oldTable = context.getOldTable();
@@ -144,6 +150,11 @@ final class TransactionalValidationListener extends 
MetaStorePreEventListener {
 " format (such as ORC)");
   }
 
+  if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) 
{
+throw new MetaException(newTable.getDbName() + "." + 
newTable.getTableName() +
+" cannot be declared transactional because it's an external 
table");
+  }
+
   // normalize prop name
   parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, 
Boolean.TRUE.toString());
   return;

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/alter_external_acid.q
--
diff --git a/ql/src/test/queries/clientnegative/alter_external_acid.q 
b/ql/src/test/queries/clientnegative/alter_external_acid.q
new file mode 100644
index 000..7807278
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_external_acid.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) 
into 2 buckets stored as orc;
+
+alter table acid_external set TBLPROPERTIES ('transactional'='true');
+
+drop table acid_external;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/create_external_acid.q
--
diff --git a/ql/src/test/queries/clientnegative/create_external_acid.q 

[1/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 f7c8fb527 -> 24b366f0b


http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index 96158f8..3e74675 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -86,6 +86,12 @@ final class TransactionalValidationListener extends 
MetaStorePreEventListener {
 throw new MetaException("The table must be bucketed and stored using 
an ACID compliant" +
 " format (such as ORC)");
   }
+
+  if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) 
{
+throw new MetaException(newTable.getDbName() + "." + 
newTable.getTableName() +
+" cannot be declared transactional because it's an external 
table");
+  }
+
   return;
 }
 Table oldTable = context.getOldTable();
@@ -144,6 +150,11 @@ final class TransactionalValidationListener extends 
MetaStorePreEventListener {
 " format (such as ORC)");
   }
 
+  if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) 
{
+throw new MetaException(newTable.getDbName() + "." + 
newTable.getTableName() +
+" cannot be declared transactional because it's an external 
table");
+  }
+
   // normalize prop name
   parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, 
Boolean.TRUE.toString());
   return;

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/queries/clientnegative/alter_external_acid.q
--
diff --git a/ql/src/test/queries/clientnegative/alter_external_acid.q 
b/ql/src/test/queries/clientnegative/alter_external_acid.q
new file mode 100644
index 000..7807278
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_external_acid.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) 
into 2 buckets stored as orc;
+
+alter table acid_external set TBLPROPERTIES ('transactional'='true');
+
+drop table acid_external;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/queries/clientnegative/create_external_acid.q
--
diff --git a/ql/src/test/queries/clientnegative/create_external_acid.q 
b/ql/src/test/queries/clientnegative/create_external_acid.q
new file mode 100644
index 000..d6b2d84
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_external_acid.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) 
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/results/clientnegative/alter_external_acid.q.out
--
diff --git a/ql/src/test/results/clientnegative/alter_external_acid.q.out 
b/ql/src/test/results/clientnegative/alter_external_acid.q.out
new file mode 100644
index 000..69bba3b
--- /dev/null
+++ b/ql/src/test/results/clientnegative/alter_external_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create external table acid_external (a int, b varchar(128)) 
clustered by (b) into 2 buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_external
+POSTHOOK: query: create external table acid_external (a int, b varchar(128)) 
clustered by (b) into 2 buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_external
+PREHOOK: query: alter table acid_external set TBLPROPERTIES 
('transactional'='true')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@acid_external
+PREHOOK: Output: default@acid_external
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. 
default.acid_external cannot be declared transactional because it's an external 
table

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/results/clientnegative/create_external_acid.q.out
--
diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out 

hive git commit: HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)

2016-03-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master ff55d0a67 -> 456a91ecd


HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped 
before compaction (Wei Zheng, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/456a91ec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/456a91ec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/456a91ec

Branch: refs/heads/master
Commit: 456a91ecde6a449177a76fb34ad9b5f13983821b
Parents: ff55d0a
Author: Wei Zheng 
Authored: Thu Mar 10 14:37:35 2016 -0800
Committer: Wei Zheng 
Committed: Thu Mar 10 14:37:35 2016 -0800

--
 .../hive/metastore/AcidEventListener.java   |  94 +
 .../hadoop/hive/metastore/HiveMetaStore.java|   1 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|  20 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 167 ++-
 .../hadoop/hive/metastore/txn/TxnStore.java |  37 ++--
 .../hadoop/hive/metastore/txn/TxnUtils.java |  18 ++
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  27 +--
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |  22 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 209 ++-
 9 files changed, 518 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
new file mode 100644
index 000..71ad916
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+
+
+/**
+ * It handles cleanup of dropped partition/table/database in ACID related 
metastore tables
+ */
+public class AcidEventListener extends MetaStoreEventListener {
+
+  private TxnStore txnHandler;
+  private HiveConf hiveConf;
+
+  public AcidEventListener(Configuration configuration) {
+super(configuration);
+hiveConf = (HiveConf) configuration;
+  }
+
+  @Override
+  public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
+// We can loop thru all the tables to check if they are ACID first and 
then perform cleanup,
+// but it's more efficient to unconditionally perform cleanup for the 
database, especially
+// when there are a lot of tables
+txnHandler = getTxnHandler();
+txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), 
null, null);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent)  throws MetaException {
+if (TxnUtils.isAcidTable(tableEvent.getTable())) {
+  txnHandler = getTxnHandler();
+  txnHandler.cleanupRecords(HiveObjectType.TABLE, null, 
tableEvent.getTable(), null);
+}
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent)  throws 
MetaException {
+if (TxnUtils.isAcidTable(partitionEvent.getTable())) {
+  txnHandler = getTxnHandler();
+  txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, 
partitionEvent.getTable(),
+  partitionEvent.getPartitionIterator());
+}
+  }
+
+  private TxnStore getTxnHandler() {
+boolean hackOn = HiveConf.getBoolVar(hiveConf, 
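AcidEventListener is wired into the metastore internally (the one-line HiveMetaStore.java change in the diffstat above). A user-supplied listener follows the same shape and would be registered through the hive.metastore.event.listeners property; a minimal hypothetical example, with a made-up class name and log output instead of txn-metadata cleanup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.DropTableEvent;

// Hypothetical listener: logs table drops rather than cleaning ACID tables.
public class LoggingDropListener extends MetaStoreEventListener {

  public LoggingDropListener(Configuration config) {
    super(config);
  }

  @Override
  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
    // Same hook AcidEventListener uses to purge TXN_COMPONENTS and friends.
    System.out.println("Table dropped: " + tableEvent.getTable().getTableName());
  }
}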

[2/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
new file mode 100644
index 000..b005759
--- /dev/null
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
@@ -0,0 +1,3224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.FunctionType;
+import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.ResourceType;
+import org.apache.hadoop.hive.metastore.api.ResourceUri;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.HiveInputFormat;
+import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public abstract class TestHiveMetaStore extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestHiveMetaStore.class);
+  protected static HiveMetaStoreClient client;
+  protected static HiveConf hiveConf;
+  protected static Warehouse warehouse;
+  protected static boolean isThriftClient = false;
+
+  private static final String TEST_DB1_NAME = "testdb1";
+  private static final String 

[3/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed 
by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/24b366f0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/24b366f0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/24b366f0

Branch: refs/heads/branch-1
Commit: 24b366f0bbe785b6f478881e8224e43d7aa33094
Parents: f7c8fb5
Author: Wei 
Authored: Thu Mar 10 13:58:42 2016 -0800
Committer: Wei 
Committed: Thu Mar 10 13:58:42 2016 -0800

--
 .../hive/metastore/TestHiveMetaStore.java   |1 +
 .../hive/metastore/TestHiveMetaStore.java.orig  | 3224 ++
 .../TransactionalValidationListener.java|   11 +
 .../clientnegative/alter_external_acid.q|9 +
 .../clientnegative/create_external_acid.q   |6 +
 .../clientnegative/alter_external_acid.q.out|   13 +
 .../clientnegative/create_external_acid.q.out   |5 +
 7 files changed, 3269 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index b005759..605dc9d 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2941,6 +2941,7 @@ public abstract class TestHiveMetaStore extends TestCase {
 
 tbl.setSd(sd);
 tbl.setLastAccessTime(lastAccessTime);
+tbl.setTableType(TableType.MANAGED_TABLE.toString());
 
 client.createTable(tbl);
 



[1/2] hive git commit: HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 24b366f0b -> 73a677be3


http://git-wip-us.apache.org/repos/asf/hive/blob/73a677be/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
deleted file mode 100644
index b005759..000
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig
+++ /dev/null
@@ -1,3224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.thrift.TException;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-
-public abstract class TestHiveMetaStore extends TestCase {
-  private static final Log LOG = LogFactory.getLog(TestHiveMetaStore.class);
-  protected static HiveMetaStoreClient client;
-  protected static HiveConf hiveConf;
-  protected static Warehouse warehouse;
-  protected static boolean isThriftClient = false;
-
-  private 

[2/2] hive git commit: HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-03-10 Thread weiz
HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/73a677be
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/73a677be
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/73a677be

Branch: refs/heads/branch-1
Commit: 73a677be3e2027b61ae043544311e9f296fab613
Parents: 24b366f
Author: Wei 
Authored: Thu Mar 10 14:02:17 2016 -0800
Committer: Wei 
Committed: Thu Mar 10 14:02:17 2016 -0800

--
 .../hive/metastore/TestHiveMetaStore.java.orig  | 3224 --
 1 file changed, 3224 deletions(-)
--




hive git commit: HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)

2016-03-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 73a677be3 -> 214e4b6ff


HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped 
before compaction (Wei Zheng, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/214e4b6f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/214e4b6f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/214e4b6f

Branch: refs/heads/branch-1
Commit: 214e4b6ffedbdc0f610babcf1156cd32f0659db3
Parents: 73a677b
Author: Wei Zheng 
Authored: Thu Mar 10 16:57:26 2016 -0800
Committer: Wei Zheng 
Committed: Thu Mar 10 16:57:26 2016 -0800

--
 .../hive/metastore/AcidEventListener.java   |  69 +++
 .../hadoop/hive/metastore/HiveMetaStore.java|   1 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|  20 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 183 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  25 +--
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |  22 +--
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 186 +++
 7 files changed, 460 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/214e4b6f/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
new file mode 100644
index 000..767bc54
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+
+
+/**
+ * It handles cleanup of dropped partition/table/database in ACID related 
metastore tables
+ */
+public class AcidEventListener extends MetaStoreEventListener {
+
+  private TxnHandler txnHandler;
+  private HiveConf hiveConf;
+
+  public AcidEventListener(Configuration configuration) {
+super(configuration);
+hiveConf = (HiveConf) configuration;
+  }
+
+  @Override
+  public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
+// We can loop thru all the tables to check if they are ACID first and 
then perform cleanup,
+// but it's more efficient to unconditionally perform cleanup for the 
database, especially
+// when there are a lot of tables
+txnHandler = new TxnHandler(hiveConf);
+txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), 
null, null);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent)  throws MetaException {
+if (TxnHandler.isAcidTable(tableEvent.getTable())) {
+  txnHandler = new TxnHandler(hiveConf);
+  txnHandler.cleanupRecords(HiveObjectType.TABLE, null, 
tableEvent.getTable(), null);
+}
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent)  throws 
MetaException {
+if (TxnHandler.isAcidTable(partitionEvent.getTable())) {
+  txnHandler = new TxnHandler(hiveConf);
+  txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, 
partitionEvent.getTable(),
+  partitionEvent.getPartitionIterator());
+}
+  }
+}
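
For readers tracing how this listener takes effect: the metastore calls every registered MetaStoreEventListener after the corresponding DDL operation completes, and the one-line HiveMetaStore.java change in the diffstat wires AcidEventListener in directly. The same contract is open to user code. Below is a minimal sketch of a custom listener, assuming only the APIs visible in the diff above; the package, class name, and log line are invented for illustration, and a user-supplied listener like this would normally be registered through the hive.metastore.event.listeners property rather than hard-wired:

// Hypothetical example, not part of this commit: a minimal listener that
// follows the same MetaStoreEventListener contract as AcidEventListener.
package com.example.hooks;  // invented package

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.DropTableEvent;

public class DropAuditListener extends MetaStoreEventListener {

  public DropAuditListener(Configuration config) {
    super(config);
  }

  @Override
  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
    // Called by the metastore after each successful DROP TABLE.
    System.out.println("Table dropped: " + tableEvent.getTable().getTableName());
  }
}

Setting hive.metastore.event.listeners=com.example.hooks.DropAuditListener in hive-site.xml should have such a class run alongside the built-in ACID cleanup shown above.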

http://git-wip-us.apache.org/repos/asf/hive/blob/214e4b6f/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git 

hive git commit: HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)

2016-03-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 214e4b6ff -> 1c44f4ccd


HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, 
reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1c44f4cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1c44f4cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1c44f4cc

Branch: refs/heads/branch-1
Commit: 1c44f4ccdcf1d2e47b9132a45e57c04b27ec6ac2
Parents: 214e4b6
Author: Wei Zheng 
Authored: Mon Mar 14 14:45:54 2016 -0700
Committer: Wei Zheng 
Committed: Mon Mar 14 14:45:54 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java  |  1 +
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java  |  4 
 .../test/queries/clientnegative/compact_non_acid_table.q | 11 +++
 ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q   |  2 +-
 .../results/clientnegative/compact_non_acid_table.q.out  | 11 +++
 .../test/results/clientpositive/dbtxnmgr_compact1.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact2.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact3.q.out  |  4 ++--
 10 files changed, 36 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 77e82a4..160a31d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -407,6 +407,7 @@ public enum ErrorMsg {
   TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on 
one partition at a " +
   "time."),
   DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not support in current 
context"),
+  NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on 
non-ACID table {0}.{1}", true),
 
   UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing 
rewritten update or " +
   "delete query"),

http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 3d8ca92..414293c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1710,6 +1710,10 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException 
{
 
 Table tbl = db.getTable(desc.getTableName());
+if (!AcidUtils.isAcidTable(tbl)) {
+  throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, 
tbl.getDbName(),
+  tbl.getTableName());
+}
 
 String partName = null;
 if (desc.getPartSpec() == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/test/queries/clientnegative/compact_non_acid_table.q
--
diff --git a/ql/src/test/queries/clientnegative/compact_non_acid_table.q 
b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
new file mode 100644
index 000..e9faa24
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
@@ -0,0 +1,11 @@
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+
+create table not_an_acid_table (a int, b varchar(128));
+
+alter table not_an_acid_table compact 'major';
+
+drop table not_an_acid_table;
\ No newline at end of file
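
With this guard in place, ALTER TABLE ... COMPACT is rejected unless the table is ACID (bucketed, stored as ORC, and marked transactional). A hedged end-to-end sketch over JDBC, assuming a HiveServer2 at the illustrative URL below, the Hive JDBC driver on the classpath, and a cluster already configured for transactions (DbTxnManager, concurrency support):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CompactAcidTable {
  public static void main(String[] args) throws Exception {
    // Illustrative URL; adjust host/port/database for your cluster.
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // Bucketing plus the transactional property makes the table ACID,
      // so the new DDLTask check allows the compaction request.
      stmt.execute("create table acid_t (a int, b varchar(128)) "
          + "clustered by (a) into 2 buckets stored as orc "
          + "tblproperties ('transactional'='true')");
      stmt.execute("alter table acid_t compact 'major'");
    }
  }
}

Issuing the same ALTER against the plain not_an_acid_table from the negative test above now surfaces error 10286 instead of queueing a compaction.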

http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
--
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q 
b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
index 7f71305..b86c6f9 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2 

hive git commit: HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)

2016-03-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master e7a175663 -> b6af0124b


HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, 
reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6af0124
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6af0124
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6af0124

Branch: refs/heads/master
Commit: b6af0124b351ba759a15c81f8ececd7920115b2f
Parents: e7a1756
Author: Wei Zheng 
Authored: Mon Mar 14 14:34:28 2016 -0700
Committer: Wei Zheng 
Committed: Mon Mar 14 14:34:28 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java  |  1 +
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java  |  4 
 .../test/queries/clientnegative/compact_non_acid_table.q | 11 +++
 ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q   |  2 +-
 .../results/clientnegative/compact_non_acid_table.q.out  | 11 +++
 .../test/results/clientpositive/dbtxnmgr_compact1.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact2.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact3.q.out  |  4 ++--
 10 files changed, 36 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index f0cc3a2..f091f67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -397,6 +397,7 @@ public enum ErrorMsg {
   TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on 
one partition at a " +
   "time."),
   DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not support in current 
context"),
+  NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on 
non-ACID table {0}.{1}", true),
 
   UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing 
rewritten update or " +
   "delete query"),

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 2a64cfa..56eecf6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1745,6 +1745,10 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException 
{
 
 Table tbl = db.getTable(desc.getTableName());
+if (!AcidUtils.isAcidTable(tbl)) {
+  throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, 
tbl.getDbName(),
+  tbl.getTableName());
+}
 
 String partName = null;
 if (desc.getPartSpec() == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientnegative/compact_non_acid_table.q
--
diff --git a/ql/src/test/queries/clientnegative/compact_non_acid_table.q 
b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
new file mode 100644
index 000..e9faa24
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
@@ -0,0 +1,11 @@
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+
+create table not_an_acid_table (a int, b varchar(128));
+
+alter table not_an_acid_table compact 'major';
+
+drop table not_an_acid_table;
\ No newline at end of file
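
The new ErrorMsg entry carries two positional placeholders, and the trailing boolean flags the message as a format template, so the database and table names passed by the DDLTask change are substituted MessageFormat-style into {0} and {1}. A self-contained illustration of that substitution using only java.text.MessageFormat, independent of Hive:

import java.text.MessageFormat;

public class ErrorMsgFormatDemo {
  public static void main(String[] args) {
    String template = "Compaction is not allowed on non-ACID table {0}.{1}";
    // Prints: Compaction is not allowed on non-ACID table default.not_an_acid_table
    System.out.println(MessageFormat.format(template, "default", "not_an_acid_table"));
  }
}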

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
--
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q 
b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
index 7f71305..b86c6f9 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2 buckets 

[1/2] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-03-30 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 f4468ce68 -> eda730320


HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1785ca00
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1785ca00
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1785ca00

Branch: refs/heads/branch-2.0
Commit: 1785ca000596177c28511dc151deb967c3ce1710
Parents: f4468ce
Author: Wei Zheng 
Authored: Thu Mar 24 17:29:59 2016 -0700
Committer: Wei Zheng 
Committed: Wed Mar 30 15:10:42 2016 -0700

--
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 11 +
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |  5 +++
 .../hive/ql/txn/compactor/CompactorThread.java  |  5 +++
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  9 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java|  6 +++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 
 6 files changed, 82 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
--
diff --git 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 4c77842..baeafad 100644
--- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.hcatalog.streaming;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -342,6 +343,11 @@ public class HiveEndPoint {
 return null;
   }
 } );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 LOG.error("Error closing connection to " + endPt, e);
   } catch (InterruptedException e) {
@@ -937,6 +943,11 @@ public class HiveEndPoint {
   }
 }
 );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 throw new ImpersonationFailed("Failed closing Txn Batch as user '" + 
username +
 "' on  endPoint :" + endPt, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index fbf5481..974184f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -224,6 +224,11 @@ public class Cleaner extends CompactorThread {
 return null;
   }
 });
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
+  ci.getFullPartitionName());}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 3f6b099..859caff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -173,6 +173,11 @@ abstract class CompactorThread extends Thread implements 
MetaStoreThread {
   return null;
 }
   });
+  try {
+FileSystem.closeAllForUGI(ugi);
+  } catch (IOException exception) {
+LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+  }
 
   if (wrapper.size() == 1) {
 LOG.debug("Running job as " + wrapper.get(0));
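
The leak being fixed follows a common Hadoop pattern: work executed under a per-table-owner proxy UGI populates the FileSystem cache with entries keyed by that UGI, and those entries are never evicted unless closed explicitly. A compressed sketch of the doAs-plus-cleanup idiom, assuming a simple-auth cluster; the proxy user name is invented for the example:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiCleanupSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // "tableOwner" is an invented user name for the sketch.
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        "tableOwner", UserGroupInformation.getLoginUser());
    try {
      ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // Every FileSystem.get() under this UGI caches a handle keyed by it.
        FileSystem fs = FileSystem.get(conf);
        fs.exists(new Path("/tmp"));
        return null;
      });
    } finally {
      // The point of this commit: without this call the cached handles leak.
      FileSystem.closeAllForUGI(ugi);
    }
  }
}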


[2/2] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-03-30 Thread weiz
HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, 
ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eda73032
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eda73032
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eda73032

Branch: refs/heads/branch-2.0
Commit: eda7303209c2d59428261201c7926904ba127bc3
Parents: 1785ca0
Author: Wei Zheng 
Authored: Thu Mar 24 22:18:32 2016 -0700
Committer: Wei Zheng 
Committed: Wed Mar 30 15:10:50 2016 -0700

--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java   | 5 +++--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++--
 ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++--
 3 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 974184f..64edfb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -227,8 +227,9 @@ public class Cleaner extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi + 
" for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index 9d71c5a..465896d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -222,8 +222,8 @@ public class Initiator extends CompactorThread {
   try {
 FileSystem.closeAllForUGI(ugi);
   } catch (IOException exception) {
-LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-ci.getFullPartitionName());
+LOG.error("Could not clean up file-system handles for UGI: " + ugi + " 
for " +
+ci.getFullPartitionName(), exception);
   }
   return compactionType;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 8dbe3d4..cdae26f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -175,8 +175,9 @@ public class Worker extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi, exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi + " for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCompacted(ci);
 } catch (Exception e) {
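
The point of this addendum is narrow but worth spelling out: concatenating an exception into the message string records only exception.toString(), while passing it as the final argument records the full stack trace. A minimal SLF4J illustration (the commons-logging API used on branch-1 offers the same two-argument overload):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogThrowableDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogThrowableDemo.class);

  public static void main(String[] args) {
    Exception e = new IllegalStateException("disk full");
    LOG.error("cleanup failed: " + e);  // toString() only; stack trace lost
    LOG.error("cleanup failed", e);     // message plus full stack trace
  }
}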



hive git commit: HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman)

2016-03-21 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 505c5585c -> 0aaddb7d7


HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc 
improvements (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0aaddb7d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0aaddb7d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0aaddb7d

Branch: refs/heads/branch-1
Commit: 0aaddb7d753a2936c973d9ab99e6edb2554f94ae
Parents: 505c558
Author: Wei Zheng 
Authored: Mon Mar 21 14:50:12 2016 -0700
Committer: Wei Zheng 
Committed: Mon Mar 21 14:50:12 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 +
 .../metastore/txn/CompactionTxnHandler.java | 120 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|   4 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 245 ++-
 .../hive/metastore/txn/TestTxnHandler.java  |  83 ++-
 5 files changed, 333 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0aaddb7d/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4a575b3..b78bea2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -573,6 +573,13 @@ public class HiveConf extends Configuration {
 "select query has incorrect syntax or something similar inside a 
transaction, the\n" +
 "entire transaction will fail and fall-back to DataNucleus will not be 
possible. You\n" +
 "should disable the usage of direct SQL inside transactions if that 
happens in your case."),
+METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 
100, "The maximum\n" +
+" size of a query string (in KB)."),
+
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause",
 1000,
+"The maximum number of values in a IN clause. Once exceeded, it will 
be broken into\n" +
+" multiple OR separated IN clauses."),
+
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
+1000, "The maximum number of values in a VALUES clause for INSERT 
statement."),
 
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
 "Thrift does not support nulls in maps, so any nulls present in maps 
retrieved from ORM must " +
 "either be pruned or converted to empty strings. Some backing dbs such 
as Oracle persist empty strings " +
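
For reference, the three knobs added above behave like any other ConfVars entry and can be read or overridden programmatically. A small sketch, with illustrative values rather than recommendations:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class DirectSqlLimitsDemo {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Cap generated queries at 10 KB and IN lists at 500 elements.
    conf.setIntVar(ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 10);
    conf.setIntVar(ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 500);
    System.out.println(conf.getIntVar(ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH));
  }
}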

http://git-wip-us.apache.org/repos/asf/hive/blob/0aaddb7d/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 4d736b9..28e06ed 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -364,36 +364,38 @@ public class CompactionTxnHandler extends TxnHandler {
 rs = stmt.executeQuery(s);
List<Long> txnids = new ArrayList<>();
 while (rs.next()) txnids.add(rs.getLong(1));
+// Remove entries from txn_components, as there may be aborted txn 
components
 if (txnids.size() > 0) {
+  List<String> queries = new ArrayList<String>();
+
+  // Prepare prefix and suffix
+  StringBuilder prefix = new StringBuilder();
+  StringBuilder suffix = new StringBuilder();
+
+  prefix.append("delete from TXN_COMPONENTS where ");
 
-  // Remove entries from txn_components, as there may be aborted txn 
components
-  StringBuilder buf = new StringBuilder();
-  //todo: add a safeguard to make sure IN clause is not too large; 
break up by txn id
-  buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
-  boolean first = true;
-  for (long id : txnids) {
-if (first) first = false;
-else buf.append(", ");
-buf.append(id);
-  }
   //because 1 txn may include different partitions/tables even in auto 
commit mode
-  buf.append(") and tc_database = '");
-  buf.append(info.dbname);
-  buf.append("' and tc_table = '");
-  buf.append(info.tableName);
-  buf.append("'");
+ 
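
The removed TODO above marks the problem: the old code emitted a single unbounded IN list. The replacement assembles a shared prefix and suffix and breaks the transaction ids into several OR-joined IN clauses. A simplified, self-contained version of that splitting logic (the chunk size is hard-coded here for illustration; the real helper sizes chunks from the new hive.direct.sql.* limits):

import java.util.ArrayList;
import java.util.List;

public class InClauseSplitter {
  // Builds: <prefix>(<column> in (...) or <column> in (...))<suffix>
  static String buildQuery(String prefix, String column, List<Long> ids,
                           String suffix, int maxPerClause) {
    StringBuilder sb = new StringBuilder(prefix).append("(");
    for (int i = 0; i < ids.size(); i++) {
      if (i % maxPerClause == 0) {
        // Close the previous IN list (if any) and start a new one.
        sb.append(i == 0 ? "" : ") or ").append(column).append(" in (");
      } else {
        sb.append(", ");
      }
      sb.append(ids.get(i));
    }
    sb.append("))").append(suffix);
    return sb.toString();
  }

  public static void main(String[] args) {
    List<Long> ids = new ArrayList<>();
    for (long i = 1; i <= 7; i++) ids.add(i);
    System.out.println(buildQuery("delete from TXN_COMPONENTS where ",
        "tc_txnid", ids, " and tc_database = 'db1'", 3));
    // delete from TXN_COMPONENTS where (tc_txnid in (1, 2, 3) or
    //   tc_txnid in (4, 5, 6) or tc_txnid in (7)) and tc_database = 'db1'
  }
}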

hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-03-24 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 6bfec2e97 -> 4fabd038c


HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, 
ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4fabd038
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4fabd038
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4fabd038

Branch: refs/heads/master
Commit: 4fabd038cf64b906a89726805958c43b97194291
Parents: 6bfec2e
Author: Wei Zheng 
Authored: Thu Mar 24 22:18:32 2016 -0700
Committer: Wei Zheng 
Committed: Thu Mar 24 22:18:32 2016 -0700

--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java   | 5 +++--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++--
 ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++--
 3 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 4c31a49..23b1b7f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -275,8 +275,9 @@ public class Cleaner extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi + 
" for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index 98ebf53..abbe5d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -235,8 +235,8 @@ public class Initiator extends CompactorThread {
   try {
 FileSystem.closeAllForUGI(ugi);
   } catch (IOException exception) {
-LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-ci.getFullPartitionName());
+LOG.error("Could not clean up file-system handles for UGI: " + ugi + " 
for " +
+ci.getFullPartitionName(), exception);
   }
   return compactionType;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index e21ca27..6238e2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -175,8 +175,9 @@ public class Worker extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi, exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi + " for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCompacted(ci);
 } catch (Exception e) {



hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-03-24 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 5bae0ad45 -> 82068205a


HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82068205
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82068205
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82068205

Branch: refs/heads/branch-1
Commit: 82068205a59ed4b6aeb2b353eb612ce0da73c5c2
Parents: 5bae0ad
Author: Wei Zheng 
Authored: Thu Mar 24 22:38:39 2016 -0700
Committer: Wei Zheng 
Committed: Thu Mar 24 22:38:39 2016 -0700

--
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 11 +
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |  6 +++
 .../hive/ql/txn/compactor/CompactorThread.java  |  5 +++
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  9 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java|  7 +++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 46 
 6 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
--
diff --git 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index b0bbd66..2e81bf8 100644
--- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.streaming;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -342,6 +343,11 @@ public class HiveEndPoint {
 return null;
   }
 } );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 LOG.error("Error closing connection to " + endPt, e);
   } catch (InterruptedException e) {
@@ -937,6 +943,11 @@ public class HiveEndPoint {
   }
 }
 );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 throw new ImpersonationFailed("Failed closing Txn Batch as user '" + 
username +
 "' on  endPoint :" + endPt, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 1e6e8a1..d861bc2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -272,6 +272,12 @@ public class Cleaner extends CompactorThread {
 return null;
   }
 });
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi + 
" for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index ae8865c..952b27a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements 
MetaStoreThread {
   return null;
 }
   });
+  try {
+FileSystem.closeAllForUGI(ugi);
+  } catch (IOException exception) {
+LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+  }
 

hive git commit: HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman)

2016-03-21 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master a6155b75e -> db8fb8a42


HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc 
improvements (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db8fb8a4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db8fb8a4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db8fb8a4

Branch: refs/heads/master
Commit: db8fb8a42a690eaa937d1a0163eaf505c3c48a07
Parents: a6155b7
Author: Wei Zheng 
Authored: Mon Mar 21 11:38:38 2016 -0700
Committer: Wei Zheng 
Committed: Mon Mar 21 11:38:38 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 +
 .../metastore/txn/CompactionTxnHandler.java | 120 ---
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|   4 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 151 +++
 .../hadoop/hive/metastore/txn/TxnUtils.java |  95 
 .../hadoop/hive/metastore/txn/TestTxnUtils.java | 135 +
 6 files changed, 390 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/db8fb8a4/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 98c6372..0f8d67f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -737,6 +737,13 @@ public class HiveConf extends Configuration {
 "select query has incorrect syntax or something similar inside a 
transaction, the\n" +
 "entire transaction will fail and fall-back to DataNucleus will not be 
possible. You\n" +
 "should disable the usage of direct SQL inside transactions if that 
happens in your case."),
+METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 
100, "The maximum\n" +
+" size of a query string (in KB)."),
+
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause",
 1000,
+"The maximum number of values in a IN clause. Once exceeded, it will 
be broken into\n" +
+" multiple OR separated IN clauses."),
+
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
+1000, "The maximum number of values in a VALUES clause for INSERT 
statement."),
 
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
 "Thrift does not support nulls in maps, so any nulls present in maps 
retrieved from ORM must " +
 "either be pruned or converted to empty strings. Some backing dbs such 
as Oracle persist empty strings " +

http://git-wip-us.apache.org/repos/asf/hive/blob/db8fb8a4/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index da2b395..15c01da 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -369,36 +369,38 @@ class CompactionTxnHandler extends TxnHandler {
 rs = stmt.executeQuery(s);
List<Long> txnids = new ArrayList<>();
 while (rs.next()) txnids.add(rs.getLong(1));
+// Remove entries from txn_components, as there may be aborted txn 
components
 if (txnids.size() > 0) {
+  List<String> queries = new ArrayList<String>();
+
+  // Prepare prefix and suffix
+  StringBuilder prefix = new StringBuilder();
+  StringBuilder suffix = new StringBuilder();
+
+  prefix.append("delete from TXN_COMPONENTS where ");
 
-  // Remove entries from txn_components, as there may be aborted txn 
components
-  StringBuilder buf = new StringBuilder();
-  //todo: add a safeguard to make sure IN clause is not too large; 
break up by txn id
-  buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
-  boolean first = true;
-  for (long id : txnids) {
-if (first) first = false;
-else buf.append(", ");
-buf.append(id);
-  }
   //because 1 txn may include different partitions/tables even in auto 
commit mode
-  buf.append(") and tc_database = '");
-  buf.append(info.dbname);
-  buf.append("' and tc_table = '");
-   

hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-03-24 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master d3a5f20b4 -> f9d1b6ab7


HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f9d1b6ab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f9d1b6ab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f9d1b6ab

Branch: refs/heads/master
Commit: f9d1b6ab77ab15b8337c17fbe38557c1f7b5ce58
Parents: d3a5f20
Author: Wei Zheng 
Authored: Thu Mar 24 17:29:59 2016 -0700
Committer: Wei Zheng 
Committed: Thu Mar 24 17:29:59 2016 -0700

--
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 11 +
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |  5 +++
 .../hive/ql/txn/compactor/CompactorThread.java  |  5 +++
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  9 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java|  8 +++-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 
 6 files changed, 82 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
--
diff --git 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 4c77842..baeafad 100644
--- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.hcatalog.streaming;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -342,6 +343,11 @@ public class HiveEndPoint {
 return null;
   }
 } );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 LOG.error("Error closing connection to " + endPt, e);
   } catch (InterruptedException e) {
@@ -937,6 +943,11 @@ public class HiveEndPoint {
   }
 }
 );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 throw new ImpersonationFailed("Failed closing Txn Batch as user '" + 
username +
 "' on  endPoint :" + endPt, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 9ffeaec..4c31a49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -272,6 +272,11 @@ public class Cleaner extends CompactorThread {
 return null;
   }
 });
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
+  ci.getFullPartitionName());}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 8495c66..4d6e24e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements 
MetaStoreThread {
   return null;
 }
   });
+  try {
+FileSystem.closeAllForUGI(ugi);
+  } catch (IOException exception) {
+LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+  }
 
   if (wrapper.size() == 1) {
 LOG.debug("Running job as " + wrapper.get(0));


[2/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
new file mode 100755
index 000..9a2322f
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
@@ -0,0 +1,1242 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+import sys
+import pprint
+from urlparse import urlparse
+from thrift.transport import TTransport
+from thrift.transport import TSocket
+from thrift.transport import TSSLSocket
+from thrift.transport import THttpClient
+from thrift.protocol import TBinaryProtocol
+
+from hive_service import ThriftHive
+from hive_service.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+  print('')
+  print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] 
[-s[sl]] function [arg1 [arg2...]]')
+  print('')
+  print('Functions:')
+  print('  void execute(string query)')
+  print('  string fetchOne()')
+  print('   fetchN(i32 numRows)')
+  print('   fetchAll()')
+  print('  Schema getSchema()')
+  print('  Schema getThriftSchema()')
+  print('  HiveClusterStatus getClusterStatus()')
+  print('  QueryPlan getQueryPlan()')
+  print('  void clean()')
+  print('  string getMetaConf(string key)')
+  print('  void setMetaConf(string key, string value)')
+  print('  void create_database(Database database)')
+  print('  Database get_database(string name)')
+  print('  void drop_database(string name, bool deleteData, bool cascade)')
+  print('   get_databases(string pattern)')
+  print('   get_all_databases()')
+  print('  void alter_database(string dbname, Database db)')
+  print('  Type get_type(string name)')
+  print('  bool create_type(Type type)')
+  print('  bool drop_type(string type)')
+  print('   get_type_all(string name)')
+  print('   get_fields(string db_name, string table_name)')
+  print('   get_fields_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('   get_schema(string db_name, string table_name)')
+  print('   get_schema_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('  void create_table(Table tbl)')
+  print('  void create_table_with_environment_context(Table tbl, 
EnvironmentContext environment_context)')
+  print('  void drop_table(string dbname, string name, bool deleteData)')
+  print('  void drop_table_with_environment_context(string dbname, string 
name, bool deleteData, EnvironmentContext environment_context)')
+  print('   get_tables(string db_name, string pattern)')
+  print('   get_table_meta(string db_patterns, string tbl_patterns,  
tbl_types)')
+  print('   get_all_tables(string db_name)')
+  print('  Table get_table(string dbname, string tbl_name)')
+  print('   get_table_objects_by_name(string dbname,  tbl_names)')
+  print('   get_table_names_by_filter(string dbname, string filter, i16 
max_tables)')
+  print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
+  print('  void alter_table_with_environment_context(string dbname, string 
tbl_name, Table new_tbl, EnvironmentContext environment_context)')
+  print('  void alter_table_with_cascade(string dbname, string tbl_name, Table 
new_tbl, bool cascade)')
+  print('  Partition add_partition(Partition new_part)')
+  print('  Partition add_partition_with_environment_context(Partition 
new_part, EnvironmentContext environment_context)')
+  print('  i32 add_partitions( new_parts)')
+  print('  i32 add_partitions_pspec( new_parts)')
+  print('  Partition append_partition(string db_name, string tbl_name,  
part_vals)')
+  print('  AddPartitionsResult add_partitions_req(AddPartitionsRequest 
request)')
+  print('  Partition append_partition_with_environment_context(string db_name, 
string tbl_name,  part_vals, EnvironmentContext environment_context)')
+  print('  Partition append_partition_by_name(string db_name, string tbl_name, 
string part_name)')
+  print('  Partition append_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, EnvironmentContext 
environment_context)')
+  print('  bool drop_partition(string db_name, string tbl_name,  part_vals, 
bool deleteData)')
+  print('  bool drop_partition_with_environment_context(string db_name, string 
tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)')
+  print('  bool drop_partition_by_name(string db_name, string tbl_name, string 
part_name, bool deleteData)')
+  print('  bool drop_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext 

[1/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 6a1f8a835 -> 983036358


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
new file mode 100644
index 000..978c2a3
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
@@ -0,0 +1,1674 @@
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, 
TApplicationException
+import hive_metastore.ThriftHiveMetastore
+import logging
+from ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class Iface(hive_metastore.ThriftHiveMetastore.Iface):
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+pass
+
+  def fetchOne(self):
+pass
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+pass
+
+  def fetchAll(self):
+pass
+
+  def getSchema(self):
+pass
+
+  def getThriftSchema(self):
+pass
+
+  def getClusterStatus(self):
+pass
+
+  def getQueryPlan(self):
+pass
+
+  def clean(self):
+pass
+
+
+class Client(hive_metastore.ThriftHiveMetastore.Client, Iface):
+  def __init__(self, iprot, oprot=None):
+hive_metastore.ThriftHiveMetastore.Client.__init__(self, iprot, oprot)
+
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+self.send_execute(query)
+self.recv_execute()
+
+  def send_execute(self, query):
+self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
+args = execute_args()
+args.query = query
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_execute(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = execute_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.ex is not None:
+  raise result.ex
+return
+
+  def fetchOne(self):
+self.send_fetchOne()
+return self.recv_fetchOne()
+
+  def send_fetchOne(self):
+self._oprot.writeMessageBegin('fetchOne', TMessageType.CALL, self._seqid)
+args = fetchOne_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchOne(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchOne_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, 
"fetchOne failed: unknown result")
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+self.send_fetchN(numRows)
+return self.recv_fetchN()
+
+  def send_fetchN(self, numRows):
+self._oprot.writeMessageBegin('fetchN', TMessageType.CALL, self._seqid)
+args = fetchN_args()
+args.numRows = numRows
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchN(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchN_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchN 
failed: unknown result")
+
+  def fetchAll(self):
+self.send_fetchAll()
+return self.recv_fetchAll()
+
+  def send_fetchAll(self):
+self._oprot.writeMessageBegin('fetchAll', TMessageType.CALL, self._seqid)
+args = fetchAll_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchAll(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchAll_result()
+
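
Every generated method above follows the same synchronous send/recv shape: write a message header naming the method, write the args struct, flush, then read a header and result struct back. Stripped to its skeleton with libthrift, using an in-memory transport in place of a socket (a sketch of the framing only, not of a real Hive call):

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TMessage;
import org.apache.thrift.protocol.TMessageType;
import org.apache.thrift.transport.TMemoryBuffer;

public class ThriftCallPatternDemo {
  public static void main(String[] args) throws Exception {
    TMemoryBuffer transport = new TMemoryBuffer(64);
    TBinaryProtocol protocol = new TBinaryProtocol(transport);

    // Client (send_*) side of a call: frame method name, type, sequence id.
    protocol.writeMessageBegin(new TMessage("execute", TMessageType.CALL, 1));
    protocol.writeMessageEnd();

    // Server (recv_*) side: read the same header back from the buffer.
    TMessage msg = protocol.readMessageBegin();
    System.out.println(msg.name + " / seqid=" + msg.seqid);
  }
}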

[4/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
new file mode 100644
index 000..934a8a5
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
@@ -0,0 +1,7784 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class ThriftHive {
+
+  public interface Iface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
+
+public void execute(String query) throws HiveServerException, 
org.apache.thrift.TException;
+
+public String fetchOne() throws HiveServerException, 
org.apache.thrift.TException;
+
+public List<String> fetchN(int numRows) throws HiveServerException, 
org.apache.thrift.TException;
+
+public List<String> fetchAll() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws 
HiveServerException, org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() 
throws HiveServerException, org.apache.thrift.TException;
+
+public HiveClusterStatus getClusterStatus() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws 
HiveServerException, org.apache.thrift.TException;
+
+public void clean() throws org.apache.thrift.TException;
+
+  }
+
+  public interface AsyncIface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore .AsyncIface {
+
+public void execute(String query, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchOne(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void fetchN(int numRows, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchAll(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getThriftSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void clean(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+  }
+
+  public static class Client extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client implements 
Iface {
+public static class Factory implements 
org.apache.thrift.TServiceClientFactory<Client> {
+  public Factory() {}
+  public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
+return new Client(prot);
+  }
+  public Client getClient(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+return new Client(iprot, oprot);
+  }
+}
+
+public Client(org.apache.thrift.protocol.TProtocol prot)
+{
+  super(prot, prot);
+}
+
+public Client(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+  super(iprot, oprot);
+}
+
+public void execute(String query) throws 
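
Putting the generated Java interface above to work is mechanical: open a transport, wrap it in a protocol, and hand that to ThriftHive.Client. A hedged sketch against the legacy HiveServer1 endpoint (host, port, and query are illustrative; modern deployments would use HiveServer2 over JDBC instead):

import java.util.List;
import org.apache.hadoop.hive.service.ThriftHive;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class ThriftHiveClientSketch {
  public static void main(String[] args) throws Exception {
    // Host and port are illustrative; 10000 is the usual HiveServer1 port.
    TSocket transport = new TSocket("localhost", 10000);
    transport.open();
    try {
      ThriftHive.Client client = new ThriftHive.Client(new TBinaryProtocol(transport));
      client.execute("select key, val from t1 limit 5");
      List<String> rows = client.fetchAll();
      for (String row : rows) {
        System.out.println(row);
      }
    } finally {
      transport.close();
    }
  }
}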

[8/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98303635
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98303635
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98303635

Branch: refs/heads/master
Commit: 983036358633cfbb6aec30003faac8280372b2c9
Parents: 6a1f8a8
Author: Wei Zheng 
Authored: Mon Apr 4 11:18:25 2016 -0700
Committer: Wei Zheng 
Committed: Mon Apr 4 11:18:25 2016 -0700

--
 service-rpc/src/gen/thrift/gen-py/__init__.py   |0
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp   | 3544 
 service/src/gen/thrift/gen-cpp/ThriftHive.h | 1224 +++
 .../gen-cpp/ThriftHive_server.skeleton.cpp  |   84 +
 .../thrift/gen-cpp/hive_service_constants.cpp   |   17 +
 .../gen/thrift/gen-cpp/hive_service_constants.h |   24 +
 .../gen/thrift/gen-cpp/hive_service_types.cpp   |  351 +
 .../src/gen/thrift/gen-cpp/hive_service_types.h |  176 +
 .../hadoop/hive/service/HiveClusterStatus.java  |  901 ++
 .../hive/service/HiveServerException.java   |  601 ++
 .../hadoop/hive/service/JobTrackerState.java|   45 +
 .../apache/hadoop/hive/service/ThriftHive.java  | 7784 ++
 service/src/gen/thrift/gen-php/ThriftHive.php   | 1943 +
 service/src/gen/thrift/gen-php/Types.php|  338 +
 service/src/gen/thrift/gen-py/__init__.py   |0
 .../gen-py/hive_service/ThriftHive-remote   | 1242 +++
 .../thrift/gen-py/hive_service/ThriftHive.py| 1674 
 .../gen/thrift/gen-py/hive_service/__init__.py  |1 +
 .../gen/thrift/gen-py/hive_service/constants.py |   11 +
 .../gen/thrift/gen-py/hive_service/ttypes.py|  260 +
 .../gen/thrift/gen-rb/hive_service_constants.rb |9 +
 .../src/gen/thrift/gen-rb/hive_service_types.rb |   68 +
 service/src/gen/thrift/gen-rb/thrift_hive.rb|  555 ++
 23 files changed, 20852 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service-rpc/src/gen/thrift/gen-py/__init__.py
--
diff --git a/service-rpc/src/gen/thrift/gen-py/__init__.py 
b/service-rpc/src/gen/thrift/gen-py/__init__.py
new file mode 100644
index 000..e69de29



[6/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.h
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.h 
b/service/src/gen/thrift/gen-cpp/ThriftHive.h
new file mode 100644
index 000..902bd4b
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.h
@@ -0,0 +1,1224 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef ThriftHive_H
+#define ThriftHive_H
+
+#include 
+#include 
+#include "hive_service_types.h"
+#include "ThriftHiveMetastore.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+#ifdef _WIN32
+  #pragma warning( push )
+  #pragma warning (disable : 4250 ) //inheriting methods via dominance 
+#endif
+
+class ThriftHiveIf : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIf {
+ public:
+  virtual ~ThriftHiveIf() {}
+  virtual void execute(const std::string& query) = 0;
+  virtual void fetchOne(std::string& _return) = 0;
+  virtual void fetchN(std::vector & _return, const int32_t 
numRows) = 0;
+  virtual void fetchAll(std::vector & _return) = 0;
+  virtual void getSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getClusterStatus(HiveClusterStatus& _return) = 0;
+  virtual void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) = 0;
+  virtual void clean() = 0;
+};
+
+class ThriftHiveIfFactory : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIfFactory {
+ public:
+  typedef ThriftHiveIf Handler;
+
+  virtual ~ThriftHiveIfFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo& 
connInfo) = 0;
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) = 0;
+};
+
+class ThriftHiveIfSingletonFactory : virtual public ThriftHiveIfFactory {
+ public:
+  ThriftHiveIfSingletonFactory(const boost::shared_ptr& iface) : 
iface_(iface) {}
+  virtual ~ThriftHiveIfSingletonFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+return iface_.get();
+  }
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) {}
+
+ protected:
+  boost::shared_ptr iface_;
+};
+
+class ThriftHiveNull : virtual public ThriftHiveIf , virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreNull {
+ public:
+  virtual ~ThriftHiveNull() {}
+  void execute(const std::string& /* query */) {
+return;
+  }
+  void fetchOne(std::string& /* _return */) {
+return;
+  }
+  void fetchN(std::vector & /* _return */, const int32_t /* 
numRows */) {
+return;
+  }
+  void fetchAll(std::vector & /* _return */) {
+return;
+  }
+  void getSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getThriftSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getClusterStatus(HiveClusterStatus& /* _return */) {
+return;
+  }
+  void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& /* _return */) {
+return;
+  }
+  void clean() {
+return;
+  }
+};
+
+typedef struct _ThriftHive_execute_args__isset {
+  _ThriftHive_execute_args__isset() : query(false) {}
+  bool query :1;
+} _ThriftHive_execute_args__isset;
+
+class ThriftHive_execute_args {
+ public:
+
+  ThriftHive_execute_args(const ThriftHive_execute_args&);
+  ThriftHive_execute_args& operator=(const ThriftHive_execute_args&);
+  ThriftHive_execute_args() : query() {
+  }
+
+  virtual ~ThriftHive_execute_args() throw();
+  std::string query;
+
+  _ThriftHive_execute_args__isset __isset;
+
+  void __set_query(const std::string& val);
+
+  bool operator == (const ThriftHive_execute_args & rhs) const
+  {
+if (!(query == rhs.query))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHive_execute_args ) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHive_execute_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHive_execute_pargs {
+ public:
+
+
+  virtual ~ThriftHive_execute_pargs() throw();
+  const std::string* query;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHive_execute_result__isset {
+  _ThriftHive_execute_result__isset() : ex(false) {}
+  bool ex :1;
+} _ThriftHive_execute_result__isset;
+
+class ThriftHive_execute_result {
+ public:
+
+  ThriftHive_execute_result(const ThriftHive_execute_result&);
+  ThriftHive_execute_result& operator=(const ThriftHive_execute_result&);
+  ThriftHive_execute_result() {
+  }
+
+  virtual ~ThriftHive_execute_result() throw();
+  HiveServerException ex;
+
+  _ThriftHive_execute_result__isset __isset;
+
+  void __set_ex(const 

[7/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.cpp 
b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
new file mode 100644
index 000..a5448f0
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
@@ -0,0 +1,3544 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "ThriftHive.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+
+ThriftHive_execute_args::~ThriftHive_execute_args() throw() {
+}
+
+
+uint32_t ThriftHive_execute_args::read(::apache::thrift::protocol::TProtocol* 
iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->query);
+  this->__isset.query = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHive_execute_args::write(::apache::thrift::protocol::TProtocol* 
oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_args");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->query);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_pargs::~ThriftHive_execute_pargs() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_pargs::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_pargs");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->query)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_result::~ThriftHive_execute_result() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->ex.read(iprot);
+  this->__isset.ex = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHive_execute_result::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHive_execute_result");
+
+  if (this->__isset.ex) {
+xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 
1);
+xfer += this->ex.write(oprot);
+xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_presult::~ThriftHive_execute_presult() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == 

[3/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-php/ThriftHive.php
--
diff --git a/service/src/gen/thrift/gen-php/ThriftHive.php 
b/service/src/gen/thrift/gen-php/ThriftHive.php
new file mode 100644
index 000..23dc8fd
--- /dev/null
+++ b/service/src/gen/thrift/gen-php/ThriftHive.php
@@ -0,0 +1,1943 @@
+send_execute($query);
+$this->recv_execute();
+  }
+
+  public function send_execute($query)
+  {
+$args = new \ThriftHive_execute_args();
+$args->query = $query;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'execute', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('execute', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_execute()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_execute_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_execute_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+return;
+  }
+
+  public function fetchOne()
+  {
+$this->send_fetchOne();
+return $this->recv_fetchOne();
+  }
+
+  public function send_fetchOne()
+  {
+$args = new \ThriftHive_fetchOne_args();
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchOne', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchOne', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchOne()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchOne_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_fetchOne_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->success !== null) {
+  return $result->success;
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+throw new \Exception("fetchOne failed: unknown result");
+  }
+
+  public function fetchN($numRows)
+  {
+$this->send_fetchN($numRows);
+return $this->recv_fetchN();
+  }
+
+  public function send_fetchN($numRows)
+  {
+$args = new \ThriftHive_fetchN_args();
+$args->numRows = $numRows;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchN', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchN', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchN()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchN_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == 

[5/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
new file mode 100644
index 000..97b1219
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
@@ -0,0 +1,601 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HiveServerException extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("HiveServerException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("message", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("errorCode", 
org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField SQLSTATE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("SQLState", 
org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map schemes = 
new HashMap();
+  static {
+schemes.put(StandardScheme.class, new 
HiveServerExceptionStandardSchemeFactory());
+schemes.put(TupleScheme.class, new 
HiveServerExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+  private int errorCode; // required
+  private String SQLState; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+MESSAGE((short)1, "message"),
+ERROR_CODE((short)2, "errorCode"),
+SQLSTATE((short)3, "SQLState");
+
+private static final Map byName = new HashMap();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // MESSAGE
+  return MESSAGE;
+case 2: // ERROR_CODE
+  return ERROR_CODE;
+case 3: // SQLSTATE
+  return SQLSTATE;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  private 

hive git commit: HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei Zheng, reviewed by Eugene Koifman)

2016-04-25 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 edf89a6a0 -> 648f19307


HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/648f1930
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/648f1930
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/648f1930

Branch: refs/heads/branch-1
Commit: 648f19307cab1b55e44b930ffaf043cc93cd4d46
Parents: edf89a6
Author: Wei Zheng 
Authored: Mon Apr 25 11:17:11 2016 -0700
Committer: Wei Zheng 
Committed: Mon Apr 25 11:19:35 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java |  8 
 .../hadoop/hive/metastore/txn/TxnHandler.java | 18 +++---
 .../hadoop/hive/metastore/txn/TestTxnHandler.java | 15 +++
 3 files changed, 38 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0d31131..7c93e44 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -173,6 +173,7 @@ public class HiveConf extends Configuration {
   HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
   HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
   HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
+  HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
   HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
   HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
   HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
@@ -1492,6 +1493,13 @@ public class HiveConf extends Configuration {
 "transactions that Hive has to track at any given time, which may 
negatively affect\n" +
 "read performance."),
 
+HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", 
"Comma separated list\n" +
+"of regular expression patterns for SQL state, error code, and error 
message of\n" +
+"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
+"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
+"The string that the regex will be matched against is of the following 
form, where ex is a SQLException:\n" +
+"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", 
ErrorCode=\" + ex.getErrorCode() + \")\""),
+
 HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
 "Whether to run the initiator and cleaner threads on this metastore 
instance or not.\n" +
 "Set this to true on one instance of the Thrift metastore service as 
part of turning\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index ed4a3c2..a64e7c8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -51,6 +51,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
 /**
  * A handler to answer transaction related calls that come into the metastore
@@ -1559,7 +1560,7 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 } else {
   LOG.error("Too many repeated deadlocks in " + caller + ", giving 
up.");
 }
-  } else if (isRetryable(e)) {
+  } else if (isRetryable(conf, e)) {
 //in MSSQL this means Communication Link Failure
 if (retryNum++ < retryLimit) {
   LOG.warn("Retryable error detected in " + caller + ".  Will wait " + 
retryInterval +
@@ -2658,7 +2659,7 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
   /**
* Returns true if {@code ex} should be retried
*/
-  private static boolean isRetryable(Exception ex) {
+  static boolean isRetryable(HiveConf conf, Exception ex) {
 if(ex instanceof SQLException) {
   SQLException sqlException = (SQLException)ex;
   if("08S01".equalsIgnoreCase(sqlException.getSQLState())) {
@@ -2669,6 +2670,17 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 

[1/3] hive git commit: HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-05-20 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 360dfa0ff -> 259e8be1d


http://git-wip-us.apache.org/repos/asf/hive/blob/259e8be1/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java 
b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
new file mode 100644
index 000..2804e21
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -0,0 +1,1484 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockLevel;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnInfo;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNull;
+import static junit.framework.Assert.assertTrue;
+import static junit.framework.Assert.fail;
+
+/**
+ * Tests for TxnHandler.
+ */
+public class TestTxnHandler {
+  static final private String CLASS_NAME = TxnHandler.class.getName();
+  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+
+  private HiveConf conf = new HiveConf();
+  private TxnStore txnHandler;
+
+  public TestTxnHandler() throws Exception {
+TxnDbUtil.setConfValues(conf);
+LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+

[3/3] hive git commit: HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-05-20 Thread weiz
HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/259e8be1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/259e8be1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/259e8be1

Branch: refs/heads/master
Commit: 259e8be1d4486c6a17b8c240e43154c5a839524e
Parents: 360dfa0
Author: Wei Zheng 
Authored: Fri May 20 09:50:44 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 20 09:50:44 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |6 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   79 +
 .../hadoop/hive/metastore/txn/TxnStore.java |6 +
 .../metastore/txn/TestCompactionTxnHandler.java |  466 --
 .../hive/metastore/txn/TestTxnHandler.java  | 1484 --
 .../hive/ql/txn/AcidOpenTxnsCounterService.java |   69 +
 .../metastore/txn/TestCompactionTxnHandler.java |  466 ++
 .../hive/metastore/txn/TestTxnHandler.java  | 1484 ++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   41 +-
 9 files changed, 2150 insertions(+), 1951 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/259e8be1/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9cc8fbe..4cfa5f1 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1681,6 +1681,12 @@ public class HiveConf extends Configuration {
 " of the lock manager is dumped to log file.  This is for debugging.  
See also " +
 "hive.lock.numretries and hive.lock.sleep.between.retries."),
 
+HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 10, "Maximum number of open 
transactions. If \n" +
+"current open transactions reach this limit, future open transaction 
requests will be \n" +
+"rejected, until this number goes below the limit."),
+HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
+new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks 
to count open transactions."),
+
 HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
 "Maximum number of transactions that can be fetched in one call to 
open_txns().\n" +
 "This controls how many transactions streaming agents such as Flume or 
Storm open\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/259e8be1/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index abaff34..82d685d 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -28,6 +28,7 @@ import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.hive.common.ServerUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.HouseKeeperService;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -167,6 +168,15 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 }
   }
 
+  // Maximum number of open transactions that's allowed
+  private static volatile int maxOpenTxns = 0;
+  // Current number of open txns
+  private static volatile long numOpenTxns = 0;
+  // Whether number of open transactions reaches the threshold
+  private static volatile boolean tooManyOpenTxns = false;
+  // The AcidHouseKeeperService for counting open transactions
+  private static volatile HouseKeeperService openTxnsCounter = null;
+
   /**
* Number of consecutive deadlocks we have seen
*/
@@ -236,6 +246,7 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 TimeUnit.MILLISECONDS);
 retryLimit = HiveConf.getIntVar(conf, 
HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 deadlockRetryInterval = retryInterval / 10;
+maxOpenTxns = HiveConf.getIntVar(conf, 
HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
   }
 
   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
@@ -362,7 +373,45 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
   return getOpenTxns();
 }
   }
+
+  private static void 

[2/3] hive git commit: HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-05-20 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/259e8be1/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
--
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java 
b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
deleted file mode 100644
index 2804e21..000
--- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ /dev/null
@@ -1,1484 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.txn;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
-import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
-import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
-import org.apache.hadoop.hive.metastore.api.CompactionRequest;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockComponent;
-import org.apache.hadoop.hive.metastore.api.LockLevel;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.LockState;
-import org.apache.hadoop.hive.metastore.api.LockType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnInfo;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.api.TxnState;
-import org.apache.hadoop.hive.metastore.api.UnlockRequest;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.logging.log4j.core.config.Configuration;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertNull;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
-
-/**
- * Tests for TxnHandler.
- */
-public class TestTxnHandler {
-  static final private String CLASS_NAME = TxnHandler.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  private HiveConf conf = new HiveConf();
-  private TxnStore txnHandler;
-
-  public TestTxnHandler() throws Exception {
-TxnDbUtil.setConfValues(conf);
-LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
-Configuration conf = ctx.getConfiguration();
-

[1/2] hive git commit: HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-05-20 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 5fe252b93 -> cb3636f3f


http://git-wip-us.apache.org/repos/asf/hive/blob/cb3636f3/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java
new file mode 100644
index 000..f5eb8a1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.txn;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.ql.txn.compactor.HouseKeeperServiceBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+/**
+ * Background running thread, periodically updating number of open 
transactions.
+ * Runs inside Hive Metastore Service.
+ */
+public class AcidOpenTxnsCounterService extends HouseKeeperServiceBase {
+  private static final Logger LOG = 
LoggerFactory.getLogger(AcidOpenTxnsCounterService.class);
+  @Override
+  protected long getStartDelayMs() {
+return 100;  // in miliseconds
+  }
+  @Override
+  protected long getIntervalMs() {
+return 
hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_COUNT_OPEN_TXNS_INTERVAL, 
TimeUnit.MILLISECONDS);
+  }
+  @Override
+  protected Runnable getScheduedAction(HiveConf hiveConf, AtomicInteger 
isAliveCounter) {
+return new OpenTxnsCounter(hiveConf, isAliveCounter);
+  }
+  @Override
+  public String getServiceDescription() {
+return "Count number of open transactions";
+  }
+  private static final class OpenTxnsCounter implements Runnable {
+private final TxnStore txnHandler;
+private final AtomicInteger isAliveCounter;
+private OpenTxnsCounter(HiveConf hiveConf, AtomicInteger isAliveCounter) {
+  txnHandler = TxnUtils.getTxnStore(hiveConf);
+  this.isAliveCounter = isAliveCounter;
+}
+@Override
+public void run() {
+  try {
+long startTime = System.currentTimeMillis();
+txnHandler.countOpenTxns();
+int count = isAliveCounter.incrementAndGet();
+LOG.info("OpenTxnsCounter ran for " + (System.currentTimeMillis() - 
startTime)/1000 + "seconds.  isAliveCounter=" + count);
+  }
+  catch(Throwable t) {
+LOG.error("Serious error in {}", Thread.currentThread().getName(), ": 
{}" + t.getMessage(), t);
+  }
+}
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/cb3636f3/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
 
b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
new file mode 100644
index 000..23ad54e
--- /dev/null
+++ 
b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -0,0 +1,447 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn;
+
+import 

[2/2] hive git commit: HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-05-20 Thread weiz
HIVE-13249 : Hard upper bound on number of open transactions (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cb3636f3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cb3636f3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cb3636f3

Branch: refs/heads/branch-1
Commit: cb3636f3fe3e45744eed23a542de05f77a3dd356
Parents: 5fe252b
Author: Wei Zheng 
Authored: Fri May 20 10:25:07 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 20 10:25:07 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |6 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   77 +
 .../hadoop/hive/metastore/txn/TxnStore.java |6 +
 .../metastore/txn/TestCompactionTxnHandler.java |  447 -
 .../hive/metastore/txn/TestTxnHandler.java  | 1521 --
 .../hive/ql/txn/AcidOpenTxnsCounterService.java |   69 +
 .../metastore/txn/TestCompactionTxnHandler.java |  447 +
 .../hive/metastore/txn/TestTxnHandler.java  | 1521 ++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   41 +-
 9 files changed, 2166 insertions(+), 1969 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cb3636f3/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4c6aa71..c63c2ca 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1485,6 +1485,12 @@ public class HiveConf extends Configuration {
 " of the lock manager is dumped to log file.  This is for debugging.  
See also " +
 "hive.lock.numretries and hive.lock.sleep.between.retries."),
 
+HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 10, "Maximum number of open 
transactions. If \n" +
+"current open transactions reach this limit, future open transaction 
requests will be \n" +
+"rejected, until this number goes below the limit."),
+HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
+new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks 
to count open transactions."),
+
 HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
 "Maximum number of transactions that can be fetched in one call to 
open_txns().\n" +
 "This controls how many transactions streaming agents such as Flume or 
Storm open\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/cb3636f3/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 4da5542..27fa820 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HouseKeeperService;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -169,6 +170,15 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 }
   }
 
+  // Maximum number of open transactions that's allowed
+  private static volatile int maxOpenTxns = 0;
+  // Current number of open txns
+  private static volatile long numOpenTxns = 0;
+  // Whether number of open transactions reaches the threshold
+  private static volatile boolean tooManyOpenTxns = false;
+  // The AcidHouseKeeperService for counting open transactions
+  private static volatile HouseKeeperService openTxnsCounter = null;
+
   /**
* Number of consecutive deadlocks we have seen
*/
@@ -234,6 +244,7 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 TimeUnit.MILLISECONDS);
 retryLimit = HiveConf.getIntVar(conf, 
HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 deadlockRetryInterval = retryInterval / 10;
+maxOpenTxns = HiveConf.getIntVar(conf, 
HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
   }
 
   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
@@ -383,7 +394,43 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 return new ValidReadTxnList(exceptions, highWater);
   }
 
+  private static void 

[02/10] hive git commit: HIVE-12634 : Add command to kill an ACID transacton (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
--
diff --git 
a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 4f522c9..8eef585 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1054,6 +1054,13 @@ class Iface(fb303.FacebookService.Iface):
 """
 pass
 
+  def abort_txns(self, rqst):
+"""
+Parameters:
+ - rqst
+"""
+pass
+
   def commit_txn(self, rqst):
 """
 Parameters:
@@ -5883,6 +5890,37 @@ class Client(fb303.FacebookService.Client, Iface):
   raise result.o1
 return
 
+  def abort_txns(self, rqst):
+"""
+Parameters:
+ - rqst
+"""
+self.send_abort_txns(rqst)
+self.recv_abort_txns()
+
+  def send_abort_txns(self, rqst):
+self._oprot.writeMessageBegin('abort_txns', TMessageType.CALL, self._seqid)
+args = abort_txns_args()
+args.rqst = rqst
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_abort_txns(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = abort_txns_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.o1 is not None:
+  raise result.o1
+return
+
   def commit_txn(self, rqst):
 """
 Parameters:
@@ -6609,6 +6647,7 @@ class Processor(fb303.FacebookService.Processor, Iface, 
TProcessor):
 self._processMap["get_open_txns_info"] = 
Processor.process_get_open_txns_info
 self._processMap["open_txns"] = Processor.process_open_txns
 self._processMap["abort_txn"] = Processor.process_abort_txn
+self._processMap["abort_txns"] = Processor.process_abort_txns
 self._processMap["commit_txn"] = Processor.process_commit_txn
 self._processMap["lock"] = Processor.process_lock
 self._processMap["check_lock"] = Processor.process_check_lock
@@ -9816,6 +9855,28 @@ class Processor(fb303.FacebookService.Processor, Iface, 
TProcessor):
 oprot.writeMessageEnd()
 oprot.trans.flush()
 
+  def process_abort_txns(self, seqid, iprot, oprot):
+args = abort_txns_args()
+args.read(iprot)
+iprot.readMessageEnd()
+result = abort_txns_result()
+try:
+  self._handler.abort_txns(args.rqst)
+  msg_type = TMessageType.REPLY
+except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+  raise
+except NoSuchTxnException as o1:
+  msg_type = TMessageType.REPLY
+  result.o1 = o1
+except Exception as ex:
+  msg_type = TMessageType.EXCEPTION
+  logging.exception(ex)
+  result = TApplicationException(TApplicationException.INTERNAL_ERROR, 
'Internal error')
+oprot.writeMessageBegin("abort_txns", msg_type, seqid)
+result.write(oprot)
+oprot.writeMessageEnd()
+oprot.trans.flush()
+
   def process_commit_txn(self, seqid, iprot, oprot):
 args = commit_txn_args()
 args.read(iprot)
@@ -11105,10 +11166,10 @@ class get_databases_result:
   if fid == 0:
 if ftype == TType.LIST:
   self.success = []
-  (_etype539, _size536) = iprot.readListBegin()
-  for _i540 in xrange(_size536):
-_elem541 = iprot.readString()
-self.success.append(_elem541)
+  (_etype546, _size543) = iprot.readListBegin()
+  for _i547 in xrange(_size543):
+_elem548 = iprot.readString()
+self.success.append(_elem548)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -11131,8 +11192,8 @@ class get_databases_result:
 if self.success is not None:
   oprot.writeFieldBegin('success', TType.LIST, 0)
   oprot.writeListBegin(TType.STRING, len(self.success))
-  for iter542 in self.success:
-oprot.writeString(iter542)
+  for iter549 in self.success:
+oprot.writeString(iter549)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 if self.o1 is not None:
@@ -11237,10 +11298,10 @@ class get_all_databases_result:
   if fid == 0:
 if ftype == TType.LIST:
   self.success = []
-  (_etype546, _size543) = iprot.readListBegin()
-  for _i547 in xrange(_size543):
-_elem548 = iprot.readString()
-self.success.append(_elem548)
+  (_etype553, _size550) = iprot.readListBegin()
+  for _i554 in xrange(_size550):
+_elem555 = iprot.readString()
+self.success.append(_elem555)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -11263,8 +11324,8 @@ class 

[09/10] hive git commit: HIVE-12634 : Add command to kill an ACID transacton (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 29d9ec4..0440df7 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size747;
-::apache::thrift::protocol::TType _etype750;
-xfer += iprot->readListBegin(_etype750, _size747);
-this->success.resize(_size747);
-uint32_t _i751;
-for (_i751 = 0; _i751 < _size747; ++_i751)
+uint32_t _size755;
+::apache::thrift::protocol::TType _etype758;
+xfer += iprot->readListBegin(_etype758, _size755);
+this->success.resize(_size755);
+uint32_t _i759;
+for (_i759 = 0; _i759 < _size755; ++_i759)
 {
-  xfer += iprot->readString(this->success[_i751]);
+  xfer += iprot->readString(this->success[_i759]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter752;
-  for (_iter752 = this->success.begin(); _iter752 != this->success.end(); 
++_iter752)
+  std::vector ::const_iterator _iter760;
+  for (_iter760 = this->success.begin(); _iter760 != this->success.end(); 
++_iter760)
   {
-xfer += oprot->writeString((*_iter752));
+xfer += oprot->writeString((*_iter760));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size753;
-::apache::thrift::protocol::TType _etype756;
-xfer += iprot->readListBegin(_etype756, _size753);
-(*(this->success)).resize(_size753);
-uint32_t _i757;
-for (_i757 = 0; _i757 < _size753; ++_i757)
+uint32_t _size761;
+::apache::thrift::protocol::TType _etype764;
+xfer += iprot->readListBegin(_etype764, _size761);
+(*(this->success)).resize(_size761);
+uint32_t _i765;
+for (_i765 = 0; _i765 < _size761; ++_i765)
 {
-  xfer += iprot->readString((*(this->success))[_i757]);
+  xfer += iprot->readString((*(this->success))[_i765]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size758;
-::apache::thrift::protocol::TType _etype761;
-xfer += iprot->readListBegin(_etype761, _size758);
-this->success.resize(_size758);
-uint32_t _i762;
-for (_i762 = 0; _i762 < _size758; ++_i762)
+uint32_t _size766;
+::apache::thrift::protocol::TType _etype769;
+xfer += iprot->readListBegin(_etype769, _size766);
+this->success.resize(_size766);
+uint32_t _i770;
+for (_i770 = 0; _i770 < _size766; ++_i770)
 {
-  xfer += iprot->readString(this->success[_i762]);
+  xfer += iprot->readString(this->success[_i770]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter763;
-  for (_iter763 = this->success.begin(); _iter763 != this->success.end(); 
++_iter763)
+  std::vector ::const_iterator _iter771;
+  for (_iter771 = this->success.begin(); _iter771 != this->success.end(); 
++_iter771)
   {
-xfer += oprot->writeString((*_iter763));
+xfer += oprot->writeString((*_iter771));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[04/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 2172cb7..034f5fa 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1046,6 +1046,11 @@ interface ThriftHiveMetastoreIf extends 
\FacebookServiceIf {
*/
   public function abort_txn(\metastore\AbortTxnRequest $rqst);
   /**
+   * @param \metastore\AbortTxnsRequest $rqst
+   * @throws \metastore\NoSuchTxnException
+   */
+  public function abort_txns(\metastore\AbortTxnsRequest $rqst);
+  /**
* @param \metastore\CommitTxnRequest $rqst
* @throws \metastore\NoSuchTxnException
* @throws \metastore\TxnAbortedException
@@ -8474,6 +8479,57 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas
 return;
   }
 
+  public function abort_txns(\metastore\AbortTxnsRequest $rqst)
+  {
+$this->send_abort_txns($rqst);
+$this->recv_abort_txns();
+  }
+
+  public function send_abort_txns(\metastore\AbortTxnsRequest $rqst)
+  {
+$args = new \metastore\ThriftHiveMetastore_abort_txns_args();
+$args->rqst = $rqst;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'abort_txns', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('abort_txns', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_abort_txns()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\metastore\ThriftHiveMetastore_abort_txns_result', 
$this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \metastore\ThriftHiveMetastore_abort_txns_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->o1 !== null) {
+  throw $result->o1;
+}
+return;
+  }
+
   public function commit_txn(\metastore\CommitTxnRequest $rqst)
   {
 $this->send_commit_txn($rqst);
@@ -10616,14 +10672,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size539 = 0;
-$_etype542 = 0;
-$xfer += $input->readListBegin($_etype542, $_size539);
-for ($_i543 = 0; $_i543 < $_size539; ++$_i543)
+$_size546 = 0;
+$_etype549 = 0;
+$xfer += $input->readListBegin($_etype549, $_size546);
+for ($_i550 = 0; $_i550 < $_size546; ++$_i550)
 {
-  $elem544 = null;
-  $xfer += $input->readString($elem544);
-  $this->success []= $elem544;
+  $elem551 = null;
+  $xfer += $input->readString($elem551);
+  $this->success []= $elem551;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -10659,9 +10715,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter545)
+  foreach ($this->success as $iter552)
   {
-$xfer += $output->writeString($iter545);
+$xfer += $output->writeString($iter552);
   }
 }
 $output->writeListEnd();
@@ -10792,14 +10848,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size546 = 0;
-$_etype549 = 0;
-$xfer += $input->readListBegin($_etype549, $_size546);
-for ($_i550 = 0; $_i550 < $_size546; ++$_i550)
+$_size553 = 0;
+$_etype556 = 0;
+$xfer += $input->readListBegin($_etype556, $_size553);
+for ($_i557 = 0; $_i557 < $_size553; ++$_i557)
 {
-  $elem551 = null;
-  $xfer += $input->readString($elem551);
-  

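The PHP client above gives the flavor of the new RPC; the same call is easiest to exercise end to end from Java. The following sketch is hypothetical -- it assumes an unsecured metastore listening on localhost:9083 and a plain TSocket/TBinaryProtocol connection -- but AbortTxnsRequest and ThriftHiveMetastore.Client match the generated classes elsewhere in this patch series:

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class AbortTxnsSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; real deployments read this from hive-site.xml.
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // One round trip aborts a whole batch of open transactions.
        AbortTxnsRequest rqst = new AbortTxnsRequest(Arrays.asList(101L, 102L, 103L));
        client.abort_txns(rqst);   // declared to throw NoSuchTxnException
        transport.close();
      }
    }
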
[06/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 56060f4..d32cdeb 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -293,6 +293,8 @@ class OpenTxnsResponse;
 
 class AbortTxnRequest;
 
+class AbortTxnsRequest;
+
 class CommitTxnRequest;
 
 class LockComponent;
@@ -4949,6 +4951,46 @@ inline std::ostream& operator<<(std::ostream& out, const 
AbortTxnRequest& obj)
 }
 
 
+class AbortTxnsRequest {
+ public:
+
+  AbortTxnsRequest(const AbortTxnsRequest&);
+  AbortTxnsRequest& operator=(const AbortTxnsRequest&);
+  AbortTxnsRequest() {
+  }
+
+  virtual ~AbortTxnsRequest() throw();
+  std::vector<int64_t>  txn_ids;
+
+  void __set_txn_ids(const std::vector<int64_t> & val);
+
+  bool operator == (const AbortTxnsRequest & rhs) const
+  {
+if (!(txn_ids == rhs.txn_ids))
+  return false;
+return true;
+  }
+  bool operator != (const AbortTxnsRequest &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const AbortTxnsRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AbortTxnsRequest &a, AbortTxnsRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AbortTxnsRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
 class CommitTxnRequest {
  public:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
new file mode 100644
index 000..c434737
--- /dev/null
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class AbortTxnsRequest implements org.apache.thrift.TBase<AbortTxnsRequest, AbortTxnsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AbortTxnsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AbortTxnsRequest");
+
+  private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = 
new org.apache.thrift.protocol.TField("txn_ids", 
org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new 
AbortTxnsRequestStandardSchemeFactory());
+schemes.put(TupleScheme.class, new AbortTxnsRequestTupleSchemeFactory());
+  }
+
+  private List<Long> txn_ids; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+TXN_IDS((short)1, "txn_ids");
+
+private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+

[07/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 82d8686..f0cd007 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -12132,6 +12132,112 @@ void AbortTxnRequest::printTo(std::ostream& out) 
const {
 }
 
 
+AbortTxnsRequest::~AbortTxnsRequest() throw() {
+}
+
+
+void AbortTxnsRequest::__set_txn_ids(const std::vector<int64_t> & val) {
+  this->txn_ids = val;
+}
+
+uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+  bool isset_txn_ids = false;
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_LIST) {
+  {
+this->txn_ids.clear();
+uint32_t _size522;
+::apache::thrift::protocol::TType _etype525;
+xfer += iprot->readListBegin(_etype525, _size522);
+this->txn_ids.resize(_size522);
+uint32_t _i526;
+for (_i526 = 0; _i526 < _size522; ++_i526)
+{
+  xfer += iprot->readI64(this->txn_ids[_i526]);
+}
+xfer += iprot->readListEnd();
+  }
+  isset_txn_ids = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  if (!isset_txn_ids)
+throw TProtocolException(TProtocolException::INVALID_DATA);
+  return xfer;
+}
+
+uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("AbortTxnsRequest");
+
+  xfer += oprot->writeFieldBegin("txn_ids", 
::apache::thrift::protocol::T_LIST, 1);
+  {
+xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, 
static_cast<uint32_t>(this->txn_ids.size()));
+std::vector<int64_t> ::const_iterator _iter527;
+for (_iter527 = this->txn_ids.begin(); _iter527 != this->txn_ids.end(); 
++_iter527)
+{
+  xfer += oprot->writeI64((*_iter527));
+}
+xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) {
+  using ::std::swap;
+  swap(a.txn_ids, b.txn_ids);
+}
+
+AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other528) {
+  txn_ids = other528.txn_ids;
+}
+AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& 
other529) {
+  txn_ids = other529.txn_ids;
+  return *this;
+}
+void AbortTxnsRequest::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "AbortTxnsRequest(";
+  out << "txn_ids=" << to_string(txn_ids);
+  out << ")";
+}
+
+
 CommitTxnRequest::~CommitTxnRequest() throw() {
 }
 
@@ -12203,11 +12309,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) {
   swap(a.txnid, b.txnid);
 }
 
-CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other522) {
-  txnid = other522.txnid;
+CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other530) {
+  txnid = other530.txnid;
 }
-CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& 
other523) {
-  txnid = other523.txnid;
+CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& 
other531) {
+  txnid = other531.txnid;
   return *this;
 }
 void CommitTxnRequest::printTo(std::ostream& out) const {
@@ -12270,9 +12376,9 @@ uint32_t 
LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
 {
   case 1:
 if (ftype == ::apache::thrift::protocol::T_I32) {
-  int32_t ecast524;
-  xfer += iprot->readI32(ecast524);
-  this->type = (LockType::type)ecast524;
+  int32_t ecast532;
+  xfer += iprot->readI32(ecast532);
+  this->type = (LockType::type)ecast532;
   isset_type = true;
 } else {
   xfer += iprot->skip(ftype);
@@ -12280,9 +12386,9 @@ uint32_t 
LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
 break;
   case 2:
 if (ftype == ::apache::thrift::protocol::T_I32) {
-  int32_t ecast525;
-  xfer += 

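The C++ read/write pair above is symmetric, so the struct can be sanity-checked with an in-memory round trip. A minimal sketch on the Java side, assuming only libthrift (TMemoryBuffer is Thrift's in-memory transport; equals() is the field-by-field comparison the compiler generates):

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class AbortTxnsRoundTrip {
      public static void main(String[] args) throws Exception {
        AbortTxnsRequest original = new AbortTxnsRequest(Arrays.asList(7L, 8L));

        // write() serializes into the buffer; read() decodes from the same bytes.
        TMemoryBuffer buffer = new TMemoryBuffer(64);
        original.write(new TBinaryProtocol(buffer));

        AbortTxnsRequest copy = new AbortTxnsRequest();
        copy.read(new TBinaryProtocol(buffer));

        if (!original.equals(copy)) {
          throw new AssertionError("round trip lost data");
        }
      }
    }
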
[03/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index f985954..c9e44f9 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -12197,6 +12197,107 @@ class AbortTxnRequest {
 
 }
 
+class AbortTxnsRequest {
+  static $_TSPEC;
+
+  /**
+   * @var int[]
+   */
+  public $txn_ids = null;
+
+  public function __construct($vals=null) {
+if (!isset(self::$_TSPEC)) {
+  self::$_TSPEC = array(
+1 => array(
+  'var' => 'txn_ids',
+  'type' => TType::LST,
+  'etype' => TType::I64,
+  'elem' => array(
+'type' => TType::I64,
+),
+  ),
+);
+}
+if (is_array($vals)) {
+  if (isset($vals['txn_ids'])) {
+$this->txn_ids = $vals['txn_ids'];
+  }
+}
+  }
+
+  public function getName() {
+return 'AbortTxnsRequest';
+  }
+
+  public function read($input)
+  {
+$xfer = 0;
+$fname = null;
+$ftype = 0;
+$fid = 0;
+$xfer += $input->readStructBegin($fname);
+while (true)
+{
+  $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+  if ($ftype == TType::STOP) {
+break;
+  }
+  switch ($fid)
+  {
+case 1:
+  if ($ftype == TType::LST) {
+$this->txn_ids = array();
+$_size414 = 0;
+$_etype417 = 0;
+$xfer += $input->readListBegin($_etype417, $_size414);
+for ($_i418 = 0; $_i418 < $_size414; ++$_i418)
+{
+  $elem419 = null;
+  $xfer += $input->readI64($elem419);
+  $this->txn_ids []= $elem419;
+}
+$xfer += $input->readListEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+default:
+  $xfer += $input->skip($ftype);
+  break;
+  }
+  $xfer += $input->readFieldEnd();
+}
+$xfer += $input->readStructEnd();
+return $xfer;
+  }
+
+  public function write($output) {
+$xfer = 0;
+$xfer += $output->writeStructBegin('AbortTxnsRequest');
+if ($this->txn_ids !== null) {
+  if (!is_array($this->txn_ids)) {
+throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('txn_ids', TType::LST, 1);
+  {
+$output->writeListBegin(TType::I64, count($this->txn_ids));
+{
+  foreach ($this->txn_ids as $iter420)
+  {
+$xfer += $output->writeI64($iter420);
+  }
+}
+$output->writeListEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
+$xfer += $output->writeFieldStop();
+$xfer += $output->writeStructEnd();
+return $xfer;
+  }
+
+}
+
 class CommitTxnRequest {
   static $_TSPEC;
 
@@ -12534,15 +12635,15 @@ class LockRequest {
 case 1:
   if ($ftype == TType::LST) {
 $this->component = array();
-$_size414 = 0;
-$_etype417 = 0;
-$xfer += $input->readListBegin($_etype417, $_size414);
-for ($_i418 = 0; $_i418 < $_size414; ++$_i418)
+$_size421 = 0;
+$_etype424 = 0;
+$xfer += $input->readListBegin($_etype424, $_size421);
+for ($_i425 = 0; $_i425 < $_size421; ++$_i425)
 {
-  $elem419 = null;
-  $elem419 = new \metastore\LockComponent();
-  $xfer += $elem419->read($input);
-  $this->component []= $elem419;
+  $elem426 = null;
+  $elem426 = new \metastore\LockComponent();
+  $xfer += $elem426->read($input);
+  $this->component []= $elem426;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -12598,9 +12699,9 @@ class LockRequest {
   {
 $output->writeListBegin(TType::STRUCT, count($this->component));
 {
-  foreach ($this->component as $iter420)
+  foreach ($this->component as $iter427)
   {
-$xfer += $iter420->write($output);
+$xfer += $iter427->write($output);
   }
 }
 $output->writeListEnd();
@@ -13543,15 +13644,15 @@ class ShowLocksResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->locks = array();
-$_size421 = 0;
-$_etype424 = 0;
-$xfer += $input->readListBegin($_etype424, $_size421);
-for ($_i425 = 0; $_i425 < $_size421; ++$_i425)
+$_size428 = 0;
+$_etype431 = 0;
+$xfer += $input->readListBegin($_etype431, $_size428);
+for ($_i432 = 0; $_i432 < $_size428; 

[01/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 10c07d6ec -> 6c4c63694


http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 953c97c..0de4f60 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -8418,6 +8418,81 @@ class AbortTxnRequest:
   def __ne__(self, other):
 return not (self == other)
 
+class AbortTxnsRequest:
+  """
+  Attributes:
+   - txn_ids
+  """
+
+  thrift_spec = (
+None, # 0
+(1, TType.LIST, 'txn_ids', (TType.I64,None), None, ), # 1
+  )
+
+  def __init__(self, txn_ids=None,):
+self.txn_ids = txn_ids
+
+  def read(self, iprot):
+if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
+  fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec))
+  return
+iprot.readStructBegin()
+while True:
+  (fname, ftype, fid) = iprot.readFieldBegin()
+  if ftype == TType.STOP:
+break
+  if fid == 1:
+if ftype == TType.LIST:
+  self.txn_ids = []
+  (_etype416, _size413) = iprot.readListBegin()
+  for _i417 in xrange(_size413):
+_elem418 = iprot.readI64()
+self.txn_ids.append(_elem418)
+  iprot.readListEnd()
+else:
+  iprot.skip(ftype)
+  else:
+iprot.skip(ftype)
+  iprot.readFieldEnd()
+iprot.readStructEnd()
+
+  def write(self, oprot):
+if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None:
+  oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, 
self.thrift_spec)))
+  return
+oprot.writeStructBegin('AbortTxnsRequest')
+if self.txn_ids is not None:
+  oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
+  oprot.writeListBegin(TType.I64, len(self.txn_ids))
+  for iter419 in self.txn_ids:
+oprot.writeI64(iter419)
+  oprot.writeListEnd()
+  oprot.writeFieldEnd()
+oprot.writeFieldStop()
+oprot.writeStructEnd()
+
+  def validate(self):
+if self.txn_ids is None:
+  raise TProtocol.TProtocolException(message='Required field txn_ids is 
unset!')
+return
+
+
+  def __hash__(self):
+value = 17
+value = (value * 31) ^ hash(self.txn_ids)
+return value
+
+  def __repr__(self):
+L = ['%s=%r' % (key, value)
+  for key, value in self.__dict__.iteritems()]
+return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+return isinstance(other, self.__class__) and self.__dict__ == 
other.__dict__
+
+  def __ne__(self, other):
+return not (self == other)
+
 class CommitTxnRequest:
   """
   Attributes:
@@ -8646,11 +8721,11 @@ class LockRequest:
   if fid == 1:
 if ftype == TType.LIST:
   self.component = []
-  (_etype416, _size413) = iprot.readListBegin()
-  for _i417 in xrange(_size413):
-_elem418 = LockComponent()
-_elem418.read(iprot)
-self.component.append(_elem418)
+  (_etype423, _size420) = iprot.readListBegin()
+  for _i424 in xrange(_size420):
+_elem425 = LockComponent()
+_elem425.read(iprot)
+self.component.append(_elem425)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -8687,8 +8762,8 @@ class LockRequest:
 if self.component is not None:
   oprot.writeFieldBegin('component', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.component))
-  for iter419 in self.component:
-iter419.write(oprot)
+  for iter426 in self.component:
+iter426.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 if self.txnid is not None:
@@ -9386,11 +9461,11 @@ class ShowLocksResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.locks = []
-  (_etype423, _size420) = iprot.readListBegin()
-  for _i424 in xrange(_size420):
-_elem425 = ShowLocksResponseElement()
-_elem425.read(iprot)
-self.locks.append(_elem425)
+  (_etype430, _size427) = iprot.readListBegin()
+  for _i431 in xrange(_size427):
+_elem432 = ShowLocksResponseElement()
+_elem432.read(iprot)
+self.locks.append(_elem432)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -9407,8 +9482,8 @@ class ShowLocksResponse:
 if self.locks is not None:
   oprot.writeFieldBegin('locks', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, 

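Note that txn_ids is a required field: the Python validate() above raises TProtocolException when it is unset, and the generated Java bean enforces the same contract. A small hypothetical sketch of that behavior:

    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.thrift.TException;

    public class ValidateSketch {
      public static void main(String[] args) {
        AbortTxnsRequest empty = new AbortTxnsRequest();  // txn_ids left unset
        try {
          empty.validate();  // missing required field -> protocol exception
        } catch (TException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }
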
[05/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index f4700a1..ee40698 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -296,6 +296,8 @@ public class ThriftHiveMetastore {
 
 public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, 
org.apache.thrift.TException;
 
+public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, 
org.apache.thrift.TException;
+
 public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, 
TxnAbortedException, org.apache.thrift.TException;
 
 public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, 
TxnAbortedException, org.apache.thrift.TException;
@@ -592,6 +594,8 @@ public class ThriftHiveMetastore {
 
 public void abort_txn(AbortTxnRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
+public void abort_txns(AbortTxnsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
 public void commit_txn(CommitTxnRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
 public void lock(LockRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
@@ -4417,6 +4421,29 @@ public class ThriftHiveMetastore {
   return;
 }
 
+public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, 
org.apache.thrift.TException
+{
+  send_abort_txns(rqst);
+  recv_abort_txns();
+}
+
+public void send_abort_txns(AbortTxnsRequest rqst) throws 
org.apache.thrift.TException
+{
+  abort_txns_args args = new abort_txns_args();
+  args.setRqst(rqst);
+  sendBase("abort_txns", args);
+}
+
+public void recv_abort_txns() throws NoSuchTxnException, 
org.apache.thrift.TException
+{
+  abort_txns_result result = new abort_txns_result();
+  receiveBase(result, "abort_txns");
+  if (result.o1 != null) {
+throw result.o1;
+  }
+  return;
+}
+
 public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, 
TxnAbortedException, org.apache.thrift.TException
 {
   send_commit_txn(rqst);
@@ -9447,6 +9474,38 @@ public class ThriftHiveMetastore {
   }
 }
 
+public void abort_txns(AbortTxnsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
+  checkReady();
+  abort_txns_call method_call = new abort_txns_call(rqst, resultHandler, 
this, ___protocolFactory, ___transport);
+  this.___currentMethod = method_call;
+  ___manager.call(method_call);
+}
+
+public static class abort_txns_call extends 
org.apache.thrift.async.TAsyncMethodCall {
+  private AbortTxnsRequest rqst;
+  public abort_txns_call(AbortTxnsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException {
+super(client, protocolFactory, transport, resultHandler, false);
+this.rqst = rqst;
+  }
+
+  public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException {
+prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("abort_txns", 
org.apache.thrift.protocol.TMessageType.CALL, 0));
+abort_txns_args args = new abort_txns_args();
+args.setRqst(rqst);
+args.write(prot);
+prot.writeMessageEnd();
+  }
+
+  public void getResult() throws NoSuchTxnException, 
org.apache.thrift.TException {
+if (getState() != 
org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+  throw new IllegalStateException("Method call not finished!");
+}
+org.apache.thrift.transport.TMemoryInputTransport memoryTransport = 
new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport);
+(new Client(prot)).recv_abort_txns();
+  }
+}
+
 public void commit_txn(CommitTxnRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
 

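The async variant above completes through an AsyncMethodCallback rather than a return value; calling getResult() on the finished abort_txns_call rethrows NoSuchTxnException if the server raised one. A hedged sketch of a call site (it assumes an already-constructed ThriftHiveMetastore.AsyncClient, and the callback's type parameter can vary across Thrift compiler versions):

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;

    public class AsyncAbortSketch {
      static void abortAsync(ThriftHiveMetastore.AsyncClient client) throws Exception {
        AbortTxnsRequest rqst = new AbortTxnsRequest(Arrays.asList(42L));
        client.abort_txns(rqst,
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.abort_txns_call>() {
              public void onComplete(ThriftHiveMetastore.AsyncClient.abort_txns_call call) {
                try {
                  call.getResult();  // surfaces NoSuchTxnException, if any
                } catch (Exception e) {
                  // server-side failure
                }
              }
              public void onError(Exception e) {
                // transport-level failure
              }
            });
      }
    }
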
[08/10] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6c4c6369/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index f731941..6639d1c 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -149,6 +149,7 @@ class ThriftHiveMetastoreIf : virtual public  
::facebook::fb303::FacebookService
   virtual void get_open_txns_info(GetOpenTxnsInfoResponse& _return) = 0;
   virtual void open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& 
rqst) = 0;
   virtual void abort_txn(const AbortTxnRequest& rqst) = 0;
+  virtual void abort_txns(const AbortTxnsRequest& rqst) = 0;
   virtual void commit_txn(const CommitTxnRequest& rqst) = 0;
   virtual void lock(LockResponse& _return, const LockRequest& rqst) = 0;
   virtual void check_lock(LockResponse& _return, const CheckLockRequest& rqst) 
= 0;
@@ -606,6 +607,9 @@ class ThriftHiveMetastoreNull : virtual public 
ThriftHiveMetastoreIf , virtual p
   void abort_txn(const AbortTxnRequest& /* rqst */) {
 return;
   }
+  void abort_txns(const AbortTxnsRequest& /* rqst */) {
+return;
+  }
   void commit_txn(const CommitTxnRequest& /* rqst */) {
 return;
   }
@@ -16795,6 +16799,110 @@ class ThriftHiveMetastore_abort_txn_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_abort_txns_args__isset {
+  _ThriftHiveMetastore_abort_txns_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_abort_txns_args__isset;
+
+class ThriftHiveMetastore_abort_txns_args {
+ public:
+
+  ThriftHiveMetastore_abort_txns_args(const 
ThriftHiveMetastore_abort_txns_args&);
+  ThriftHiveMetastore_abort_txns_args& operator=(const 
ThriftHiveMetastore_abort_txns_args&);
+  ThriftHiveMetastore_abort_txns_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txns_args() throw();
+  AbortTxnsRequest rqst;
+
+  _ThriftHiveMetastore_abort_txns_args__isset __isset;
+
+  void __set_rqst(const AbortTxnsRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txns_args & rhs) const
+  {
+if (!(rqst == rhs.rqst))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txns_args &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txns_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_abort_txns_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txns_pargs() throw();
+  const AbortTxnsRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txns_result__isset {
+  _ThriftHiveMetastore_abort_txns_result__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txns_result__isset;
+
+class ThriftHiveMetastore_abort_txns_result {
+ public:
+
+  ThriftHiveMetastore_abort_txns_result(const 
ThriftHiveMetastore_abort_txns_result&);
+  ThriftHiveMetastore_abort_txns_result& operator=(const 
ThriftHiveMetastore_abort_txns_result&);
+  ThriftHiveMetastore_abort_txns_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txns_result() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txns_result__isset __isset;
+
+  void __set_o1(const NoSuchTxnException& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txns_result & rhs) const
+  {
+if (!(o1 == rhs.o1))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txns_result &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txns_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txns_presult__isset {
+  _ThriftHiveMetastore_abort_txns_presult__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txns_presult__isset;
+
+class ThriftHiveMetastore_abort_txns_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txns_presult() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txns_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 typedef struct _ThriftHiveMetastore_commit_txn_args__isset {
   _ThriftHiveMetastore_commit_txn_args__isset() : rqst(false) {}
   bool rqst :1;
@@ -19183,6 +19291,9 @@ class ThriftHiveMetastoreClient : virtual public 
ThriftHiveMetastoreIf, public
   void abort_txn(const AbortTxnRequest& rqst);
   void send_abort_txn(const AbortTxnRequest& rqst);
   void recv_abort_txn();
+  void abort_txns(const AbortTxnsRequest& rqst);
+  void send_abort_txns(const 

[2/3] hive git commit: HIVE-13724 : Backport HIVE-11591 to branch-1 to use undated annotations (Wei Zheng, reviewed by Sergey Shelukhin)

2016-05-09 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
index 71c73a5..21be66b 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class MetaException extends TException implements org.apache.thrift.TBase<MetaException, MetaException._Fields>, java.io.Serializable, Cloneable, Comparable<MetaException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("MetaException");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
index 7ad6a7e..aefbe4a 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class NoSuchLockException extends TException implements org.apache.thrift.TBase<NoSuchLockException, NoSuchLockException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchLockException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("NoSuchLockException");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
index 2096c40..efa5326 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchObjectException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("NoSuchObjectException");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
index 0831099..8149d9c 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class NoSuchTxnException extends TException implements 
org.apache.thrift.TBase

[1/3] hive git commit: HIVE-13724 : Backport HIVE-11591 to branch-1 to use undated annotations (Wei Zheng, reviewed by Sergey Shelukhin)

2016-05-09 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 7dbc53da9 -> f1950fc8a


http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java
index 460dc10..85c881e 100644
--- 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class TByteColumn implements org.apache.thrift.TBase<TByteColumn, TByteColumn._Fields>, java.io.Serializable, Cloneable, Comparable<TByteColumn> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TByteColumn");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java
index b0e6e95..82209f1 100644
--- 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class TByteValue implements org.apache.thrift.TBase<TByteValue, TByteValue._Fields>, java.io.Serializable, Cloneable, Comparable<TByteValue> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TByteValue");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java
index 0bb45aa..ded848f 100644
--- 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class TCLIService {
 
   public interface Iface {

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java
index 82b330b..fde0c47 100644
--- 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-01-14")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 public class TCancelDelegationTokenReq implements org.apache.thrift.TBase<TCancelDelegationTokenReq, TCancelDelegationTokenReq._Fields>, java.io.Serializable, Cloneable, Comparable<TCancelDelegationTokenReq> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TCancelDelegationTokenReq");
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f1950fc8/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java

[3/3] hive git commit: HIVE-13724 : Backport HIVE-11591 to branch-1 to use undated annotations (Wei Zheng, reviewed by Sergey Shelukhin)

2016-05-09 Thread weiz
HIVE-13724 : Backport HIVE-11591 to branch-1 to use undated annotations (Wei 
Zheng, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f1950fc8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f1950fc8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f1950fc8

Branch: refs/heads/branch-1
Commit: f1950fc8a2ac582b8d7f212ab9f142c6cd0114e3
Parents: 7dbc53d
Author: Wei Zheng 
Authored: Mon May 9 22:02:29 2016 -0700
Committer: Wei Zheng 
Committed: Mon May 9 22:02:29 2016 -0700

--
 .../org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/AggrStats.java| 2 +-
 .../apache/hadoop/hive/metastore/api/AlreadyExistsException.java   | 2 +-
 .../apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java| 2 +-
 .../apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java   | 2 +-
 .../org/apache/hadoop/hive/metastore/api/CheckLockRequest.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/ColumnStatistics.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/CompactionRequest.java| 2 +-
 .../hadoop/hive/metastore/api/ConfigValSecurityException.java  | 2 +-
 .../hadoop/hive/metastore/api/CurrentNotificationEventId.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/Database.java | 2 +-
 .../gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java| 2 +-
 .../org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java  | 2 +-
 .../gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java | 2 +-
 .../apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java   | 2 +-
 .../apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java| 2 +-
 .../org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java   | 2 +-
 .../apache/hadoop/hive/metastore/api/DropPartitionsRequest.java| 2 +-
 .../org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/EnvironmentContext.java   | 2 +-
 .../org/apache/hadoop/hive/metastore/api/FieldSchema.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/FireEventRequest.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/FireEventResponse.java| 2 +-
 .../org/apache/hadoop/hive/metastore/api/Function.java | 2 +-
 .../apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java  | 2 +-
 .../apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java  | 2 +-
 .../hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java  | 2 +-
 .../hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java | 2 +-
 .../hive/metastore/api/GetRoleGrantsForPrincipalRequest.java   | 2 +-
 .../hive/metastore/api/GetRoleGrantsForPrincipalResponse.java  | 2 +-
 .../hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java | 2 +-
 .../hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java| 2 +-
 .../apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java   | 2 +-
 .../apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java | 2 +-
 .../apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java | 2 +-
 .../hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java   | 2 +-
 .../org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/HiveObjectRef.java| 2 +-
 .../gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java   | 2 +-
 .../hadoop/hive/metastore/api/IndexAlreadyExistsException.java | 2 +-
 .../apache/hadoop/hive/metastore/api/InsertEventRequestData.java   | 2 +-
 .../apache/hadoop/hive/metastore/api/InvalidInputException.java| 2 +-
 .../apache/hadoop/hive/metastore/api/InvalidObjectException.java   | 2 +-
 .../hadoop/hive/metastore/api/InvalidOperationException.java   | 2 +-
 .../hadoop/hive/metastore/api/InvalidPartitionException.java   | 2 +-
 .../org/apache/hadoop/hive/metastore/api/LockComponent.java| 2 +-
 .../org/apache/hadoop/hive/metastore/api/LockRequest.java  | 2 +-
 .../org/apache/hadoop/hive/metastore/api/LockResponse.java | 2 +-
 .../org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java  | 2 +-
 

hive git commit: HIVE-12996 : Temp tables shouldn't be locked (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 aecb0c02e -> 763c41333


HIVE-12996 : Temp tables shouldn't be locked (Wei Zheng, reviewed by Eugene 
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/763c4133
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/763c4133
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/763c4133

Branch: refs/heads/branch-1
Commit: 763c41333d2299a9ef5ea73eb7212b434c21e6bf
Parents: aecb0c0
Author: Wei Zheng 
Authored: Wed May 11 11:19:38 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 11:19:38 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 11 +++---
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 36 
 2 files changed, 20 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/763c4133/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 904406e..daa31a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -167,9 +167,10 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 
 // For each source to read, get a shared lock
 for (ReadEntity input : plan.getInputs()) {
-  if (!input.needsLock() || input.isUpdateOrDelete()) {
+  if (!input.needsLock() || input.isUpdateOrDelete() ||
+  (input.getType() == Entity.Type.TABLE && 
input.getTable().isTemporary())) {
 // We don't want to acquire read locks during update or delete as 
we'll be acquiring write
-// locks instead.
+// locks instead. Also, there's no need to lock temp tables since 
they're session wide
 continue;
   }
   LockComponentBuilder compBuilder = new LockComponentBuilder();
@@ -210,9 +211,9 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 // overwrite) than we need a shared.  If it's update or delete then we
 // need a SEMI-SHARED.
 for (WriteEntity output : plan.getOutputs()) {
-  if (output.getType() == Entity.Type.DFS_DIR || output.getType() ==
-  Entity.Type.LOCAL_DIR) {
-// We don't lock files or directories.
+  if (output.getType() == Entity.Type.DFS_DIR || output.getType() == 
Entity.Type.LOCAL_DIR ||
+  (output.getType() == Entity.Type.TABLE && 
output.getTable().isTemporary())) {
+// We don't lock files or directories. We also skip locking temp 
tables.
 continue;
   }
   LockComponentBuilder compBuilder = new LockComponentBuilder();

http://git-wip-us.apache.org/repos/asf/hive/blob/763c4133/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 832606b..15e1ee8 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -557,48 +557,40 @@ public class TestDbTxnManager2 {
 checkCmdOnDriver(cpr);
 LockState lockState = ((DbTxnManager) 
txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
 List locks = getLocks();
-Assert.assertEquals("Unexpected lock count", 2, locks.size());
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", 
"values__tmp__table__1", null, locks.get(0));
-checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", 
"nonAcidPart", null, locks.get(1));
-List<HiveLock> relLocks = new ArrayList<HiveLock>(2);
+Assert.assertEquals("Unexpected lock count", 1, locks.size());
+checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", 
"nonAcidPart", null, locks.get(0));
+List<HiveLock> relLocks = new ArrayList<HiveLock>(1);
 relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
-relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
 txnMgr.getLockManager().releaseLocks(relLocks);
 
 cpr = driver.compileAndRespond("insert into nonAcidPart partition(p=1) 
values(5,6)");
 checkCmdOnDriver(cpr);
 lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, 
"Practical", false);
 locks = getLocks();
-Assert.assertEquals("Unexpected lock count", 2, locks.size());
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", 
"values__tmp__table__2", null, locks.get(0));
-checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", 
"nonAcidPart", 

hive git commit: HIVE-12996 : Temp tables shouldn't be locked (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master d8f3d33b0 -> bad8525cc


HIVE-12996 : Temp tables shouldn't be locked (Wei Zheng, reviewed by Eugene 
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bad8525c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bad8525c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bad8525c

Branch: refs/heads/master
Commit: bad8525cc0131a919753fc700ea1906c0063931f
Parents: d8f3d33
Author: Wei Zheng 
Authored: Wed May 11 11:15:08 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 11:15:08 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 12 ---
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 36 
 2 files changed, 21 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bad8525c/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 9c2a346..8c3a1d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.lockmgr;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -167,9 +168,10 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 
 // For each source to read, get a shared lock
 for (ReadEntity input : plan.getInputs()) {
-  if (!input.needsLock() || input.isUpdateOrDelete()) {
+  if (!input.needsLock() || input.isUpdateOrDelete() ||
+  (input.getType() == Entity.Type.TABLE && 
input.getTable().isTemporary())) {
 // We don't want to acquire read locks during update or delete as 
we'll be acquiring write
-// locks instead.
+// locks instead. Also, there's no need to lock temp tables since 
they're session wide
 continue;
   }
   LockComponentBuilder compBuilder = new LockComponentBuilder();
@@ -210,9 +212,9 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 // overwrite) than we need a shared.  If it's update or delete then we
 // need a SEMI-SHARED.
 for (WriteEntity output : plan.getOutputs()) {
-  if (output.getType() == Entity.Type.DFS_DIR || output.getType() ==
-  Entity.Type.LOCAL_DIR) {
-// We don't lock files or directories.
+  if (output.getType() == Entity.Type.DFS_DIR || output.getType() == 
Entity.Type.LOCAL_DIR ||
+  (output.getType() == Entity.Type.TABLE && 
output.getTable().isTemporary())) {
+// We don't lock files or directories. We also skip locking temp 
tables.
 continue;
   }
   LockComponentBuilder compBuilder = new LockComponentBuilder();

http://git-wip-us.apache.org/repos/asf/hive/blob/bad8525c/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index c956d78..0fdf0e9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -569,48 +569,40 @@ public class TestDbTxnManager2 {
 checkCmdOnDriver(cpr);
 LockState lockState = ((DbTxnManager) 
txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
 List<ShowLocksResponseElement> locks = getLocks();
-Assert.assertEquals("Unexpected lock count", 2, locks.size());
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", 
"values__tmp__table__1", null, locks.get(0));
-checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", 
"nonAcidPart", null, locks.get(1));
-List<HiveLock> relLocks = new ArrayList<HiveLock>(2);
+Assert.assertEquals("Unexpected lock count", 1, locks.size());
+checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", 
"nonAcidPart", null, locks.get(0));
+List<HiveLock> relLocks = new ArrayList<HiveLock>(1);
 relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
-relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
 txnMgr.getLockManager().releaseLocks(relLocks);
 
 cpr = driver.compileAndRespond("insert into nonAcidPart partition(p=1) 
values(5,6)");
 checkCmdOnDriver(cpr);
 

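Both hunks add the same guard on the read and the write path: a table-typed entity whose underlying Table is temporary is skipped, because a temp table is visible only to the session that created it and can never conflict with another transaction. Factored out as a predicate, the check amounts to this (a paraphrase of the patch, not code taken from it):

    import org.apache.hadoop.hive.ql.hooks.Entity;

    final class TempTableCheck {
      // Session-scoped temp tables never need metastore locks.
      static boolean isTempTable(Entity e) {
        return e.getType() == Entity.Type.TABLE && e.getTable().isTemporary();
      }
    }
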
hive git commit: HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 763c41333 -> 6c160bc1c


HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6c160bc1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6c160bc1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6c160bc1

Branch: refs/heads/branch-1
Commit: 6c160bc1cdd2290861623e6437784ee39ca4eb91
Parents: 763c413
Author: Wei Zheng 
Authored: Wed May 11 14:30:55 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 14:30:55 2016 -0700

--
 .../hive/hcatalog/streaming/TestStreaming.java  |   8 +-
 .../hive/metastore/HiveMetaStoreClient.java |   6 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |  10 ++
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  28 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  25 +++-
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |   6 +-
 .../hadoop/hive/ql/plan/ShowLocksDesc.java  |   4 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 126 +++
 .../queries/clientpositive/dbtxnmgr_showlocks.q |  14 +++
 .../clientpositive/dbtxnmgr_showlocks.q.out |  47 ++-
 10 files changed, 265 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
--
diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index f4ee208..6016425 100644
--- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
@@ -647,13 +648,16 @@ public class TestStreaming {
 //todo: this should ideally check Transaction heartbeat as well, but heartbeat
 //timestamp is not reported yet
 //GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
 //GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
-ShowLocksResponse response = msClient.showLocks();
+ShowLocksRequest request = new ShowLocksRequest();
+request.setDbname(dbName2);
+request.setTablename(tblName2);
+ShowLocksResponse response = msClient.showLocks(request);
 Assert.assertEquals("Wrong nubmer of locks: " + response, 1, 
response.getLocks().size());
 ShowLocksResponseElement lock = response.getLocks().get(0);
 long acquiredAt = lock.getAcquiredat();
 long heartbeatAt = lock.getLastheartbeat();
 txnBatch.heartbeat();
-response = msClient.showLocks();
+response = msClient.showLocks(request);
 Assert.assertEquals("Wrong number of locks2: " + response, 1, 
response.getLocks().size());
 lock = response.getLocks().get(0);
 Assert.assertEquals("Acquired timestamp didn't match", acquiredAt, 
lock.getAcquiredat());
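The test now exercises the filtered SHOW LOCKS path end to end. For client code, the new overload pushes the db/table/partition filter to the metastore instead of returning every lock. A hedged usage sketch (assumes a connected IMetaStoreClient; db and table names are illustrative):

  ShowLocksRequest req = new ShowLocksRequest();
  req.setDbname("sales");
  req.setTablename("orders");
  // req.setPartname("ds=2016-05-11");   // optional partition-level filter
  ShowLocksResponse resp = msClient.showLocks(req);
  for (ShowLocksResponseElement e : resp.getLocks()) {
    System.out.println(e.getLockid() + " " + e.getState() + " " + e.getType());
  }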

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 6bef3f5..94d5d86 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1914,11 +1914,17 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   }
 
   @Override
+  @Deprecated
   public ShowLocksResponse showLocks() throws TException {
 return client.show_locks(new ShowLocksRequest());
   }
 
   @Override
+  public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+return client.show_locks(request);
+  }
+
+  @Override
   public void heartbeat(long txnid, long lockid)
   throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
   TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java

hive git commit: HIVE-13458 : Heartbeater doesn't fail query when heartbeat fails (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master b9e4fe856 -> 66a021164


HIVE-13458 : Heartbeater doesn't fail query when heartbeat fails (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66a02116
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66a02116
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66a02116

Branch: refs/heads/master
Commit: 66a02116453427601fd806fe999a753e3a201d49
Parents: b9e4fe8
Author: Wei Zheng 
Authored: Wed May 11 16:14:02 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 16:14:02 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  1 +
 .../java/org/apache/hadoop/hive/ql/Context.java | 18 ++
 .../hadoop/hive/ql/exec/mr/ExecDriver.java  |  3 +-
 .../hive/ql/exec/mr/HadoopJobExecHelper.java| 20 --
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  |  7 ++-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |  2 +-
 .../hadoop/hive/ql/io/merge/MergeFileTask.java  |  2 +-
 .../ql/io/rcfile/stats/PartialScanTask.java |  2 +-
 .../io/rcfile/truncate/ColumnTruncateTask.java  |  2 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 66 
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +++
 .../index_compact_entry_limit.q.out |  2 +-
 .../index_compact_size_limit.q.out  |  2 +-
 13 files changed, 124 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/66a02116/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f2273c0..541af57 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1138,6 +1138,7 @@ public class HiveConf extends Configuration {
 HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
 HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only.  Will mark every ACID transaction aborted", false),
 HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only.  Will cause CompactorMR to fail.", false),
+HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only.  Will cause Heartbeater to fail.", false),
 
 HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
 "Merge small files at the end of a map-only job"),

http://git-wip-us.apache.org/repos/asf/hive/blob/66a02116/ql/src/java/org/apache/hadoop/hive/ql/Context.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 6f18c82..92b4e5b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -43,9 +43,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager.Heartbeater;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -121,6 +123,8 @@ public class Context {
 
   private final String stagingDir;
 
+  private Heartbeater heartbeater;
+
   public Context(Configuration conf) throws IOException {
 this(conf, generateExecutionId());
   }
@@ -760,4 +764,18 @@ public class Context {
   public CompilationOpContext getOpContext() {
 return opContext;
   }
+
+  public Heartbeater getHeartbeater() {
+return heartbeater;
+  }
+
+  public void setHeartbeater(Heartbeater heartbeater) {
+this.heartbeater = heartbeater;
+  }
+
+  public void checkHeartbeaterLockException() throws LockException {
+if (getHeartbeater() != null && getHeartbeater().getLockException() != null) {
+  throw getHeartbeater().getLockException();
+}
+  }
 }
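The Heartbeater runs on a background executor, so a failed heartbeat cannot throw on the query thread directly; it is parked as a LockException and rethrown by checkHeartbeaterLockException(). A condensed sketch of the consuming side, in the shape the job monitors in this patch use it (jobComplete and checkIntervalMs are assumed, illustrative names):

  // On the query thread, inside the job-progress polling loop:
  void pollUntilDone(Context ctx) throws LockException, InterruptedException {
    while (!jobComplete) {
      // Rethrows any LockException recorded by the background Heartbeater,
      // failing the query instead of letting it run on with expired locks.
      ctx.checkHeartbeaterLockException();
      Thread.sleep(checkIntervalMs);
    }
  }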

http://git-wip-us.apache.org/repos/asf/hive/blob/66a02116/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java 

hive git commit: HIVE-13458 : Heartbeater doesn't fail query when heartbeat fails (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 6c160bc1c -> 70f352728


HIVE-13458 : Heartbeater doesn't fail query when heartbeat fails (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70f35272
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70f35272
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70f35272

Branch: refs/heads/branch-1
Commit: 70f3527288593a55c1ace66fc6e0c85753f8c27c
Parents: 6c160bc
Author: Wei Zheng 
Authored: Wed May 11 16:54:25 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 16:54:25 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  1 +
 .../java/org/apache/hadoop/hive/ql/Context.java | 15 +
 .../hadoop/hive/ql/exec/mr/ExecDriver.java  |  3 +-
 .../hive/ql/exec/mr/HadoopJobExecHelper.java| 20 --
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  |  7 ++-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |  2 +-
 .../hadoop/hive/ql/io/merge/MergeFileTask.java  |  2 +-
 .../ql/io/rcfile/stats/PartialScanTask.java |  2 +-
 .../io/rcfile/truncate/ColumnTruncateTask.java  |  2 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 66 
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +++
 .../index_compact_entry_limit.q.out |  2 +-
 .../index_compact_size_limit.q.out  |  2 +-
 13 files changed, 121 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/70f35272/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 1086595..4c6aa71 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -953,6 +953,7 @@ public class HiveConf extends Configuration {
 HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
 HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only.  Will mark every ACID transaction aborted", false),
 HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only.  Will cause CompactorMR to fail.", false),
+HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only.  Will cause Heartbeater to fail.", false),
 
 HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
 "Merge small files at the end of a map-only job"),

http://git-wip-us.apache.org/repos/asf/hive/blob/70f35272/ql/src/java/org/apache/hadoop/hive/ql/Context.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index a92331a..5fe08e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -44,9 +44,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager.Heartbeater;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -114,6 +116,8 @@ public class Context {
 
   private final String stagingDir;
 
+  private Heartbeater heartbeater;
+
   public Context(Configuration conf) throws IOException {
 this(conf, generateExecutionId());
   }
@@ -713,4 +717,15 @@ public class Context {
 this.cboSucceeded = cboSucceeded;
   }
 
+  public Heartbeater getHeartbeater() {
+return heartbeater;
+  }
+  public void setHeartbeater(Heartbeater heartbeater) {
+this.heartbeater = heartbeater;
+  }
+  public void checkHeartbeaterLockException() throws LockException {
+if (getHeartbeater() != null && getHeartbeater().getLockException() != null) {
+  throw getHeartbeater().getLockException();
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/70f35272/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 4160399..982ccc7 

hive git commit: HIVE-13753 : Make metastore client thread safe in DbTxnManager (Wei Zheng, reviewed by Vaibhav Gumashta)

2016-05-16 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 14ac6de6c -> d273fba8f


HIVE-13753 : Make metastore client thread safe in DbTxnManager (Wei Zheng, 
reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d273fba8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d273fba8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d273fba8

Branch: refs/heads/branch-1
Commit: d273fba8f54fcea887c2873ecf84c6cafe6d6aa6
Parents: 14ac6de
Author: Wei Zheng 
Authored: Mon May 16 10:24:39 2016 -0700
Committer: Wei Zheng 
Committed: Mon May 16 10:26:19 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |  4 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 50 +++-
 2 files changed, 50 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d273fba8/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
index ad4bd4c..089a48a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
@@ -50,11 +50,11 @@ public class DbLockManager implements HiveLockManager{
   private long MAX_SLEEP;
   //longer term we should always have a txn id and then we won't need to track locks here at all
   private Set locks;
-  private IMetaStoreClient client;
+  private DbTxnManager.SynchronizedMetaStoreClient client;
   private long nextSleep = 50;
   private final HiveConf conf;
 
-  DbLockManager(IMetaStoreClient client, HiveConf conf) {
+  DbLockManager(DbTxnManager.SynchronizedMetaStoreClient client, HiveConf conf) {
 locks = new HashSet<>();
 this.client = client;
 this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hive/blob/d273fba8/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index b0f1362..21b0cb2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -61,7 +61,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   static final private Log LOG = LogFactory.getLog(CLASS_NAME);
 
   private DbLockManager lockMgr = null;
-  private IMetaStoreClient client = null;
+  private SynchronizedMetaStoreClient client = null;
   /**
* The Metastore NEXT_TXN_ID.NTXN_NEXT is initialized to 1; it contains the next available
* transaction id.  Thus 1 is the first transaction id.
@@ -518,7 +518,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   }
   try {
 Hive db = Hive.get(conf);
-client = db.getMSC();
+client = new SynchronizedMetaStoreClient(db.getMSC());
 initHeartbeatExecutorService();
   } catch (MetaException e) {
 throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), e);
@@ -613,4 +613,50 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   }
 }
   }
+
+  /**
+   * Synchronized MetaStoreClient wrapper
+   */
+  final class SynchronizedMetaStoreClient {
+private final IMetaStoreClient client;
+SynchronizedMetaStoreClient(IMetaStoreClient client) {
+  this.client = client;
+}
+
+synchronized long openTxn(String user) throws TException {
+  return client.openTxn(user);
+}
+
+synchronized void commitTxn(long txnid) throws TException {
+  client.commitTxn(txnid);
+}
+
+synchronized void rollbackTxn(long txnid) throws TException {
+  client.rollbackTxn(txnid);
+}
+
+synchronized void heartbeat(long txnid, long lockid) throws TException {
+  client.heartbeat(txnid, lockid);
+}
+
+synchronized ValidTxnList getValidTxns(long currentTxn) throws TException {
+  return client.getValidTxns(currentTxn);
+}
+
+synchronized LockResponse lock(LockRequest request) throws TException {
+  return client.lock(request);
+}
+
+synchronized LockResponse checkLock(long lockid) throws TException {
+  return client.checkLock(lockid);
+}
+
+synchronized void unlock(long lockid) throws TException {
+  client.unlock(lockid);
+}
+
+synchronized ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException {
+  return client.showLocks(showLocksRequest);
+}
+  }
 }
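The wrapper is deliberately coarse: one monitor around each Thrift call, because DbTxnManager, DbLockManager and the background Heartbeater all share a single HiveMetaStoreClient, meaning a single Thrift connection, and interleaved writes would corrupt the protocol stream. A sketch of the contention it serializes (thread roles and variable names are illustrative):

  // Both tasks share the same wrapped client; each call holds the wrapper's
  // monitor for one full round trip, so request/response frames cannot interleave.
  Runnable heartbeatTask = () -> {
    try { client.heartbeat(txnId, lockId); } catch (TException e) { /* record for query thread */ }
  };
  Runnable queryTask = () -> {
    try { client.checkLock(lockId); } catch (TException e) { /* retry or fail the query */ }
  };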



hive git commit: HIVE-13753 : Make metastore client thread safe in DbTxnManager (Wei Zheng, reviewed by Vaibhav Gumashta)

2016-05-16 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 6cb5dbe64 -> bb1ee8167


HIVE-13753 : Make metastore client thread safe in DbTxnManager (Wei Zheng, 
reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bb1ee816
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bb1ee816
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bb1ee816

Branch: refs/heads/master
Commit: bb1ee8167006fb8ae7868502d95ebc31f6ea3dd5
Parents: 6cb5dbe
Author: Wei Zheng 
Authored: Mon May 16 10:24:39 2016 -0700
Committer: Wei Zheng 
Committed: Mon May 16 10:24:39 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |  4 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 50 +++-
 2 files changed, 50 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bb1ee816/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
index 2804514..b4ae1d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
@@ -54,11 +54,11 @@ public class DbLockManager implements HiveLockManager{
   private long MAX_SLEEP;
   //longer term we should always have a txn id and then we won't need to track locks here at all
   private Set locks;
-  private IMetaStoreClient client;
+  private DbTxnManager.SynchronizedMetaStoreClient client;
   private long nextSleep = 50;
   private final HiveConf conf;
 
-  DbLockManager(IMetaStoreClient client, HiveConf conf) {
+  DbLockManager(DbTxnManager.SynchronizedMetaStoreClient client, HiveConf conf) {
 locks = new HashSet<>();
 this.client = client;
 this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hive/blob/bb1ee816/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 4539e71..9ab6169 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -62,7 +62,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
   private DbLockManager lockMgr = null;
-  private IMetaStoreClient client = null;
+  private SynchronizedMetaStoreClient client = null;
   /**
* The Metastore NEXT_TXN_ID.NTXN_NEXT is initialized to 1; it contains the next available
* transaction id.  Thus 1 is the first transaction id.
@@ -520,7 +520,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   }
   try {
 Hive db = Hive.get(conf);
-client = db.getMSC();
+client = new SynchronizedMetaStoreClient(db.getMSC());
 initHeartbeatExecutorService();
   } catch (MetaException e) {
 throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), e);
@@ -615,4 +615,50 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   }
 }
   }
+
+  /**
+   * Synchronized MetaStoreClient wrapper
+   */
+  final class SynchronizedMetaStoreClient {
+private final IMetaStoreClient client;
+SynchronizedMetaStoreClient(IMetaStoreClient client) {
+  this.client = client;
+}
+
+synchronized long openTxn(String user) throws TException {
+  return client.openTxn(user);
+}
+
+synchronized void commitTxn(long txnid) throws TException {
+  client.commitTxn(txnid);
+}
+
+synchronized void rollbackTxn(long txnid) throws TException {
+  client.rollbackTxn(txnid);
+}
+
+synchronized void heartbeat(long txnid, long lockid) throws TException {
+  client.heartbeat(txnid, lockid);
+}
+
+synchronized ValidTxnList getValidTxns(long currentTxn) throws TException {
+  return client.getValidTxns(currentTxn);
+}
+
+synchronized LockResponse lock(LockRequest request) throws TException {
+  return client.lock(request);
+}
+
+synchronized LockResponse checkLock(long lockid) throws TException {
+  return client.checkLock(lockid);
+}
+
+synchronized void unlock(long lockid) throws TException {
+  client.unlock(lockid);
+}
+
+synchronized ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException {
+  return client.showLocks(showLocksRequest);
+}
+  }
 }



hive git commit: HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-05-13 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 917fc8727 -> 14ac6de6c


HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options, ADDENDUM (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/14ac6de6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/14ac6de6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/14ac6de6

Branch: refs/heads/branch-1
Commit: 14ac6de6c172a14c2cb6348ecec6b18fec86b17b
Parents: 917fc87
Author: Wei Zheng 
Authored: Fri May 13 16:27:06 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 13 16:27:06 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/TestDbTxnManager2.java | 14 ++
 1 file changed, 6 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/14ac6de6/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 04e556b..0770298 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -684,14 +684,12 @@ public class TestDbTxnManager2 {
 
 // SHOW LOCKS (no filter)
 List locks = getLocks();
-Assert.assertEquals("Unexpected lock count", 7, locks.size());
-// locks.get(0) is a lock on tmp table in default database used for insert
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=today", locks.get(1));
-// locks.get(2) is a lock on tmp table in default database used for insert
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=tomorrow", locks.get(3));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, 
locks.get(4));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, 
locks.get(5));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, 
locks.get(6));
+Assert.assertEquals("Unexpected lock count", 5, locks.size());
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=today", locks.get(0));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=tomorrow", locks.get(1));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, 
locks.get(2));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, 
locks.get(3));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, 
locks.get(4));
 
 // SHOW LOCKS db2
 locks = getLocksWithFilterOptions(txnMgr3, "db2", null, null);



hive git commit: HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-05-13 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 5cd5aae3c -> a9864f3f8


HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options, ADDENDUM (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a9864f3f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a9864f3f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a9864f3f

Branch: refs/heads/master
Commit: a9864f3f8c95e1dfb7a6424f97b024d384abbf87
Parents: 5cd5aae
Author: Wei Zheng 
Authored: Fri May 13 16:22:57 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 13 16:22:57 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/TestDbTxnManager2.java | 14 ++
 1 file changed, 6 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a9864f3f/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 6986b10..8840fd9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -696,14 +696,12 @@ public class TestDbTxnManager2 {
 
 // SHOW LOCKS (no filter)
 List locks = getLocks();
-Assert.assertEquals("Unexpected lock count", 7, locks.size());
-// locks.get(0) is a lock on tmp table in default database used for insert
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=today", locks.get(1));
-// locks.get(2) is a lock on tmp table in default database used for insert
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=tomorrow", locks.get(3));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, 
locks.get(4));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, 
locks.get(5));
-checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, 
locks.get(6));
+Assert.assertEquals("Unexpected lock count", 5, locks.size());
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=today", locks.get(0));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", 
"ds=tomorrow", locks.get(1));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, 
locks.get(2));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, 
locks.get(3));
+checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, 
locks.get(4));
 
 // SHOW LOCKS db2
 locks = getLocksWithFilterOptions(txnMgr3, "db2", null, null);



hive git commit: HIVE-13730 : Avoid double spilling the same partition when memory threshold is set very low (Wei Zheng, reviewed by Vikram Dixit K)

2016-05-18 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 8c4b99a4e -> 3726ce590


HIVE-13730 : Avoid double spilling the same partition when memory threshold is 
set very low (Wei Zheng, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3726ce59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3726ce59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3726ce59

Branch: refs/heads/master
Commit: 3726ce590f9dcb0e679ed6faaafa1211e9f881d3
Parents: 8c4b99a
Author: Wei Zheng 
Authored: Wed May 18 09:51:31 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 18 09:51:31 2016 -0700

--
 .../persistence/HybridHashTableContainer.java   | 22 +++-
 1 file changed, 21 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3726ce59/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 5552dfb..bb35bae 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -385,6 +385,11 @@ public class HybridHashTableContainer
 memoryUsed += hashPartitions[i].hashMap.memorySize();
   }
 }
+
+if (writeBufferSize * (numPartitions - numPartitionsSpilledOnCreation) > memoryThreshold) {
+  LOG.error("There is not enough memory to allocate " +
+  (numPartitions - numPartitionsSpilledOnCreation) + " hash partitions.");
+}
 assert numPartitionsSpilledOnCreation != numPartitions : "All partitions are directly spilled!" +
 " It is not supported now.";
 LOG.info("Number of partitions created: " + numPartitions);
@@ -558,7 +563,7 @@ public class HybridHashTableContainer
* @return the biggest partition number
*/
   private int biggestPartition() {
-int res = 0;
+int res = -1;
 int maxSize = 0;
 
 // If a partition has been spilled to disk, its size will be 0, i.e. it won't be picked
@@ -574,6 +579,17 @@ public class HybridHashTableContainer
 res = i;
   }
 }
+
+// It can happen that, although there are some partitions in memory, their sizes are all 0.
+// In that case we just pick one and spill.
+if (res == -1) {
+  for (int i = 0; i < hashPartitions.length; i++) {
+if (!isOnDisk(i)) {
+  return i;
+}
+  }
+}
+
 return res;
   }
 
@@ -585,6 +601,10 @@ public class HybridHashTableContainer
   public long spillPartition(int partitionId) throws IOException {
 HashPartition partition = hashPartitions[partitionId];
 int inMemRowCount = partition.hashMap.getNumValues();
+if (inMemRowCount == 0) {
+  LOG.warn("Trying to spill an empty hash partition! It may be due to " +
+  "hive.auto.convert.join.noconditionaltask.size being set too low.");
+}
 
 File file = FileUtils.createLocalDirsTempFile(
 spillLocalDirs, "partition-" + partitionId + "-", null, false);
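Taken together, the hunks above keep the container from spilling the same partition twice when memory is very tight: biggestPartition() starts from a -1 sentinel (so partition 0 is no longer the accidental default), falls back to any partition still in memory when all in-memory sizes are 0, and spillPartition() warns when the chosen partition is empty. A condensed sketch of the resulting flow (illustrative, not verbatim from the class):

  int victim = biggestPartition();   // -1 sentinel prevents falsely electing partition 0
  if (victim >= 0) {
    spillPartition(victim);          // warns if victim holds zero rows, hinting that
  }                                  // noconditionaltask.size is set too low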



hive git commit: HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, reviewed by Eugene Koifman)

2016-05-11 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master ece8226b6 -> 868413a37


HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/868413a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/868413a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/868413a3

Branch: refs/heads/master
Commit: 868413a37dae9d572fea810d1131de510ea3d817
Parents: ece8226
Author: Wei Zheng 
Authored: Wed May 11 14:16:45 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 11 14:16:45 2016 -0700

--
 .../hive/hcatalog/streaming/TestStreaming.java  |   8 +-
 .../hive/metastore/HiveMetaStoreClient.java |   6 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |  10 ++
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  28 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  25 +++-
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |   6 +-
 .../hadoop/hive/ql/plan/ShowLocksDesc.java  |   4 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 126 +++
 .../queries/clientpositive/dbtxnmgr_showlocks.q |  14 +++
 .../clientpositive/dbtxnmgr_showlocks.q.out |  47 ++-
 10 files changed, 265 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/868413a3/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
--
diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index f4ee208..6016425 100644
--- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
@@ -647,13 +648,16 @@ public class TestStreaming {
 //todo: this should ideally check Transaction heartbeat as well, but heartbeat
 //timestamp is not reported yet
 //GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
-ShowLocksResponse response = msClient.showLocks();
+ShowLocksRequest request = new ShowLocksRequest();
+request.setDbname(dbName2);
+request.setTablename(tblName2);
+ShowLocksResponse response = msClient.showLocks(request);
 Assert.assertEquals("Wrong nubmer of locks: " + response, 1, 
response.getLocks().size());
 ShowLocksResponseElement lock = response.getLocks().get(0);
 long acquiredAt = lock.getAcquiredat();
 long heartbeatAt = lock.getLastheartbeat();
 txnBatch.heartbeat();
-response = msClient.showLocks();
+response = msClient.showLocks(request);
 Assert.assertEquals("Wrong number of locks2: " + response, 1, 
response.getLocks().size());
 lock = response.getLocks().get(0);
 Assert.assertEquals("Acquired timestamp didn't match", acquiredAt, 
lock.getAcquiredat());

http://git-wip-us.apache.org/repos/asf/hive/blob/868413a3/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 09091b8..7bca797 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2113,11 +2113,17 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   }
 
   @Override
+  @Deprecated
   public ShowLocksResponse showLocks() throws TException {
 return client.show_locks(new ShowLocksRequest());
   }
 
   @Override
+  public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+return client.show_locks(request);
+  }
+
+  @Override
   public void heartbeat(long txnid, long lockid)
   throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
   TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/868413a3/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java

hive git commit: HIVE-12837 : Better memory estimation/allocation for hybrid grace hash join during hash table loading (Wei Zheng, reviewed by Vikram Dixit K)

2016-05-05 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master a88050bd9 -> cbebb4d78


HIVE-12837 : Better memory estimation/allocation for hybrid grace hash join 
during hash table loading (Wei Zheng, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cbebb4d7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cbebb4d7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cbebb4d7

Branch: refs/heads/master
Commit: cbebb4d78064a9098e4145a0f7532f08885c9b27
Parents: a88050b
Author: Wei Zheng 
Authored: Wed May 4 23:09:08 2016 -0700
Committer: Wei Zheng 
Committed: Wed May 4 23:09:08 2016 -0700

--
 .../persistence/HybridHashTableContainer.java   | 60 +++-
 .../ql/exec/persistence/KeyValueContainer.java  |  4 ++
 2 files changed, 51 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cbebb4d7/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index f5da5a4..5552dfb 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -90,6 +90,7 @@ public class HybridHashTableContainer
   private boolean lastPartitionInMem;   // only one (last one) partition is left in memory
   private final int memoryCheckFrequency;   // how often (# of rows apart) to check if memory is full
   private final HybridHashTableConf nwayConf; // configuration for n-way join
+  private int writeBufferSize;  // write buffer size for BytesBytesMultiHashMap
 
   /** The OI used to deserialize values. We never deserialize keys. */
   private LazyBinaryStructObjectInspector internalValueOi;
@@ -294,7 +295,6 @@ public class HybridHashTableContainer
 this.spillLocalDirs = spillLocalDirs;
 
 this.nwayConf = nwayConf;
-int writeBufferSize;
 int numPartitions;
 if (nwayConf == null) { // binary join
   numPartitions = calcNumPartitions(memoryThreshold, estimatedTableSize, minNumParts, minWbSize);
@@ -327,7 +327,9 @@ public class HybridHashTableContainer
 writeBufferSize : Integer.highestOneBit(writeBufferSize);
 
 // Cap WriteBufferSize to avoid large preallocations
-writeBufferSize = writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize, writeBufferSize);
+// We also want to limit the size of writeBuffer, because we normally have 16 partitions, which
+// makes the spilling prediction (isMemoryFull) too defensive and results in unnecessary spilling
+writeBufferSize = writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize / numPartitions, writeBufferSize);
 
 this.bloom1 = new BloomFilter(newKeyCount);
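The division by numPartitions is the key change, and it is easy to sanity-check with assumed numbers (not from the patch): with maxWbSize = 8 MB and 16 partitions, the old cap let each of the 16 write buffers preallocate up to 8 MB (a 128 MB floor), while the new cap limits each to 8 MB / 16 = 512 KB, keeping the total write-buffer preallocation near one maxWbSize however many partitions are created.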
 
@@ -417,6 +419,11 @@ public class HybridHashTableContainer
 for (HashPartition hp : hashPartitions) {
   if (hp.hashMap != null) {
 memUsed += hp.hashMap.memorySize();
+  } else {
+// also include the still-in-memory sidefile, before it has been truly spilled
+if (hp.sidefileKVContainer != null) {
+  memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize;
+}
   }
 }
 return memoryUsed = memUsed;
@@ -454,6 +461,8 @@ public class HybridHashTableContainer
   private MapJoinKey internalPutRow(KeyValueHelper keyValueHelper,
   Writable currentKey, Writable currentValue) throws SerDeException, IOException {
 
+boolean putToSidefile = false; // by default we put row into partition in memory
+
 // Next, put row into corresponding hash partition
 int keyHash = keyValueHelper.getHashFromKey();
 int partitionId = keyHash & (hashPartitions.length - 1);
@@ -461,15 +470,13 @@ public class HybridHashTableContainer
 
 bloom1.addLong(keyHash);
 
-if (isOnDisk(partitionId) || isHashMapSpilledOnCreation(partitionId)) {
-  KeyValueContainer kvContainer = hashPartition.getSidefileKVContainer();
-  kvContainer.add((HiveKey) currentKey, (BytesWritable) currentValue);
-} else {
-  hashPartition.hashMap.put(keyValueHelper, keyHash); // Pass along hashcode to avoid recalculation
-  totalInMemRowCount++;
-
-  if ((totalInMemRowCount & (this.memoryCheckFrequency - 1)) == 0 &&  // check periodically
-  !lastPartitionInMem) { // If this is the only partition in memory, proceed without check
+if (isOnDisk(partitionId) || isHashMapSpilledOnCreation(partitionId)) { // destination on 

[2/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 4fc2da6..fe25366 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -10850,6 +10850,107 @@ class AbortTxnRequest {
 
 }
 
+class AbortTxnsRequest {
+  static $_TSPEC;
+
+  /**
+   * @var int[]
+   */
+  public $txn_ids = null;
+
+  public function __construct($vals=null) {
+if (!isset(self::$_TSPEC)) {
+  self::$_TSPEC = array(
+1 => array(
+  'var' => 'txn_ids',
+  'type' => TType::LST,
+  'etype' => TType::I64,
+  'elem' => array(
+'type' => TType::I64,
+),
+  ),
+);
+}
+if (is_array($vals)) {
+  if (isset($vals['txn_ids'])) {
+$this->txn_ids = $vals['txn_ids'];
+  }
+}
+  }
+
+  public function getName() {
+return 'AbortTxnsRequest';
+  }
+
+  public function read($input)
+  {
+$xfer = 0;
+$fname = null;
+$ftype = 0;
+$fid = 0;
+$xfer += $input->readStructBegin($fname);
+while (true)
+{
+  $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+  if ($ftype == TType::STOP) {
+break;
+  }
+  switch ($fid)
+  {
+case 1:
+  if ($ftype == TType::LST) {
+$this->txn_ids = array();
+$_size400 = 0;
+$_etype403 = 0;
+$xfer += $input->readListBegin($_etype403, $_size400);
+for ($_i404 = 0; $_i404 < $_size400; ++$_i404)
+{
+  $elem405 = null;
+  $xfer += $input->readI64($elem405);
+  $this->txn_ids []= $elem405;
+}
+$xfer += $input->readListEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+default:
+  $xfer += $input->skip($ftype);
+  break;
+  }
+  $xfer += $input->readFieldEnd();
+}
+$xfer += $input->readStructEnd();
+return $xfer;
+  }
+
+  public function write($output) {
+$xfer = 0;
+$xfer += $output->writeStructBegin('AbortTxnsRequest');
+if ($this->txn_ids !== null) {
+  if (!is_array($this->txn_ids)) {
+throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('txn_ids', TType::LST, 1);
+  {
+$output->writeListBegin(TType::I64, count($this->txn_ids));
+{
+  foreach ($this->txn_ids as $iter406)
+  {
+$xfer += $output->writeI64($iter406);
+  }
+}
+$output->writeListEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
+$xfer += $output->writeFieldStop();
+$xfer += $output->writeStructEnd();
+return $xfer;
+  }
+
+}
+
 class CommitTxnRequest {
   static $_TSPEC;
 
@@ -11187,15 +11288,15 @@ class LockRequest {
 case 1:
   if ($ftype == TType::LST) {
 $this->component = array();
-$_size400 = 0;
-$_etype403 = 0;
-$xfer += $input->readListBegin($_etype403, $_size400);
-for ($_i404 = 0; $_i404 < $_size400; ++$_i404)
+$_size407 = 0;
+$_etype410 = 0;
+$xfer += $input->readListBegin($_etype410, $_size407);
+for ($_i411 = 0; $_i411 < $_size407; ++$_i411)
 {
-  $elem405 = null;
-  $elem405 = new \metastore\LockComponent();
-  $xfer += $elem405->read($input);
-  $this->component []= $elem405;
+  $elem412 = null;
+  $elem412 = new \metastore\LockComponent();
+  $xfer += $elem412->read($input);
+  $this->component []= $elem412;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11251,9 +11352,9 @@ class LockRequest {
   {
 $output->writeListBegin(TType::STRUCT, count($this->component));
 {
-  foreach ($this->component as $iter406)
+  foreach ($this->component as $iter413)
   {
-$xfer += $iter406->write($output);
+$xfer += $iter413->write($output);
   }
 }
 $output->writeListEnd();
@@ -12196,15 +12297,15 @@ class ShowLocksResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->locks = array();
-$_size407 = 0;
-$_etype410 = 0;
-$xfer += $input->readListBegin($_etype410, $_size407);
-for ($_i411 = 0; $_i411 < $_size407; ++$_i411)
+$_size414 = 0;
+$_etype417 = 0;
+$xfer += $input->readListBegin($_etype417, $_size414);
+for ($_i418 = 0; $_i418 < $_size414; 

[3/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 6154d8c..438e368 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -948,6 +948,11 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
*/
   public function abort_txn(\metastore\AbortTxnRequest $rqst);
   /**
+   * @param \metastore\AbortTxnsRequest $rqst
+   * @throws \metastore\NoSuchTxnException
+   */
+  public function abort_txns(\metastore\AbortTxnsRequest $rqst);
+  /**
* @param \metastore\CommitTxnRequest $rqst
* @throws \metastore\NoSuchTxnException
* @throws \metastore\TxnAbortedException
@@ -7532,6 +7537,57 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
 return;
   }
 
+  public function abort_txns(\metastore\AbortTxnsRequest $rqst)
+  {
+$this->send_abort_txns($rqst);
+$this->recv_abort_txns();
+  }
+
+  public function send_abort_txns(\metastore\AbortTxnsRequest $rqst)
+  {
+$args = new \metastore\ThriftHiveMetastore_abort_txns_args();
+$args->rqst = $rqst;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'abort_txns', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('abort_txns', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_abort_txns()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\metastore\ThriftHiveMetastore_abort_txns_result', 
$this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \metastore\ThriftHiveMetastore_abort_txns_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->o1 !== null) {
+  throw $result->o1;
+}
+return;
+  }
+
   public function commit_txn(\metastore\CommitTxnRequest $rqst)
   {
 $this->send_commit_txn($rqst);
@@ -9372,14 +9428,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size472 = 0;
-$_etype475 = 0;
-$xfer += $input->readListBegin($_etype475, $_size472);
-for ($_i476 = 0; $_i476 < $_size472; ++$_i476)
+$_size479 = 0;
+$_etype482 = 0;
+$xfer += $input->readListBegin($_etype482, $_size479);
+for ($_i483 = 0; $_i483 < $_size479; ++$_i483)
 {
-  $elem477 = null;
-  $xfer += $input->readString($elem477);
-  $this->success []= $elem477;
+  $elem484 = null;
+  $xfer += $input->readString($elem484);
+  $this->success []= $elem484;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -9415,9 +9471,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter478)
+  foreach ($this->success as $iter485)
   {
-$xfer += $output->writeString($iter478);
+$xfer += $output->writeString($iter485);
   }
 }
 $output->writeListEnd();
@@ -9548,14 +9604,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size479 = 0;
-$_etype482 = 0;
-$xfer += $input->readListBegin($_etype482, $_size479);
-for ($_i483 = 0; $_i483 < $_size479; ++$_i483)
+$_size486 = 0;
+$_etype489 = 0;
+$xfer += $input->readListBegin($_etype489, $_size486);
+for ($_i490 = 0; $_i490 < $_size486; ++$_i490)
 {
-  $elem484 = null;
-  $xfer += $input->readString($elem484);
-  

[8/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/aecb0c02
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/aecb0c02
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/aecb0c02

Branch: refs/heads/branch-1
Commit: aecb0c02eaf7b2ee5e448c3aaa8bda1274de78cf
Parents: f1950fc
Author: Wei Zheng 
Authored: Tue May 10 11:05:30 2016 -0700
Committer: Wei Zheng 
Committed: Tue May 10 11:05:30 2016 -0700

--
 .../apache/hadoop/hive/common/JavaUtils.java|5 +
 metastore/if/hive_metastore.thrift  |5 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2195 ++-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|  125 +
 .../ThriftHiveMetastore_server.skeleton.cpp |5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 1060 +++---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   42 +
 .../hive/metastore/api/AbortTxnsRequest.java|  438 +++
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/HeartbeatTxnRangeResponse.java  |   64 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../hadoop/hive/metastore/api/LockRequest.java  |   36 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ShowLocksResponse.java   |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 3588 +++---
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1311 ---
 .../src/gen/thrift/gen-php/metastore/Types.php  |  307 +-
 .../hive_metastore/ThriftHiveMetastore-remote   |7 +
 .../hive_metastore/ThriftHiveMetastore.py   |  931 +++--
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  205 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   17 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   58 +
 .../hadoop/hive/metastore/HiveMetaStore.java|6 +
 .../hive/metastore/HiveMetaStoreClient.java |6 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |6 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   69 +-
 .../hadoop/hive/metastore/txn/TxnStore.java |9 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   13 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|9 +
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |   19 +
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|   11 +
 .../hadoop/hive/ql/parse/IdentifiersParser.g|2 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |2 +
 .../hadoop/hive/ql/plan/AbortTxnsDesc.java  |   36 +
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java |   16 +
 .../hadoop/hive/ql/plan/HiveOperation.java  |4 +-
 .../authorization/plugin/HiveOperationType.java |1 +
 .../plugin/sqlstd/Operation2Privilege.java  |3 +-
 .../queries/clientpositive/dbtxnmgr_abort.q |6 +
 .../results/clientpositive/dbtxnmgr_abort.q.out |8 +
 44 files changed, 6842 insertions(+), 4024 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index dc3a4ae..5bdf6f4 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -26,6 +26,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URLClassLoader;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -151,6 +152,10 @@ public final class JavaUtils {
 return "txnid:" + txnId;
   }
 
+  public static String txnIdsToString(List<Long> txnIds) {
+return "Transactions requested to be aborted: " + txnIds.toString();
+  }
+
   private JavaUtils() {
 // prevent instantiation
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/if/hive_metastore.thrift
--
diff --git a/metastore/if/hive_metastore.thrift 
b/metastore/if/hive_metastore.thrift
index f84b2a9..4b5d207 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -573,6 +573,10 @@ struct AbortTxnRequest {
 1: required i64 txnid,
 }
 
+struct AbortTxnsRequest {
+1: required list<i64> txn_ids,
+}
+
 struct CommitTxnRequest 
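On the Java side this struct surfaces as a batch-abort call on the metastore client (per the IMetaStoreClient/HiveMetaStoreClient entries in the diffstat), alongside the new ABORT TRANSACTIONS statement added to the parser. A hedged usage sketch, assuming a connected IMetaStoreClient named msClient:

  // Abort a batch of stuck transactions in one metastore round trip.
  List<Long> txnIds = Arrays.asList(101L, 102L, 103L);
  try {
    msClient.abortTxns(txnIds);
  } catch (NoSuchTxnException e) {
    // at least one id did not name an open transaction
  }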

[4/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index bc417ef..e836154 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -266,6 +266,8 @@ public class ThriftHiveMetastore {
 
 public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, org.apache.thrift.TException;
 
+public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, org.apache.thrift.TException;
 
 public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
 
 public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
@@ -520,6 +522,8 @@ public class ThriftHiveMetastore {
 
 public void abort_txn(AbortTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+public void abort_txns(AbortTxnsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
 public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
 public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3937,6 +3941,29 @@ public class ThriftHiveMetastore {
   return;
 }
 
+public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, org.apache.thrift.TException
+{
+  send_abort_txns(rqst);
+  recv_abort_txns();
+}
+
+public void send_abort_txns(AbortTxnsRequest rqst) throws org.apache.thrift.TException
+{
+  abort_txns_args args = new abort_txns_args();
+  args.setRqst(rqst);
+  sendBase("abort_txns", args);
+}
+
+public void recv_abort_txns() throws NoSuchTxnException, org.apache.thrift.TException
+{
+  abort_txns_result result = new abort_txns_result();
+  receiveBase(result, "abort_txns");
+  if (result.o1 != null) {
+throw result.o1;
+  }
+  return;
+}
+
 public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException
 {
   send_commit_txn(rqst);
@@ -8326,6 +8353,38 @@ public class ThriftHiveMetastore {
   }
 }
 
+public void abort_txns(AbortTxnsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
+  checkReady();
+  abort_txns_call method_call = new abort_txns_call(rqst, resultHandler, 
this, ___protocolFactory, ___transport);
+  this.___currentMethod = method_call;
+  ___manager.call(method_call);
+}
+
+public static class abort_txns_call extends 
org.apache.thrift.async.TAsyncMethodCall {
+  private AbortTxnsRequest rqst;
+  public abort_txns_call(AbortTxnsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException {
+super(client, protocolFactory, transport, resultHandler, false);
+this.rqst = rqst;
+  }
+
+  public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException {
+prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("abort_txns", 
org.apache.thrift.protocol.TMessageType.CALL, 0));
+abort_txns_args args = new abort_txns_args();
+args.setRqst(rqst);
+args.write(prot);
+prot.writeMessageEnd();
+  }
+
+  public void getResult() throws NoSuchTxnException, 
org.apache.thrift.TException {
+if (getState() != 
org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+  throw new IllegalStateException("Method call not finished!");
+}
+org.apache.thrift.transport.TMemoryInputTransport memoryTransport = 
new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport);
+(new Client(prot)).recv_abort_txns();
+  }
+}
+
 public void commit_txn(CommitTxnRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
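
For readers following along, the generated synchronous client above is what HiveMetaStoreClient wraps in practice. A minimal sketch of driving the new method directly, assuming a plain Thrift connection to a metastore that already serves abort_txns (the host, port, and transaction ids here are illustrative only, not part of the patch):

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class AbortTxnsExample {
  public static void main(String[] args) throws Exception {
    // Illustrative endpoint; a real deployment reads this from hive-site.xml.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    try {
      // txn_ids is the required field of AbortTxnsRequest.
      client.abort_txns(new AbortTxnsRequest(Arrays.asList(1L, 2L, 3L)));
    } catch (NoSuchTxnException e) {
      // Thrown when one of the ids is unknown to the metastore.
      System.err.println("abort failed: " + e.getMessage());
    } finally {
      transport.close();
    }
  }
}

The generated AsyncClient variant takes an AsyncMethodCallback instead and surfaces the same outcome through abort_txns_call.getResult(), as shown above.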
 

[6/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index a7f17cd..64b06c4 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -134,6 +134,7 @@ class ThriftHiveMetastoreIf : virtual public  
::facebook::fb303::FacebookService
   virtual void get_open_txns_info(GetOpenTxnsInfoResponse& _return) = 0;
   virtual void open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& 
rqst) = 0;
   virtual void abort_txn(const AbortTxnRequest& rqst) = 0;
+  virtual void abort_txns(const AbortTxnsRequest& rqst) = 0;
   virtual void commit_txn(const CommitTxnRequest& rqst) = 0;
   virtual void lock(LockResponse& _return, const LockRequest& rqst) = 0;
   virtual void check_lock(LockResponse& _return, const CheckLockRequest& rqst) 
= 0;
@@ -535,6 +536,9 @@ class ThriftHiveMetastoreNull : virtual public 
ThriftHiveMetastoreIf , virtual p
   void abort_txn(const AbortTxnRequest& /* rqst */) {
 return;
   }
+  void abort_txns(const AbortTxnsRequest& /* rqst */) {
+return;
+  }
   void commit_txn(const CommitTxnRequest& /* rqst */) {
 return;
   }
@@ -14981,6 +14985,110 @@ class ThriftHiveMetastore_abort_txn_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_abort_txns_args__isset {
+  _ThriftHiveMetastore_abort_txns_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_abort_txns_args__isset;
+
+class ThriftHiveMetastore_abort_txns_args {
+ public:
+
+  ThriftHiveMetastore_abort_txns_args(const 
ThriftHiveMetastore_abort_txns_args&);
+  ThriftHiveMetastore_abort_txns_args& operator=(const 
ThriftHiveMetastore_abort_txns_args&);
+  ThriftHiveMetastore_abort_txns_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txns_args() throw();
+  AbortTxnsRequest rqst;
+
+  _ThriftHiveMetastore_abort_txns_args__isset __isset;
+
+  void __set_rqst(const AbortTxnsRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txns_args & rhs) const
+  {
+if (!(rqst == rhs.rqst))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txns_args &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txns_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_abort_txns_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txns_pargs() throw();
+  const AbortTxnsRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txns_result__isset {
+  _ThriftHiveMetastore_abort_txns_result__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txns_result__isset;
+
+class ThriftHiveMetastore_abort_txns_result {
+ public:
+
+  ThriftHiveMetastore_abort_txns_result(const 
ThriftHiveMetastore_abort_txns_result&);
+  ThriftHiveMetastore_abort_txns_result& operator=(const 
ThriftHiveMetastore_abort_txns_result&);
+  ThriftHiveMetastore_abort_txns_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txns_result() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txns_result__isset __isset;
+
+  void __set_o1(const NoSuchTxnException& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txns_result & rhs) const
+  {
+if (!(o1 == rhs.o1))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txns_result &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txns_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txns_presult__isset {
+  _ThriftHiveMetastore_abort_txns_presult__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txns_presult__isset;
+
+class ThriftHiveMetastore_abort_txns_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txns_presult() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txns_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 typedef struct _ThriftHiveMetastore_commit_txn_args__isset {
   _ThriftHiveMetastore_commit_txn_args__isset() : rqst(false) {}
   bool rqst :1;
@@ -16730,6 +16838,9 @@ class ThriftHiveMetastoreClient : virtual public 
ThriftHiveMetastoreIf, public
   void abort_txn(const AbortTxnRequest& rqst);
   void send_abort_txn(const AbortTxnRequest& rqst);
   void recv_abort_txn();
+  void abort_txns(const AbortTxnsRequest& rqst);
+  void send_abort_txns(const AbortTxnsRequest& rqst);
+  void recv_abort_txns();
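
A server written against this C++ interface has to implement the new pure virtual, and the same holds for the Java Iface shown earlier. A hedged sketch of the handler shape in Java (the body is illustrative only; the real metastore handler performs the actual abort work rather than logging):

import java.util.List;

import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.thrift.TException;

public class AbortTxnsHandlerSketch {
  // Same signature as the generated Iface method.
  public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, TException {
    List<Long> ids = rqst.getTxn_ids();
    System.out.println("aborting " + ids.size() + " transactions: " + ids);
  }
}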

[1/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 f1950fc8a -> aecb0c02e


http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 596fdf5..c59fa3e 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -7483,6 +7483,81 @@ class AbortTxnRequest:
   def __ne__(self, other):
 return not (self == other)
 
+class AbortTxnsRequest:
+  """
+  Attributes:
+   - txn_ids
+  """
+
+  thrift_spec = (
+None, # 0
+(1, TType.LIST, 'txn_ids', (TType.I64,None), None, ), # 1
+  )
+
+  def __init__(self, txn_ids=None,):
+self.txn_ids = txn_ids
+
+  def read(self, iprot):
+if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
+  fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec))
+  return
+iprot.readStructBegin()
+while True:
+  (fname, ftype, fid) = iprot.readFieldBegin()
+  if ftype == TType.STOP:
+break
+  if fid == 1:
+if ftype == TType.LIST:
+  self.txn_ids = []
+  (_etype402, _size399) = iprot.readListBegin()
+  for _i403 in xrange(_size399):
+_elem404 = iprot.readI64()
+self.txn_ids.append(_elem404)
+  iprot.readListEnd()
+else:
+  iprot.skip(ftype)
+  else:
+iprot.skip(ftype)
+  iprot.readFieldEnd()
+iprot.readStructEnd()
+
+  def write(self, oprot):
+if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None:
+  oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, 
self.thrift_spec)))
+  return
+oprot.writeStructBegin('AbortTxnsRequest')
+if self.txn_ids is not None:
+  oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
+  oprot.writeListBegin(TType.I64, len(self.txn_ids))
+  for iter405 in self.txn_ids:
+oprot.writeI64(iter405)
+  oprot.writeListEnd()
+  oprot.writeFieldEnd()
+oprot.writeFieldStop()
+oprot.writeStructEnd()
+
+  def validate(self):
+if self.txn_ids is None:
+  raise TProtocol.TProtocolException(message='Required field txn_ids is 
unset!')
+return
+
+
+  def __hash__(self):
+value = 17
+value = (value * 31) ^ hash(self.txn_ids)
+return value
+
+  def __repr__(self):
+L = ['%s=%r' % (key, value)
+  for key, value in self.__dict__.iteritems()]
+return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+return isinstance(other, self.__class__) and self.__dict__ == 
other.__dict__
+
+  def __ne__(self, other):
+return not (self == other)
+
 class CommitTxnRequest:
   """
   Attributes:
@@ -7711,11 +7786,11 @@ class LockRequest:
   if fid == 1:
 if ftype == TType.LIST:
   self.component = []
-  (_etype402, _size399) = iprot.readListBegin()
-  for _i403 in xrange(_size399):
-_elem404 = LockComponent()
-_elem404.read(iprot)
-self.component.append(_elem404)
+  (_etype409, _size406) = iprot.readListBegin()
+  for _i410 in xrange(_size406):
+_elem411 = LockComponent()
+_elem411.read(iprot)
+self.component.append(_elem411)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -7752,8 +7827,8 @@ class LockRequest:
 if self.component is not None:
   oprot.writeFieldBegin('component', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.component))
-  for iter405 in self.component:
-iter405.write(oprot)
+  for iter412 in self.component:
+iter412.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 if self.txnid is not None:
@@ -8451,11 +8526,11 @@ class ShowLocksResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.locks = []
-  (_etype409, _size406) = iprot.readListBegin()
-  for _i410 in xrange(_size406):
-_elem411 = ShowLocksResponseElement()
-_elem411.read(iprot)
-self.locks.append(_elem411)
+  (_etype416, _size413) = iprot.readListBegin()
+  for _i417 in xrange(_size413):
+_elem418 = ShowLocksResponseElement()
+_elem418.read(iprot)
+self.locks.append(_elem418)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -8472,8 +8547,8 @@ class ShowLocksResponse:
 if self.locks is not None:
   oprot.writeFieldBegin('locks', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, 

[7/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 5efda4f..176b634 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size649;
-::apache::thrift::protocol::TType _etype652;
-xfer += iprot->readListBegin(_etype652, _size649);
-this->success.resize(_size649);
-uint32_t _i653;
-for (_i653 = 0; _i653 < _size649; ++_i653)
+uint32_t _size657;
+::apache::thrift::protocol::TType _etype660;
+xfer += iprot->readListBegin(_etype660, _size657);
+this->success.resize(_size657);
+uint32_t _i661;
+for (_i661 = 0; _i661 < _size657; ++_i661)
 {
-  xfer += iprot->readString(this->success[_i653]);
+  xfer += iprot->readString(this->success[_i661]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter654;
-  for (_iter654 = this->success.begin(); _iter654 != this->success.end(); 
++_iter654)
+  std::vector<std::string> ::const_iterator _iter662;
+  for (_iter662 = this->success.begin(); _iter662 != this->success.end(); 
++_iter662)
   {
-xfer += oprot->writeString((*_iter654));
+xfer += oprot->writeString((*_iter662));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size655;
-::apache::thrift::protocol::TType _etype658;
-xfer += iprot->readListBegin(_etype658, _size655);
-(*(this->success)).resize(_size655);
-uint32_t _i659;
-for (_i659 = 0; _i659 < _size655; ++_i659)
+uint32_t _size663;
+::apache::thrift::protocol::TType _etype666;
+xfer += iprot->readListBegin(_etype666, _size663);
+(*(this->success)).resize(_size663);
+uint32_t _i667;
+for (_i667 = 0; _i667 < _size663; ++_i667)
 {
-  xfer += iprot->readString((*(this->success))[_i659]);
+  xfer += iprot->readString((*(this->success))[_i667]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size660;
-::apache::thrift::protocol::TType _etype663;
-xfer += iprot->readListBegin(_etype663, _size660);
-this->success.resize(_size660);
-uint32_t _i664;
-for (_i664 = 0; _i664 < _size660; ++_i664)
+uint32_t _size668;
+::apache::thrift::protocol::TType _etype671;
+xfer += iprot->readListBegin(_etype671, _size668);
+this->success.resize(_size668);
+uint32_t _i672;
+for (_i672 = 0; _i672 < _size668; ++_i672)
 {
-  xfer += iprot->readString(this->success[_i664]);
+  xfer += iprot->readString(this->success[_i672]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter665;
-  for (_iter665 = this->success.begin(); _iter665 != this->success.end(); 
++_iter665)
+  std::vector<std::string> ::const_iterator _iter673;
+  for (_iter673 = this->success.begin(); _iter673 != this->success.end(); 
++_iter673)
   {
-xfer += oprot->writeString((*_iter665));
+xfer += oprot->writeString((*_iter673));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[5/8] hive git commit: HIVE-12634 : Add command to kill an ACID transaction (Wei Zheng, reviewed by Eugene Koifman)

2016-05-10 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/aecb0c02/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
new file mode 100644
index 000..1bf4655
--- /dev/null
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class AbortTxnsRequest implements 
org.apache.thrift.TBase<AbortTxnsRequest, AbortTxnsRequest._Fields>, 
java.io.Serializable, Cloneable, Comparable<AbortTxnsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AbortTxnsRequest");
+
+  private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = 
new org.apache.thrift.protocol.TField("txn_ids", 
org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = 
new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new 
AbortTxnsRequestStandardSchemeFactory());
+schemes.put(TupleScheme.class, new AbortTxnsRequestTupleSchemeFactory());
+  }
+
+  private List<Long> txn_ids; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+TXN_IDS((short)1, "txn_ids");
+
+private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // TXN_IDS
+  return TXN_IDS;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
+  static {
+Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+tmpMap.put(_Fields.TXN_IDS, new 
org.apache.thrift.meta_data.FieldMetaData("txn_ids", 
org.apache.thrift.TFieldRequirementType.REQUIRED, 
+new 
org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+metaDataMap = Collections.unmodifiableMap(tmpMap);
+
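
Because txn_ids is registered as REQUIRED in the metadata above, the generated validate() refuses a request with the field unset, mirroring the Python validate() shown earlier. A small sketch under that assumption:

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;

public class ValidateSketch {
  public static void main(String[] args) throws Exception {
    AbortTxnsRequest empty = new AbortTxnsRequest(); // txn_ids left unset
    try {
      empty.validate(); // throws: required field txn_ids is unset
    } catch (org.apache.thrift.TException e) {
      System.out.println("rejected as expected: " + e.getMessage());
    }
    AbortTxnsRequest ok = new AbortTxnsRequest(Collections.singletonList(42L));
    ok.validate(); // passes: the required field is set
  }
}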

hive git commit: HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by Pengcheng Xiong)

2016-07-26 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 c92af0dcd -> 2812ca001


HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by 
Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2812ca00
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2812ca00
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2812ca00

Branch: refs/heads/branch-2.1
Commit: 2812ca00100ee1e0252635ab3e74352678cf36bb
Parents: c92af0d
Author: Wei Zheng 
Authored: Tue Jul 26 15:45:38 2016 -0700
Committer: Wei Zheng 
Committed: Tue Jul 26 15:45:38 2016 -0700

--
 .../queries/clientpositive/acid_globallimit.q   |  4 --
 .../clientpositive/acid_globallimit.q.out   | 73 
 .../clientpositive/tez/acid_globallimit.q.out   | 21 --
 3 files changed, 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2812ca00/ql/src/test/queries/clientpositive/acid_globallimit.q
--
diff --git a/ql/src/test/queries/clientpositive/acid_globallimit.q 
b/ql/src/test/queries/clientpositive/acid_globallimit.q
index 7f04c2b..7fa4c07 100644
--- a/ql/src/test/queries/clientpositive/acid_globallimit.q
+++ b/ql/src/test/queries/clientpositive/acid_globallimit.q
@@ -11,10 +11,6 @@ TBLPROPERTIES ("transactional"="true");
 
 insert into table acidtest1 select cint, cstring1 from alltypesorc where cint 
is not null order by cint;
 
-desc formatted acidtest1;
-
-explain
-select cast (c1 as string) from acidtest1 limit 10;
 select cast (c1 as string) from acidtest1 limit 10;
 
 drop table acidtest1;

http://git-wip-us.apache.org/repos/asf/hive/blob/2812ca00/ql/src/test/results/clientpositive/acid_globallimit.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_globallimit.q.out 
b/ql/src/test/results/clientpositive/acid_globallimit.q.out
index d5ed34f..fe738a6 100644
--- a/ql/src/test/results/clientpositive/acid_globallimit.q.out
+++ b/ql/src/test/results/clientpositive/acid_globallimit.q.out
@@ -24,79 +24,6 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@acidtest1
 POSTHOOK: Lineage: acidtest1.c1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: acidtest1.c2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, 
comment:null), ]
-PREHOOK: query: desc formatted acidtest1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@acidtest1
-POSTHOOK: query: desc formatted acidtest1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@acidtest1
-# col_name data_type   comment 
-
-c1 int 
-c2 string  
-
-# Detailed Table Information
-Database:  default  
-#### A masked pattern was here ####
-Retention: 0
-#### A masked pattern was here ####
-Table Type:MANAGED_TABLE
-Table Parameters:   
-   numFiles3   
-   numRows 0   
-   rawDataSize 0   
-   totalSize   101663  
-   transactional   true
-#### A masked pattern was here ####
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-InputFormat:   org.apache.hadoop.hive.ql.io.orc.OrcInputFormat  
-OutputFormat:  org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 
-Compressed:No   
-Num Buckets:   3
-Bucket Columns:[c1] 
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: acidtest1
-Statistics: Num rows: 25415 Data size: 101663 Basic stats: 
COMPLETE Column stats: NONE
-Select Operator
-  expressions: UDFToString(c1) (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 25415 Data size: 101663 

hive git commit: HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by Pengcheng Xiong)

2016-07-26 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master abbe4ecc9 -> 7b6516d7f


HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by 
Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7b6516d7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7b6516d7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7b6516d7

Branch: refs/heads/master
Commit: 7b6516d7fc2916afb7f1654356c1e6cd120d3cb3
Parents: abbe4ec
Author: Wei Zheng 
Authored: Tue Jul 26 15:44:45 2016 -0700
Committer: Wei Zheng 
Committed: Tue Jul 26 15:44:45 2016 -0700

--
 .../queries/clientpositive/acid_globallimit.q   |  4 --
 .../clientpositive/acid_globallimit.q.out   | 73 
 .../clientpositive/tez/acid_globallimit.q.out   | 56 ---
 3 files changed, 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7b6516d7/ql/src/test/queries/clientpositive/acid_globallimit.q
--
diff --git a/ql/src/test/queries/clientpositive/acid_globallimit.q 
b/ql/src/test/queries/clientpositive/acid_globallimit.q
index 7f04c2b..7fa4c07 100644
--- a/ql/src/test/queries/clientpositive/acid_globallimit.q
+++ b/ql/src/test/queries/clientpositive/acid_globallimit.q
@@ -11,10 +11,6 @@ TBLPROPERTIES ("transactional"="true");
 
 insert into table acidtest1 select cint, cstring1 from alltypesorc where cint 
is not null order by cint;
 
-desc formatted acidtest1;
-
-explain
-select cast (c1 as string) from acidtest1 limit 10;
 select cast (c1 as string) from acidtest1 limit 10;
 
 drop table acidtest1;

http://git-wip-us.apache.org/repos/asf/hive/blob/7b6516d7/ql/src/test/results/clientpositive/acid_globallimit.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_globallimit.q.out 
b/ql/src/test/results/clientpositive/acid_globallimit.q.out
index 93246e8..fe738a6 100644
--- a/ql/src/test/results/clientpositive/acid_globallimit.q.out
+++ b/ql/src/test/results/clientpositive/acid_globallimit.q.out
@@ -24,79 +24,6 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@acidtest1
 POSTHOOK: Lineage: acidtest1.c1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: acidtest1.c2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, 
comment:null), ]
-PREHOOK: query: desc formatted acidtest1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@acidtest1
-POSTHOOK: query: desc formatted acidtest1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@acidtest1
-# col_name data_type   comment 
-
-c1 int 
-c2 string  
-
-# Detailed Table Information
-Database:  default  
-#### A masked pattern was here ####
-Retention: 0
-#### A masked pattern was here ####
-Table Type:MANAGED_TABLE
-Table Parameters:   
-   numFiles3   
-   numRows 0   
-   rawDataSize 0   
-   totalSize   102202  
-   transactional   true
-#### A masked pattern was here ####
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-InputFormat:   org.apache.hadoop.hive.ql.io.orc.OrcInputFormat  
-OutputFormat:  org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 
-Compressed:No   
-Num Buckets:   3
-Bucket Columns:[c1] 
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: acidtest1
-Statistics: Num rows: 25550 Data size: 102202 Basic stats: 
COMPLETE Column stats: NONE
-Select Operator
-  expressions: UDFToString(c1) (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 25550 Data size: 102202 

hive git commit: HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by Pengcheng Xiong)

2016-07-26 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 c9615c792 -> 57ca0b57c


HIVE-14339 : Fix UT failure for acid_globallimit.q (Wei Zheng, reviewed by 
Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57ca0b57
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57ca0b57
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57ca0b57

Branch: refs/heads/branch-1
Commit: 57ca0b57c88aac2f4b46f33c2b9f85bab9254ed7
Parents: c9615c7
Author: Wei Zheng 
Authored: Tue Jul 26 16:20:17 2016 -0700
Committer: Wei Zheng 
Committed: Tue Jul 26 16:20:17 2016 -0700

--
 .../queries/clientpositive/acid_globallimit.q   |  2 -
 .../clientpositive/acid_globallimit.q.out   | 38 --
 .../clientpositive/tez/acid_globallimit.q.out   | 41 
 3 files changed, 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/57ca0b57/ql/src/test/queries/clientpositive/acid_globallimit.q
--
diff --git a/ql/src/test/queries/clientpositive/acid_globallimit.q 
b/ql/src/test/queries/clientpositive/acid_globallimit.q
index 5968e6b..4a8b810 100644
--- a/ql/src/test/queries/clientpositive/acid_globallimit.q
+++ b/ql/src/test/queries/clientpositive/acid_globallimit.q
@@ -12,8 +12,6 @@ TBLPROPERTIES ("transactional"="true");
 
 insert into table acidtest1 select cint, cstring1 from alltypesorc where cint 
is not null order by cint;
 
-explain
-select cast (c1 as string) from acidtest1 limit 10;
 select cast (c1 as string) from acidtest1 limit 10;
 
 drop table acidtest1;

http://git-wip-us.apache.org/repos/asf/hive/blob/57ca0b57/ql/src/test/results/clientpositive/acid_globallimit.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_globallimit.q.out 
b/ql/src/test/results/clientpositive/acid_globallimit.q.out
index 783e41b..fe738a6 100644
--- a/ql/src/test/results/clientpositive/acid_globallimit.q.out
+++ b/ql/src/test/results/clientpositive/acid_globallimit.q.out
@@ -24,44 +24,6 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@acidtest1
 POSTHOOK: Lineage: acidtest1.c1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: acidtest1.c2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, 
comment:null), ]
-PREHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select cast (c1 as string) from acidtest1 limit 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: acidtest1
-Statistics: Num rows: 9173 Data size: 101613 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: UDFToString(c1) (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 9173 Data size: 101613 Basic stats: 
COMPLETE Column stats: NONE
-  Limit
-Number of rows: 10
-Statistics: Num rows: 10 Data size: 110 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 10 Data size: 110 Basic stats: 
COMPLETE Column stats: NONE
-  table:
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-Fetch Operator
-  limit: 10
-  Processor Tree:
-ListSink
-
 PREHOOK: query: select cast (c1 as string) from acidtest1 limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acidtest1

http://git-wip-us.apache.org/repos/asf/hive/blob/57ca0b57/ql/src/test/results/clientpositive/tez/acid_globallimit.q.out
--
diff --git a/ql/src/test/results/clientpositive/tez/acid_globallimit.q.out 
b/ql/src/test/results/clientpositive/tez/acid_globallimit.q.out
index 3fa2e7c..fe738a6 100644
--- a/ql/src/test/results/clientpositive/tez/acid_globallimit.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_globallimit.q.out
@@ -24,47 +24,6 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@acidtest1
 POSTHOOK: Lineage: acidtest1.c1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: acidtest1.c2 SIMPLE 

hive git commit: HIVE-13040 : Handle empty bucket creations more efficiently (Ashutosh Chauhan, reviewed by Prasanth Jayachandran)

2016-07-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 8f500f8ad -> 3e51861a2


HIVE-13040 : Handle empty bucket creations more efficiently (Ashutosh Chauhan, 
reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e51861a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e51861a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e51861a

Branch: refs/heads/branch-1
Commit: 3e51861a215f62e842489f584a87b5be96316a41
Parents: 8f500f8
Author: Wei Zheng 
Authored: Thu Jul 14 15:09:48 2016 -0700
Committer: Wei Zheng 
Committed: Thu Jul 14 15:09:48 2016 -0700

--
 .../hadoop/hive/ql/exec/StatsNoJobTask.java | 67 ++-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  5 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 17 +++--
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |  8 +++
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 16 +++--
 .../hadoop/hive/ql/io/orc/OrcOutputFormat.java  | 17 ++---
 .../hive/ql/txn/compactor/CompactorMR.java  |  2 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  2 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 10 +--
 .../dynpart_sort_opt_vectorization.q.out|  4 +-
 .../tez/dynpart_sort_opt_vectorization.q.out|  8 +--
 .../tez/dynpart_sort_optimization.q.out | 70 +---
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |  2 +-
 13 files changed, 121 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3e51861a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
index 0d99cbc..fe49e15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
@@ -123,7 +123,7 @@ public class StatsNoJobTask extends Task 
implements Serializable
 
   class StatsCollection implements Runnable {
 
-private Partition partn;
+private final Partition partn;
 
 public StatsCollection(Partition part) {
   this.partn = part;
@@ -148,7 +148,7 @@ public class StatsNoJobTask extends Task 
implements Serializable
 boolean statsAvailable = false;
 for(FileStatus file: fileList) {
   if (!file.isDir()) {
-InputFormat inputFormat = (InputFormat) 
ReflectionUtil.newInstance(
+InputFormat inputFormat = ReflectionUtil.newInstance(
 partn.getInputFormatClass(), jc);
 InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0,
 new String[] { partn.getLocation() });
@@ -193,7 +193,7 @@ public class StatsNoJobTask extends Task 
implements Serializable
 "Failed with exception " + e.getMessage() + "\n" + 
StringUtils.stringifyException(e));
 
 // Before updating the partition params, if any partition params is 
null
-// and if statsReliable is true then updatePartition() function  will 
fail 
+// and if statsReliable is true then updatePartition() function  will 
fail
 // the task by returning 1
 if (work.isStatsReliable()) {
   partUpdates.put(tPart.getSd().getLocation(), null);
@@ -244,40 +244,45 @@ public class StatsNoJobTask extends Task 
implements Serializable
   boolean statsAvailable = false;
   for(FileStatus file: fileList) {
 if (!file.isDir()) {
-  InputFormat inputFormat = (InputFormat) 
ReflectionUtil.newInstance(
+  InputFormat inputFormat = ReflectionUtil.newInstance(
   table.getInputFormatClass(), jc);
-  InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new 
String[] { table
-  .getDataLocation().toString() });
-  org.apache.hadoop.mapred.RecordReader recordReader =
-  inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
-  StatsProvidingRecordReader statsRR;
-  if (recordReader instanceof StatsProvidingRecordReader) {
-statsRR = (StatsProvidingRecordReader) recordReader;
-numRows += statsRR.getStats().getRowCount();
-rawDataSize += statsRR.getStats().getRawDataSize();
-fileSize += file.getLen();
+  InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new 
String[]{table
+  .getDataLocation().toString()});
+  if (file.getLen() == 0) {
 numFiles += 1;
 statsAvailable = true;
+  } else {
+
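
The rewritten loop above counts zero-length files without ever instantiating a record reader for them, which is where the savings come from. A standalone sketch of that control flow (readRowCount is a hypothetical stand-in for the StatsProvidingRecordReader path):

import org.apache.hadoop.fs.FileStatus;

public class EmptyFileStatsSketch {
  long numFiles = 0;
  long numRows = 0;

  void collect(FileStatus[] files) {
    for (FileStatus file : files) {
      if (file.isDir()) {
        continue; // directories carry no row data
      }
      numFiles += 1;
      if (file.getLen() > 0) {
        // Only non-empty buckets are worth opening a reader for.
        numRows += readRowCount(file);
      }
      // Empty buckets are counted as files and otherwise skipped.
    }
  }

  // Hypothetical placeholder; the real task reads ORC-provided stats.
  long readRowCount(FileStatus file) {
    return 0L;
  }
}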

hive git commit: HIVE-14222 : PTF: Operator initialization does not clean state (Wei Zheng, reviewed by Gunther Hagleitner)

2016-07-14 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 48f329701 -> b48850860


HIVE-14222 : PTF: Operator initialization does not clean state (Wei Zheng, 
reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b4885086
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b4885086
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b4885086

Branch: refs/heads/branch-2.1
Commit: b4885086077f8f20d5c624942adf6c42f0a06da1
Parents: 48f3297
Author: Wei Zheng 
Authored: Thu Jul 14 15:20:18 2016 -0700
Committer: Wei Zheng 
Committed: Thu Jul 14 15:22:01 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b4885086/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
index 37ae8fe..90e64b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
@@ -79,6 +79,7 @@ public class PTFOperator extends Operator implements 
Serializable {
 super.initializeOp(jobConf);
 hiveConf = jobConf;
 isMapOperator = conf.isMapSide();
+currentKeys = null;
 
 reconstructQueryDef(hiveConf);
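
The single added assignment is the whole fix: operator instances can be re-initialized (for example under container reuse), so any per-run key state must be cleared in initializeOp() before the query definition is rebuilt. A generic sketch of the pattern, with illustrative names rather than Hive's actual fields:

public class ResettableOperatorSketch {
  private Object[] currentKeys; // run-scoped state; must not survive re-init

  protected void initializeOp() {
    // Reset run-scoped fields first, so a reused instance starts clean.
    currentKeys = null;
    // ... then rebuild the query definition, inspectors, etc.
  }
}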
 


