hive git commit: HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive
Updated Branches:
  refs/heads/branch-1 edf89a6a0 -> 648f19307


HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/648f1930
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/648f1930
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/648f1930

Branch: refs/heads/branch-1
Commit: 648f19307cab1b55e44b930ffaf043cc93cd4d46
Parents: edf89a6
Author: Wei Zheng
Authored: Mon Apr 25 11:17:11 2016 -0700
Committer: Wei Zheng
Committed: Mon Apr 25 11:19:35 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java      |  8
 .../hadoop/hive/metastore/txn/TxnHandler.java      | 18 +++---
 .../hadoop/hive/metastore/txn/TestTxnHandler.java  | 15 +++
 3 files changed, 38 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0d31131..7c93e44 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -173,6 +173,7 @@ public class HiveConf extends Configuration {
       HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
       HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
       HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
+      HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
       HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
@@ -1492,6 +1493,13 @@ public class HiveConf extends Configuration {
         "transactions that Hive has to track at any given time, which may negatively affect\n" +
         "read performance."),
+    HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
+        "of regular expression patterns for SQL state, error code, and error message of\n" +
+        "retryable SQLExceptions, that's suitable for the metastore DB.\n" +
+        "For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
+        "The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
+        "ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
+
     HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
         "Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
         "Set this to true on one instance of the Thrift metastore service as part of turning\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index ed4a3c2..a64e7c8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -51,6 +51,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;

 /**
  * A handler to answer transaction related calls that come into the metastore
@@ -1559,7 +1560,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       }
       else {
         LOG.error("Too many repeated deadlocks in " + caller + ", giving up.");
       }
-    } else if (isRetryable(e)) {
+    } else if (isRetryable(conf, e)) {
       //in MSSQL this means Communication Link Failure
       if (retryNum++ < retryLimit) {
         LOG.warn("Retryable error detected in " + caller + ". Will wait " + retryInterval +
@@ -2658,7 +2659,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   /**
    * Returns true if {@code ex} should be retried
    */
-  private static boolean isRetryable(Exception ex) {
+  static boolean isRetryable(HiveConf conf, Exception ex) {
     if(ex instanceof SQLException) {
       SQLException sqlException = (SQLException)ex;
       if("08S01".equalsIgnoreCase(sqlException.getSQLState())) {
@@ -2669,6 +2670,17 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
[5/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java -- diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java new file mode 100644 index 000..97b1219 --- /dev/null +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java @@ -0,0 +1,601 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.service; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class HiveServerException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveServerException"); + + private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)2); + private static final org.apache.thrift.protocol.TField SQLSTATE_FIELD_DESC = new org.apache.thrift.protocol.TField("SQLState", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map schemes = new HashMap (); + static { +schemes.put(StandardScheme.class, new HiveServerExceptionStandardSchemeFactory()); +schemes.put(TupleScheme.class, new HiveServerExceptionTupleSchemeFactory()); + } + + private String message; // required + private int errorCode; // required + private String SQLState; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +MESSAGE((short)1, "message"), +ERROR_CODE((short)2, "errorCode"), +SQLSTATE((short)3, "SQLState"); + +private static final Map byName = new HashMap (); + +static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { +byName.put(field.getFieldName(), field); + } +} + +/** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ +public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { +case 1: // MESSAGE + return MESSAGE; +case 2: // ERROR_CODE + return ERROR_CODE; +case 3: // SQLSTATE + return SQLSTATE; +default: + return null; + } +} + +/** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ +public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; +} + +/** + * Find the _Fields constant that matches name, or null if its not found. + */ +public static _Fields findByName(String name) { + return byName.get(name); +} + +private final short _thriftId; +private final String _fieldName; + +_Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; +} + +public short getThriftFieldId() { + return _thriftId; +} + +public String getFieldName() { + return _fieldName; +} + } + + // isset id assignments + private
[2/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote -- diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote new file mode 100755 index 000..9a2322f --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote @@ -0,0 +1,1242 @@ +#!/usr/bin/env python +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +import sys +import pprint +from urlparse import urlparse +from thrift.transport import TTransport +from thrift.transport import TSocket +from thrift.transport import TSSLSocket +from thrift.transport import THttpClient +from thrift.protocol import TBinaryProtocol + +from hive_service import ThriftHive +from hive_service.ttypes import * + +if len(sys.argv) <= 1 or sys.argv[1] == '--help': + print('') + print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]') + print('') + print('Functions:') + print(' void execute(string query)') + print(' string fetchOne()') + print(' fetchN(i32 numRows)') + print(' fetchAll()') + print(' Schema getSchema()') + print(' Schema getThriftSchema()') + print(' HiveClusterStatus getClusterStatus()') + print(' QueryPlan getQueryPlan()') + print(' void clean()') + print(' string getMetaConf(string key)') + print(' void setMetaConf(string key, string value)') + print(' void create_database(Database database)') + print(' Database get_database(string name)') + print(' void drop_database(string name, bool deleteData, bool cascade)') + print(' get_databases(string pattern)') + print(' get_all_databases()') + print(' void alter_database(string dbname, Database db)') + print(' Type get_type(string name)') + print(' bool create_type(Type type)') + print(' bool drop_type(string type)') + print(' get_type_all(string name)') + print(' get_fields(string db_name, string table_name)') + print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') + print(' get_schema(string db_name, string table_name)') + print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') + print(' void create_table(Table tbl)') + print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') + print(' void drop_table(string dbname, string name, bool deleteData)') + print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)') + print(' get_tables(string db_name, string pattern)') + print(' get_table_meta(string db_patterns, string tbl_patterns, tbl_types)') + print(' get_all_tables(string db_name)') + print(' Table get_table(string dbname, string tbl_name)') + print(' get_table_objects_by_name(string dbname, tbl_names)') + print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') + print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') + print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') + print(' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)') + print(' Partition add_partition(Partition new_part)') + print(' Partition add_partition_with_environment_context(Partition new_part, 
EnvironmentContext environment_context)') + print(' i32 add_partitions( new_parts)') + print(' i32 add_partitions_pspec( new_parts)') + print(' Partition append_partition(string db_name, string tbl_name, part_vals)') + print(' AddPartitionsResult add_partitions_req(AddPartitionsRequest request)') + print(' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context)') + print(' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)') + print(' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)') + print(' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)') + print(' bool drop_partition_with_environment_context(string db_name, string tbl_name, part_vals, bool deleteData, EnvironmentContext environment_context)') + print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)') + print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext
[8/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98303635
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98303635
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98303635

Branch: refs/heads/master
Commit: 983036358633cfbb6aec30003faac8280372b2c9
Parents: 6a1f8a8
Author: Wei Zheng
Authored: Mon Apr 4 11:18:25 2016 -0700
Committer: Wei Zheng
Committed: Mon Apr 4 11:18:25 2016 -0700

--
 service-rpc/src/gen/thrift/gen-py/__init__.py      |    0
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp      | 3544
 service/src/gen/thrift/gen-cpp/ThriftHive.h        | 1224 +++
 .../gen-cpp/ThriftHive_server.skeleton.cpp         |   84 +
 .../thrift/gen-cpp/hive_service_constants.cpp      |   17 +
 .../gen/thrift/gen-cpp/hive_service_constants.h    |   24 +
 .../gen/thrift/gen-cpp/hive_service_types.cpp      |  351 +
 .../src/gen/thrift/gen-cpp/hive_service_types.h    |  176 +
 .../hadoop/hive/service/HiveClusterStatus.java     |  901 ++
 .../hive/service/HiveServerException.java          |  601 ++
 .../hadoop/hive/service/JobTrackerState.java       |   45 +
 .../apache/hadoop/hive/service/ThriftHive.java     | 7784 ++
 service/src/gen/thrift/gen-php/ThriftHive.php      | 1943 +
 service/src/gen/thrift/gen-php/Types.php           |  338 +
 service/src/gen/thrift/gen-py/__init__.py          |    0
 .../gen-py/hive_service/ThriftHive-remote          | 1242 +++
 .../thrift/gen-py/hive_service/ThriftHive.py       | 1674
 .../gen/thrift/gen-py/hive_service/__init__.py     |    1 +
 .../gen/thrift/gen-py/hive_service/constants.py    |   11 +
 .../gen/thrift/gen-py/hive_service/ttypes.py       |  260 +
 .../gen/thrift/gen-rb/hive_service_constants.rb    |    9 +
 .../src/gen/thrift/gen-rb/hive_service_types.rb    |   68 +
 service/src/gen/thrift/gen-rb/thrift_hive.rb       |  555 ++
 23 files changed, 20852 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service-rpc/src/gen/thrift/gen-py/__init__.py
--
diff --git a/service-rpc/src/gen/thrift/gen-py/__init__.py b/service-rpc/src/gen/thrift/gen-py/__init__.py
new file mode 100644
index 000..e69de29
[6/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.h -- diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.h b/service/src/gen/thrift/gen-cpp/ThriftHive.h new file mode 100644 index 000..902bd4b --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/ThriftHive.h @@ -0,0 +1,1224 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#ifndef ThriftHive_H +#define ThriftHive_H + +#include +#include +#include "hive_service_types.h" +#include "ThriftHiveMetastore.h" + +namespace Apache { namespace Hadoop { namespace Hive { + +#ifdef _WIN32 + #pragma warning( push ) + #pragma warning (disable : 4250 ) //inheriting methods via dominance +#endif + +class ThriftHiveIf : virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreIf { + public: + virtual ~ThriftHiveIf() {} + virtual void execute(const std::string& query) = 0; + virtual void fetchOne(std::string& _return) = 0; + virtual void fetchN(std::vector & _return, const int32_t numRows) = 0; + virtual void fetchAll(std::vector & _return) = 0; + virtual void getSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0; + virtual void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0; + virtual void getClusterStatus(HiveClusterStatus& _return) = 0; + virtual void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) = 0; + virtual void clean() = 0; +}; + +class ThriftHiveIfFactory : virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreIfFactory { + public: + typedef ThriftHiveIf Handler; + + virtual ~ThriftHiveIfFactory() {} + + virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0; + virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) = 0; +}; + +class ThriftHiveIfSingletonFactory : virtual public ThriftHiveIfFactory { + public: + ThriftHiveIfSingletonFactory(const boost::shared_ptr& iface) : iface_(iface) {} + virtual ~ThriftHiveIfSingletonFactory() {} + + virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo&) { +return iface_.get(); + } + virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) {} + + protected: + boost::shared_ptr iface_; +}; + +class ThriftHiveNull : virtual public ThriftHiveIf , virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreNull { + public: + virtual ~ThriftHiveNull() {} + void execute(const std::string& /* query */) { +return; + } + void fetchOne(std::string& /* _return */) { +return; + } + void fetchN(std::vector & /* _return */, const int32_t /* numRows */) { +return; + } + void fetchAll(std::vector & /* _return */) { +return; + } + void getSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) { +return; + } + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) { +return; + } + void getClusterStatus(HiveClusterStatus& /* _return */) { +return; + } + void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& /* _return */) { +return; + } + void clean() { +return; + } +}; + +typedef struct _ThriftHive_execute_args__isset { + _ThriftHive_execute_args__isset() : query(false) {} + bool query :1; +} _ThriftHive_execute_args__isset; + +class ThriftHive_execute_args { + public: + + ThriftHive_execute_args(const ThriftHive_execute_args&); + ThriftHive_execute_args& operator=(const ThriftHive_execute_args&); + ThriftHive_execute_args() : query() { + } + + virtual ~ThriftHive_execute_args() throw(); + std::string query; + + 
_ThriftHive_execute_args__isset __isset; + + void __set_query(const std::string& val); + + bool operator == (const ThriftHive_execute_args & rhs) const + { +if (!(query == rhs.query)) + return false; +return true; + } + bool operator != (const ThriftHive_execute_args ) const { +return !(*this == rhs); + } + + bool operator < (const ThriftHive_execute_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_execute_pargs { + public: + + + virtual ~ThriftHive_execute_pargs() throw(); + const std::string* query; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_execute_result__isset { + _ThriftHive_execute_result__isset() : ex(false) {} + bool ex :1; +} _ThriftHive_execute_result__isset; + +class ThriftHive_execute_result { + public: + + ThriftHive_execute_result(const ThriftHive_execute_result&); + ThriftHive_execute_result& operator=(const ThriftHive_execute_result&); + ThriftHive_execute_result() { + } + + virtual ~ThriftHive_execute_result() throw(); + HiveServerException ex; + + _ThriftHive_execute_result__isset __isset; + + void __set_ex(const
[7/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.cpp -- diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.cpp b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp new file mode 100644 index 000..a5448f0 --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp @@ -0,0 +1,3544 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#include "ThriftHive.h" + +namespace Apache { namespace Hadoop { namespace Hive { + + +ThriftHive_execute_args::~ThriftHive_execute_args() throw() { +} + + +uint32_t ThriftHive_execute_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { +xfer += iprot->readFieldBegin(fname, ftype, fid); +if (ftype == ::apache::thrift::protocol::T_STOP) { + break; +} +switch (fid) +{ + case 1: +if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->query); + this->__isset.query = true; +} else { + xfer += iprot->skip(ftype); +} +break; + default: +xfer += iprot->skip(ftype); +break; +} +xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_execute_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_execute_args"); + + xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->query); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_pargs::~ThriftHive_execute_pargs() throw() { +} + + +uint32_t ThriftHive_execute_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_execute_pargs"); + + xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->query))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_result::~ThriftHive_execute_result() throw() { +} + + +uint32_t ThriftHive_execute_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { +xfer += iprot->readFieldBegin(fname, ftype, fid); +if (ftype == ::apache::thrift::protocol::T_STOP) { + break; +} +switch (fid) +{ + case 1: +if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; +} else { + xfer += iprot->skip(ftype); +} +break; + default: +xfer += iprot->skip(ftype); +break; +} +xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
ThriftHive_execute_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_execute_result"); + + if (this->__isset.ex) { +xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); +xfer += this->ex.write(oprot); +xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_presult::~ThriftHive_execute_presult() throw() { +} + + +uint32_t ThriftHive_execute_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { +xfer += iprot->readFieldBegin(fname, ftype, fid); +if (ftype == ::apache::thrift::protocol::T_STOP) { + break; +} +switch (fid) +{ + case 1: +if (ftype ==
[4/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java -- diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java new file mode 100644 index 000..934a8a5 --- /dev/null +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java @@ -0,0 +1,7784 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.service; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class ThriftHive { + + public interface Iface extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface { + +public void execute(String query) throws HiveServerException, org.apache.thrift.TException; + +public String fetchOne() throws HiveServerException, org.apache.thrift.TException; + +public List fetchN(int numRows) throws HiveServerException, org.apache.thrift.TException; + +public List fetchAll() throws HiveServerException, org.apache.thrift.TException; + +public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws HiveServerException, org.apache.thrift.TException; + +public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() throws HiveServerException, org.apache.thrift.TException; + +public HiveClusterStatus getClusterStatus() throws HiveServerException, org.apache.thrift.TException; + +public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws HiveServerException, org.apache.thrift.TException; + +public void clean() throws org.apache.thrift.TException; + + } + + public interface AsyncIface extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore .AsyncIface { + +public void execute(String query, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void fetchOne(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void fetchN(int numRows, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void fetchAll(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void getSchema(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void getThriftSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException; + +public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + +public void clean(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + } + + public static class Client extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client implements Iface { +public static class Factory implements org.apache.thrift.TServiceClientFactory { + public Factory() {} + public Client getClient(org.apache.thrift.protocol.TProtocol prot) { +return new Client(prot); + } + public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { +return new Client(iprot, oprot); + } +} + +public Client(org.apache.thrift.protocol.TProtocol prot) +{ + super(prot, prot); +} + +public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + super(iprot, oprot); +} + +public void execute(String query) throws
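Since this patch only restores the generated HiveServer1 classes, nothing above shows them in use. For orientation, here is a minimal, hypothetical sketch of driving the regenerated ThriftHive Java client; the host, port, and query are placeholders, and error handling is omitted.

import java.util.List;

import org.apache.hadoop.hive.service.ThriftHive;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftHiveClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; 10000 is only the conventional HiveServer port.
    TTransport transport = new TSocket("localhost", 10000);
    transport.open();
    ThriftHive.Client client = new ThriftHive.Client(new TBinaryProtocol(transport));
    try {
      client.execute("SELECT * FROM src");    // old HiveServer1-style query submission
      List<String> rows = client.fetchAll();  // fetchOne()/fetchN(n) are the incremental variants
      for (String row : rows) {
        System.out.println(row);
      }
    } finally {
      transport.close();
    }
  }
}

The same execute/fetch call sequence appears in the regenerated C++, PHP, and Python clients shown in the other parts of this series.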
[3/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-php/ThriftHive.php -- diff --git a/service/src/gen/thrift/gen-php/ThriftHive.php b/service/src/gen/thrift/gen-php/ThriftHive.php new file mode 100644 index 000..23dc8fd --- /dev/null +++ b/service/src/gen/thrift/gen-php/ThriftHive.php @@ -0,0 +1,1943 @@ +send_execute($query); +$this->recv_execute(); + } + + public function send_execute($query) + { +$args = new \ThriftHive_execute_args(); +$args->query = $query; +$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); +if ($bin_accel) +{ + thrift_protocol_write_binary($this->output_, 'execute', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); +} +else +{ + $this->output_->writeMessageBegin('execute', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); +} + } + + public function recv_execute() + { +$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); +if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_execute_result', $this->input_->isStrictRead()); +else +{ + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { +$x = new TApplicationException(); +$x->read($this->input_); +$this->input_->readMessageEnd(); +throw $x; + } + $result = new \ThriftHive_execute_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); +} +if ($result->ex !== null) { + throw $result->ex; +} +return; + } + + public function fetchOne() + { +$this->send_fetchOne(); +return $this->recv_fetchOne(); + } + + public function send_fetchOne() + { +$args = new \ThriftHive_fetchOne_args(); +$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); +if ($bin_accel) +{ + thrift_protocol_write_binary($this->output_, 'fetchOne', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); +} +else +{ + $this->output_->writeMessageBegin('fetchOne', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); +} + } + + public function recv_fetchOne() + { +$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); +if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_fetchOne_result', $this->input_->isStrictRead()); +else +{ + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { +$x = new TApplicationException(); +$x->read($this->input_); +$this->input_->readMessageEnd(); +throw $x; + } + $result = new \ThriftHive_fetchOne_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); +} +if ($result->success !== null) { + return $result->success; +} +if ($result->ex !== null) { + throw $result->ex; +} +throw new \Exception("fetchOne failed: unknown result"); + } + + public function fetchN($numRows) + { +$this->send_fetchN($numRows); +return $this->recv_fetchN(); + } + + public function send_fetchN($numRows) + { +$args = new \ThriftHive_fetchN_args(); +$args->numRows = $numRows; +$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary'); +if ($bin_accel) +{ + thrift_protocol_write_binary($this->output_, 'fetchN', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); +} +else +{ + $this->output_->writeMessageBegin('fetchN', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); +} + } + + public function recv_fetchN() + { +$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); +if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_fetchN_result', $this->input_->isStrictRead()); +else +{ + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype ==
[1/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)
Repository: hive Updated Branches: refs/heads/master 6a1f8a835 -> 983036358 http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py -- diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py new file mode 100644 index 000..978c2a3 --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py @@ -0,0 +1,1674 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +from thrift.Thrift import TType, TMessageType, TException, TApplicationException +import hive_metastore.ThriftHiveMetastore +import logging +from ttypes import * +from thrift.Thrift import TProcessor +from thrift.transport import TTransport +from thrift.protocol import TBinaryProtocol, TProtocol +try: + from thrift.protocol import fastbinary +except: + fastbinary = None + + +class Iface(hive_metastore.ThriftHiveMetastore.Iface): + def execute(self, query): +""" +Parameters: + - query +""" +pass + + def fetchOne(self): +pass + + def fetchN(self, numRows): +""" +Parameters: + - numRows +""" +pass + + def fetchAll(self): +pass + + def getSchema(self): +pass + + def getThriftSchema(self): +pass + + def getClusterStatus(self): +pass + + def getQueryPlan(self): +pass + + def clean(self): +pass + + +class Client(hive_metastore.ThriftHiveMetastore.Client, Iface): + def __init__(self, iprot, oprot=None): +hive_metastore.ThriftHiveMetastore.Client.__init__(self, iprot, oprot) + + def execute(self, query): +""" +Parameters: + - query +""" +self.send_execute(query) +self.recv_execute() + + def send_execute(self, query): +self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid) +args = execute_args() +args.query = query +args.write(self._oprot) +self._oprot.writeMessageEnd() +self._oprot.trans.flush() + + def recv_execute(self): +iprot = self._iprot +(fname, mtype, rseqid) = iprot.readMessageBegin() +if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x +result = execute_result() +result.read(iprot) +iprot.readMessageEnd() +if result.ex is not None: + raise result.ex +return + + def fetchOne(self): +self.send_fetchOne() +return self.recv_fetchOne() + + def send_fetchOne(self): +self._oprot.writeMessageBegin('fetchOne', TMessageType.CALL, self._seqid) +args = fetchOne_args() +args.write(self._oprot) +self._oprot.writeMessageEnd() +self._oprot.trans.flush() + + def recv_fetchOne(self): +iprot = self._iprot +(fname, mtype, rseqid) = iprot.readMessageBegin() +if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x +result = fetchOne_result() +result.read(iprot) +iprot.readMessageEnd() +if result.success is not None: + return result.success +if result.ex is not None: + raise result.ex +raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchOne failed: unknown result") + + def fetchN(self, numRows): +""" +Parameters: + - numRows +""" +self.send_fetchN(numRows) +return self.recv_fetchN() + + def send_fetchN(self, numRows): +self._oprot.writeMessageBegin('fetchN', TMessageType.CALL, self._seqid) +args = fetchN_args() +args.numRows = numRows +args.write(self._oprot) +self._oprot.writeMessageEnd() +self._oprot.trans.flush() + + def recv_fetchN(self): +iprot = self._iprot +(fname, mtype, rseqid) = iprot.readMessageBegin() +if mtype == TMessageType.EXCEPTION: + x 
= TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x +result = fetchN_result() +result.read(iprot) +iprot.readMessageEnd() +if result.success is not None: + return result.success +if result.ex is not None: + raise result.ex +raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchN failed: unknown result") + + def fetchAll(self): +self.send_fetchAll() +return self.recv_fetchAll() + + def send_fetchAll(self): +self._oprot.writeMessageBegin('fetchAll', TMessageType.CALL, self._seqid) +args = fetchAll_args() +args.write(self._oprot) +self._oprot.writeMessageEnd() +self._oprot.trans.flush() + + def recv_fetchAll(self): +iprot = self._iprot +(fname, mtype, rseqid) = iprot.readMessageBegin() +if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x +result = fetchAll_result() +
[2/2] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)
HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eda73032 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eda73032 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eda73032 Branch: refs/heads/branch-2.0 Commit: eda7303209c2d59428261201c7926904ba127bc3 Parents: 1785ca0 Author: Wei ZhengAuthored: Thu Mar 24 22:18:32 2016 -0700 Committer: Wei Zheng Committed: Wed Mar 30 15:10:50 2016 -0700 -- .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++-- .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++-- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 974184f..64edfb6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -227,8 +227,9 @@ public class Cleaner extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 9d71c5a..465896d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -222,8 +222,8 @@ public class Initiator extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { -LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + -ci.getFullPartitionName()); +LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + +ci.getFullPartitionName(), exception); } return compactionType; } http://git-wip-us.apache.org/repos/asf/hive/blob/eda73032/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 8dbe3d4..cdae26f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -175,8 +175,9 @@ public class Worker extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCompacted(ci); } catch (Exception e) {
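The addendum only swaps the LOG.error arguments, but the effect matters: with the throwable concatenated into the message, only its toString() was logged; passing it as the last argument lets the logging framework print the full stack trace. A small illustrative sketch follows (the wrapper class and parameters are hypothetical):

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UgiCleanupLogging {
  private static final Logger LOG = LoggerFactory.getLogger(UgiCleanupLogging.class);

  // Stand-ins for the UGI and CompactionInfo.getFullPartitionName() values
  // used by the compactor threads.
  static void logCleanupFailure(Object ugi, String partitionName, IOException exception) {
    // Before the addendum the throwable was folded into the message string:
    //   LOG.error("Could not clean up file-system handles for UGI: " + ugi,
    //       exception + " for " + partitionName);
    // so the stack trace was lost. Passing the throwable as the final argument
    // makes the logger record it in full:
    LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for "
        + partitionName, exception);
  }
}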
[1/2] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-2.0 f4468ce68 -> eda730320 HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1785ca00 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1785ca00 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1785ca00 Branch: refs/heads/branch-2.0 Commit: 1785ca000596177c28511dc151deb967c3ce1710 Parents: f4468ce Author: Wei ZhengAuthored: Thu Mar 24 17:29:59 2016 -0700 Committer: Wei Zheng Committed: Wed Mar 30 15:10:42 2016 -0700 -- .../hive/hcatalog/streaming/HiveEndPoint.java | 11 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++ .../hive/ql/txn/compactor/CompactorThread.java | 5 +++ .../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +++- .../hadoop/hive/ql/txn/compactor/Worker.java| 6 +++ .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 6 files changed, 82 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java -- diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 4c77842..baeafad 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -18,6 +18,7 @@ package org.apache.hive.hcatalog.streaming; +import org.apache.hadoop.fs.FileSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.cli.CliSessionState; @@ -342,6 +343,11 @@ public class HiveEndPoint { return null; } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { LOG.error("Error closing connection to " + endPt, e); } catch (InterruptedException e) { @@ -937,6 +943,11 @@ public class HiveEndPoint { } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username + "' on endPoint :" + endPt, e); http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index fbf5481..974184f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -224,6 +224,11 @@ public class Cleaner extends CompactorThread { return null; } }); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + + ci.getFullPartitionName());} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/1785ca00/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
index 3f6b099..859caff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -173,6 +173,11 @@ abstract class CompactorThread extends Thread implements MetaStoreThread { return null; } }); + try { +FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { +LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); + } if (wrapper.size() == 1) { LOG.debug("Running job as " + wrapper.get(0));
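The pattern HIVE-13151 introduces is the same in every touched class: after running work under an impersonated UGI, close the FileSystem objects that Hadoop cached for that UGI so the cache does not grow with each compaction or streaming call. Below is a minimal sketch of that pattern with hypothetical names; note the actual compactor code calls closeAllForUGI right after doAs and logs failures rather than using a finally block.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiFileSystemCleanupSketch {
  static void runAsTableOwner(String owner) throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        owner, UserGroupInformation.getLoginUser());
    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          // ... file-system work performed on behalf of the table owner ...
          return null;
        }
      });
    } finally {
      try {
        // Drop the FileSystem instances cached under this UGI; without this,
        // each impersonated call leaks one entry in the FileSystem cache.
        FileSystem.closeAllForUGI(ugi);
      } catch (IOException e) {
        // the commit logs and continues here rather than failing the operation
      }
    }
  }
}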
hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-1 5bae0ad45 -> 82068205a HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82068205 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82068205 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82068205 Branch: refs/heads/branch-1 Commit: 82068205a59ed4b6aeb2b353eb612ce0da73c5c2 Parents: 5bae0ad Author: Wei ZhengAuthored: Thu Mar 24 22:38:39 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 22:38:39 2016 -0700 -- .../hive/hcatalog/streaming/HiveEndPoint.java | 11 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 6 +++ .../hive/ql/txn/compactor/CompactorThread.java | 5 +++ .../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +++- .../hadoop/hive/ql/txn/compactor/Worker.java| 7 +++ .../apache/hadoop/hive/ql/TestTxnCommands2.java | 46 6 files changed, 83 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java -- diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index b0bbd66..2e81bf8 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.streaming; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -342,6 +343,11 @@ public class HiveEndPoint { return null; } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { LOG.error("Error closing connection to " + endPt, e); } catch (InterruptedException e) { @@ -937,6 +943,11 @@ public class HiveEndPoint { } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username + "' on endPoint :" + endPt, e); http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 1e6e8a1..d861bc2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -272,6 +272,12 @@ public class Cleaner extends CompactorThread { return null; } }); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java -- diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index ae8865c..952b27a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements MetaStoreThread { return null; } }); + try { +FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { +LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); + }
hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master 6bfec2e97 -> 4fabd038c HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4fabd038 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4fabd038 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4fabd038 Branch: refs/heads/master Commit: 4fabd038cf64b906a89726805958c43b97194291 Parents: 6bfec2e Author: Wei ZhengAuthored: Thu Mar 24 22:18:32 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 22:18:32 2016 -0700 -- .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++-- .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++-- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 4c31a49..23b1b7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -275,8 +275,9 @@ public class Cleaner extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 98ebf53..abbe5d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -235,8 +235,8 @@ public class Initiator extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { -LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + -ci.getFullPartitionName()); +LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + +ci.getFullPartitionName(), exception); } return compactionType; } http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index e21ca27..6238e2b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -175,8 +175,9 @@ public class Worker extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCompacted(ci); } catch 
(Exception e) {
hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master d3a5f20b4 -> f9d1b6ab7 HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f9d1b6ab Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f9d1b6ab Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f9d1b6ab Branch: refs/heads/master Commit: f9d1b6ab77ab15b8337c17fbe38557c1f7b5ce58 Parents: d3a5f20 Author: Wei ZhengAuthored: Thu Mar 24 17:29:59 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 17:29:59 2016 -0700 -- .../hive/hcatalog/streaming/HiveEndPoint.java | 11 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++ .../hive/ql/txn/compactor/CompactorThread.java | 5 +++ .../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +++- .../hadoop/hive/ql/txn/compactor/Worker.java| 8 +++- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 6 files changed, 82 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java -- diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 4c77842..baeafad 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -18,6 +18,7 @@ package org.apache.hive.hcatalog.streaming; +import org.apache.hadoop.fs.FileSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.cli.CliSessionState; @@ -342,6 +343,11 @@ public class HiveEndPoint { return null; } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { LOG.error("Error closing connection to " + endPt, e); } catch (InterruptedException e) { @@ -937,6 +943,11 @@ public class HiveEndPoint { } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username + "' on endPoint :" + endPt, e); http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 9ffeaec..4c31a49 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -272,6 +272,11 @@ public class Cleaner extends CompactorThread { return null; } }); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + + ci.getFullPartitionName());} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index 
index 8495c66..4d6e24e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements MetaStoreThread {
         return null;
       }
     });
+    try {
+      FileSystem.closeAllForUGI(ugi);
+    } catch (IOException exception) {
+      LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception);
+    }
     if (wrapper.size() == 1) {
       LOG.debug("Running job as " + wrapper.get(0));
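The same cleanup recurs in Initiator and Worker: wherever compactor or streaming code runs work under a proxy UserGroupInformation, it now calls FileSystem.closeAllForUGI(ugi) once the doAs block is done, so the Hadoop FileSystem cache stops accumulating one entry per impersonated call. A minimal sketch of the pattern, assuming a UGI created for the proxy user; the helper below is illustrative, not code from the patch (it also uses finally, whereas the patch cleans up inline after doAs):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UgiCleanupSketch {
  private static final Logger LOG = LoggerFactory.getLogger(UgiCleanupSketch.class);

  /**
   * Run some work as a proxy user, then drop the FileSystem instances that the
   * FileSystem cache created for that UGI, so the cache does not keep growing
   * with every impersonated call.
   */
  static void runAsAndCleanUp(UserGroupInformation ugi, PrivilegedExceptionAction<Void> work)
      throws IOException, InterruptedException {
    try {
      ugi.doAs(work);
    } finally {
      try {
        FileSystem.closeAllForUGI(ugi); // evicts cache entries keyed by this UGI
      } catch (IOException e) {
        // Failing to clean up should not mask the outcome of the work itself.
        LOG.error("Could not clean up file-system handles for UGI: " + ugi, e);
      }
    }
  }
}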
hive git commit: HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-1 505c5585c -> 0aaddb7d7 HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0aaddb7d Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0aaddb7d Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0aaddb7d Branch: refs/heads/branch-1 Commit: 0aaddb7d753a2936c973d9ab99e6edb2554f94ae Parents: 505c558 Author: Wei ZhengAuthored: Mon Mar 21 14:50:12 2016 -0700 Committer: Wei Zheng Committed: Mon Mar 21 14:50:12 2016 -0700 -- .../org/apache/hadoop/hive/conf/HiveConf.java | 7 + .../metastore/txn/CompactionTxnHandler.java | 120 + .../hadoop/hive/metastore/txn/TxnDbUtil.java| 4 +- .../hadoop/hive/metastore/txn/TxnHandler.java | 245 ++- .../hive/metastore/txn/TestTxnHandler.java | 83 ++- 5 files changed, 333 insertions(+), 126 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/0aaddb7d/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java -- diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 4a575b3..b78bea2 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -573,6 +573,13 @@ public class HiveConf extends Configuration { "select query has incorrect syntax or something similar inside a transaction, the\n" + "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" + "should disable the usage of direct SQL inside transactions if that happens in your case."), +METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" + +" size of a query string (in KB)."), + METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000, +"The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" + +" multiple OR separated IN clauses."), + METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause", +1000, "The maximum number of values in a VALUES clause for INSERT statement."), METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + "either be pruned or converted to empty strings. 
Some backing dbs such as Oracle persist empty strings " +

http://git-wip-us.apache.org/repos/asf/hive/blob/0aaddb7d/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 4d736b9..28e06ed 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -364,36 +364,38 @@ public class CompactionTxnHandler extends TxnHandler {
         rs = stmt.executeQuery(s);
         List<Long> txnids = new ArrayList<>();
         while (rs.next()) txnids.add(rs.getLong(1));
+        // Remove entries from txn_components, as there may be aborted txn components
         if (txnids.size() > 0) {
+          List<String> queries = new ArrayList<String>();
+
+          // Prepare prefix and suffix
+          StringBuilder prefix = new StringBuilder();
+          StringBuilder suffix = new StringBuilder();
+
+          prefix.append("delete from TXN_COMPONENTS where ");
-          // Remove entries from txn_components, as there may be aborted txn components
-          StringBuilder buf = new StringBuilder();
-          //todo: add a safeguard to make sure IN clause is not too large; break up by txn id
-          buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
-          boolean first = true;
-          for (long id : txnids) {
-            if (first) first = false;
-            else buf.append(", ");
-            buf.append(id);
-          } //because 1 txn may include different partitions/tables even in auto commit mode
-          buf.append(") and tc_database = '");
-          buf.append(info.dbname);
-          buf.append("' and tc_table = '");
-          buf.append(info.tableName);
-          buf.append("'");
+
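The refactor replaces the single unbounded "delete ... where tc_txnid in (...)" string with a prefix/suffix pair and a list of generated queries, so the IN list can be split once it exceeds the new limit on elements per IN clause. A rough sketch of that batching idea, simplified to one query per batch; the method name, parameters, and splitting policy here are illustrative, not the patch's own helper:

import java.util.ArrayList;
import java.util.List;

public class InClauseBatchingSketch {

  // Build "prefix + tc_txnid in (...) + suffix" statements, capping the number of
  // values per IN list so no single SQL string grows without bound.
  static List<String> buildDeleteQueries(List<Long> txnids, String prefix, String suffix,
      int maxElementsPerInClause) {
    List<String> queries = new ArrayList<>();
    for (int start = 0; start < txnids.size(); start += maxElementsPerInClause) {
      int end = Math.min(start + maxElementsPerInClause, txnids.size());
      StringBuilder buf = new StringBuilder(prefix).append("tc_txnid in (");
      for (int i = start; i < end; i++) {
        if (i > start) {
          buf.append(", ");
        }
        buf.append(txnids.get(i));
      }
      buf.append(")").append(suffix);
      queries.add(buf.toString());
    }
    return queries;
  }
}

Invoked with the prefix "delete from TXN_COMPONENTS where " and a suffix holding the tc_database/tc_table predicates, this yields a list of bounded delete statements instead of one IN list that grows with the number of aborted transactions.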
hive git commit: HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master a6155b75e -> db8fb8a42 HIVE-12439 : CompactionTxnHandler.markCleaned() and TxnHandler.openTxns() misc improvements (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db8fb8a4 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db8fb8a4 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db8fb8a4 Branch: refs/heads/master Commit: db8fb8a42a690eaa937d1a0163eaf505c3c48a07 Parents: a6155b7 Author: Wei ZhengAuthored: Mon Mar 21 11:38:38 2016 -0700 Committer: Wei Zheng Committed: Mon Mar 21 11:38:38 2016 -0700 -- .../org/apache/hadoop/hive/conf/HiveConf.java | 7 + .../metastore/txn/CompactionTxnHandler.java | 120 --- .../hadoop/hive/metastore/txn/TxnDbUtil.java| 4 +- .../hadoop/hive/metastore/txn/TxnHandler.java | 151 +++ .../hadoop/hive/metastore/txn/TxnUtils.java | 95 .../hadoop/hive/metastore/txn/TestTxnUtils.java | 135 + 6 files changed, 390 insertions(+), 122 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/db8fb8a4/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java -- diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 98c6372..0f8d67f 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -737,6 +737,13 @@ public class HiveConf extends Configuration { "select query has incorrect syntax or something similar inside a transaction, the\n" + "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" + "should disable the usage of direct SQL inside transactions if that happens in your case."), +METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" + +" size of a query string (in KB)."), + METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000, +"The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" + +" multiple OR separated IN clauses."), + METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause", +1000, "The maximum number of values in a VALUES clause for INSERT statement."), METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + "either be pruned or converted to empty strings. 
Some backing dbs such as Oracle persist empty strings " +

http://git-wip-us.apache.org/repos/asf/hive/blob/db8fb8a4/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index da2b395..15c01da 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -369,36 +369,38 @@ class CompactionTxnHandler extends TxnHandler {
         rs = stmt.executeQuery(s);
         List<Long> txnids = new ArrayList<>();
         while (rs.next()) txnids.add(rs.getLong(1));
+        // Remove entries from txn_components, as there may be aborted txn components
         if (txnids.size() > 0) {
+          List<String> queries = new ArrayList<String>();
+
+          // Prepare prefix and suffix
+          StringBuilder prefix = new StringBuilder();
+          StringBuilder suffix = new StringBuilder();
+
+          prefix.append("delete from TXN_COMPONENTS where ");
-          // Remove entries from txn_components, as there may be aborted txn components
-          StringBuilder buf = new StringBuilder();
-          //todo: add a safeguard to make sure IN clause is not too large; break up by txn id
-          buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
-          boolean first = true;
-          for (long id : txnids) {
-            if (first) first = false;
-            else buf.append(", ");
-            buf.append(id);
-          } //because 1 txn may include different partitions/tables even in auto commit mode
-          buf.append(") and tc_database = '");
-          buf.append(info.dbname);
-          buf.append("' and tc_table = '");
-
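This is the master counterpart of the same change; the three new knobs it adds are plain integer ConfVars, so callers assembling direct SQL can read the limits up front. A small hedged sketch of reading them (the surrounding class is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;

public class DirectSqlLimitsSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Upper bound on a single generated query string, in KB.
    int maxQueryLengthKb = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH);
    // Values allowed per IN (...) list before it is split into OR-separated IN clauses.
    int maxInElements = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
    // Rows allowed per multi-row VALUES clause for generated INSERT statements.
    int maxValuesRows = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE);
    System.out.println(maxQueryLengthKb + " KB, " + maxInElements + ", " + maxValuesRows);
  }
}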
hive git commit: HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)
Repository: hive Updated Branches: refs/heads/branch-1 214e4b6ff -> 1c44f4ccd HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1c44f4cc Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1c44f4cc Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1c44f4cc Branch: refs/heads/branch-1 Commit: 1c44f4ccdcf1d2e47b9132a45e57c04b27ec6ac2 Parents: 214e4b6 Author: Wei ZhengAuthored: Mon Mar 14 14:45:54 2016 -0700 Committer: Wei Zheng Committed: Mon Mar 14 14:45:54 2016 -0700 -- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java | 1 + ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java | 4 .../test/queries/clientnegative/compact_non_acid_table.q | 11 +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q | 2 +- ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q | 2 +- ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q | 2 +- .../results/clientnegative/compact_non_acid_table.q.out | 11 +++ .../test/results/clientpositive/dbtxnmgr_compact1.q.out | 4 ++-- .../test/results/clientpositive/dbtxnmgr_compact2.q.out | 4 ++-- .../test/results/clientpositive/dbtxnmgr_compact3.q.out | 4 ++-- 10 files changed, 36 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 77e82a4..160a31d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -407,6 +407,7 @@ public enum ErrorMsg { TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on one partition at a " + "time."), DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not support in current context"), + NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on non-ACID table {0}.{1}", true), UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing rewritten update or " + "delete query"), http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 3d8ca92..414293c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1710,6 +1710,10 @@ public class DDLTask extends Task implements Serializable { private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { Table tbl = db.getTable(desc.getTableName()); +if (!AcidUtils.isAcidTable(tbl)) { + throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(), + tbl.getTableName()); +} String partName = null; if (desc.getPartSpec() == null) { http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/test/queries/clientnegative/compact_non_acid_table.q -- diff --git a/ql/src/test/queries/clientnegative/compact_non_acid_table.q b/ql/src/test/queries/clientnegative/compact_non_acid_table.q new file mode 100644 index 000..e9faa24 --- /dev/null +++ b/ql/src/test/queries/clientnegative/compact_non_acid_table.q @@ -0,0 +1,11 @@ +set hive.mapred.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+
+create table not_an_acid_table (a int, b varchar(128));
+
+alter table not_an_acid_table compact 'major';
+
+drop table not_an_acid_table;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/1c44f4cc/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
--
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
index 7f71305..b86c6f9 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2
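The guard added to DDLTask#compact relies on AcidUtils.isAcidTable: in essence, a compaction request is only accepted when the table is flagged transactional in its table properties. A hedged sketch of that check; the property lookup and helper shape are assumptions for illustration, not the exact Hive implementation:

import java.util.Map;

public class AcidTableCheckSketch {

  // An ACID table is flagged with the "transactional" table property.
  static boolean isAcidTable(Map<String, String> tableParameters) {
    return tableParameters != null
        && "true".equalsIgnoreCase(tableParameters.get("transactional"));
  }

  // Mirrors the new guard: refuse the request up front instead of queuing a
  // compaction that the Worker could never complete on a non-ACID table.
  static void requestCompaction(String dbName, String tableName, Map<String, String> params) {
    if (!isAcidTable(params)) {
      throw new IllegalArgumentException(
          "Compaction is not allowed on non-ACID table " + dbName + "." + tableName);
    }
    // ... enqueue the compaction request ...
  }
}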
hive git commit: HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)
Repository: hive Updated Branches: refs/heads/master e7a175663 -> b6af0124b HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6af0124 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6af0124 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6af0124 Branch: refs/heads/master Commit: b6af0124b351ba759a15c81f8ececd7920115b2f Parents: e7a1756 Author: Wei ZhengAuthored: Mon Mar 14 14:34:28 2016 -0700 Committer: Wei Zheng Committed: Mon Mar 14 14:34:28 2016 -0700 -- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java | 1 + ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java | 4 .../test/queries/clientnegative/compact_non_acid_table.q | 11 +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q | 2 +- ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q | 2 +- ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q | 2 +- .../results/clientnegative/compact_non_acid_table.q.out | 11 +++ .../test/results/clientpositive/dbtxnmgr_compact1.q.out | 4 ++-- .../test/results/clientpositive/dbtxnmgr_compact2.q.out | 4 ++-- .../test/results/clientpositive/dbtxnmgr_compact3.q.out | 4 ++-- 10 files changed, 36 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index f0cc3a2..f091f67 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -397,6 +397,7 @@ public enum ErrorMsg { TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on one partition at a " + "time."), DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not support in current context"), + NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on non-ACID table {0}.{1}", true), UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing rewritten update or " + "delete query"), http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 2a64cfa..56eecf6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1745,6 +1745,10 @@ public class DDLTask extends Task implements Serializable { private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { Table tbl = db.getTable(desc.getTableName()); +if (!AcidUtils.isAcidTable(tbl)) { + throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(), + tbl.getTableName()); +} String partName = null; if (desc.getPartSpec() == null) { http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientnegative/compact_non_acid_table.q -- diff --git a/ql/src/test/queries/clientnegative/compact_non_acid_table.q b/ql/src/test/queries/clientnegative/compact_non_acid_table.q new file mode 100644 index 000..e9faa24 --- /dev/null +++ b/ql/src/test/queries/clientnegative/compact_non_acid_table.q @@ -0,0 +1,11 @@ +set hive.mapred.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+
+create table not_an_acid_table (a int, b varchar(128));
+
+alter table not_an_acid_table compact 'major';
+
+drop table not_an_acid_table;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
--
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
index 7f71305..b86c6f9 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2 buckets
hive git commit: HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)
Repository: hive Updated Branches: refs/heads/branch-1 73a677be3 -> 214e4b6ff HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/214e4b6f Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/214e4b6f Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/214e4b6f Branch: refs/heads/branch-1 Commit: 214e4b6ffedbdc0f610babcf1156cd32f0659db3 Parents: 73a677b Author: Wei ZhengAuthored: Thu Mar 10 16:57:26 2016 -0800 Committer: Wei Zheng Committed: Thu Mar 10 16:57:26 2016 -0800 -- .../hive/metastore/AcidEventListener.java | 69 +++ .../hadoop/hive/metastore/HiveMetaStore.java| 1 + .../hadoop/hive/metastore/txn/TxnDbUtil.java| 20 +- .../hadoop/hive/metastore/txn/TxnHandler.java | 183 +- .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 25 +-- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +-- .../hive/ql/lockmgr/TestDbTxnManager2.java | 186 +++ 7 files changed, 460 insertions(+), 46 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/214e4b6f/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java -- diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java new file mode 100644 index 000..767bc54 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+
+
+/**
+ * It handles cleanup of dropped partition/table/database in ACID related metastore tables
+ */
+public class AcidEventListener extends MetaStoreEventListener {
+
+  private TxnHandler txnHandler;
+  private HiveConf hiveConf;
+
+  public AcidEventListener(Configuration configuration) {
+    super(configuration);
+    hiveConf = (HiveConf) configuration;
+  }
+
+  @Override
+  public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
+    // We can loop thru all the tables to check if they are ACID first and then perform cleanup,
+    // but it's more efficient to unconditionally perform cleanup for the database, especially
+    // when there are a lot of tables
+    txnHandler = new TxnHandler(hiveConf);
+    txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), null, null);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
+    if (TxnHandler.isAcidTable(tableEvent.getTable())) {
+      txnHandler = new TxnHandler(hiveConf);
+      txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null);
+    }
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
+    if (TxnHandler.isAcidTable(partitionEvent.getTable())) {
+      txnHandler = new TxnHandler(hiveConf);
+      txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(),
+          partitionEvent.getPartitionIterator());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/214e4b6f/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git
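Most of the weight of this patch sits in TxnHandler.cleanupRecords, which the listener calls with the dropped database, table, or partition iterator so that ACID bookkeeping rows do not linger for objects that can never be compacted. A hedged sketch of the kind of work that entails; the table list, column names, and method shape are illustrative, not the committed SQL (the real method also handles databases, partition iterators, and retries):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class CleanupRecordsSketch {

  // Remove rows that reference a dropped table from ACID bookkeeping tables.
  static void cleanupForDroppedTable(Connection dbConn, String dbName, String tableName)
      throws SQLException {
    String[] deletes = {
        "delete from TXN_COMPONENTS where tc_database = ? and tc_table = ?",
        "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and ctc_table = ?",
        "delete from COMPACTION_QUEUE where cq_database = ? and cq_table = ?"
    };
    for (String sql : deletes) {
      try (PreparedStatement ps = dbConn.prepareStatement(sql)) {
        ps.setString(1, dbName);
        ps.setString(2, tableName);
        ps.executeUpdate(); // rows for a dropped table would otherwise never be cleaned up
      }
    }
  }
}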
hive git commit: HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)
Repository: hive Updated Branches: refs/heads/master ff55d0a67 -> 456a91ecd HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/456a91ec Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/456a91ec Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/456a91ec Branch: refs/heads/master Commit: 456a91ecde6a449177a76fb34ad9b5f13983821b Parents: ff55d0a Author: Wei ZhengAuthored: Thu Mar 10 14:37:35 2016 -0800 Committer: Wei Zheng Committed: Thu Mar 10 14:37:35 2016 -0800 -- .../hive/metastore/AcidEventListener.java | 94 + .../hadoop/hive/metastore/HiveMetaStore.java| 1 + .../hadoop/hive/metastore/txn/TxnDbUtil.java| 20 +- .../hadoop/hive/metastore/txn/TxnHandler.java | 167 ++- .../hadoop/hive/metastore/txn/TxnStore.java | 37 ++-- .../hadoop/hive/metastore/txn/TxnUtils.java | 18 ++ .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 27 +-- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +- .../hive/ql/lockmgr/TestDbTxnManager2.java | 209 ++- 9 files changed, 518 insertions(+), 77 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java -- diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java new file mode 100644 index 000..71ad916 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; +import org.apache.hadoop.hive.metastore.events.DropTableEvent; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; + + +/** + * It handles cleanup of dropped partition/table/database in ACID related metastore tables + */ +public class AcidEventListener extends MetaStoreEventListener { + + private TxnStore txnHandler; + private HiveConf hiveConf; + + public AcidEventListener(Configuration configuration) { +super(configuration); +hiveConf = (HiveConf) configuration; + } + + @Override + public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException { +// We can loop thru all the tables to check if they are ACID first and then perform cleanup, +// but it's more efficient to unconditionally perform cleanup for the database, especially +// when there are a lot of tables +txnHandler = getTxnHandler(); +txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), null, null); + } + + @Override + public void onDropTable(DropTableEvent tableEvent) throws MetaException { +if (TxnUtils.isAcidTable(tableEvent.getTable())) { + txnHandler = getTxnHandler(); + txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null); +} + } + + @Override + public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException { +if (TxnUtils.isAcidTable(partitionEvent.getTable())) { + txnHandler = getTxnHandler(); + txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(), + partitionEvent.getPartitionIterator()); +} + } + + private TxnStore getTxnHandler() { +boolean hackOn = HiveConf.getBoolVar(hiveConf,
[2/2] hive git commit: HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)
HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/73a677be
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/73a677be
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/73a677be

Branch: refs/heads/branch-1
Commit: 73a677be3e2027b61ae043544311e9f296fab613
Parents: 24b366f
Author: Wei
Authored: Thu Mar 10 14:02:17 2016 -0800
Committer: Wei
Committed: Thu Mar 10 14:02:17 2016 -0800

--
 .../hive/metastore/TestHiveMetaStore.java.orig | 3224 --
 1 file changed, 3224 deletions(-)
--
[1/2] hive git commit: HIVE-13175 : Disallow making external tables transactional ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-1 24b366f0b -> 73a677be3 http://git-wip-us.apache.org/repos/asf/hive/blob/73a677be/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig -- diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig deleted file mode 100644 index b005759..000 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig +++ /dev/null @@ -1,3224 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.metastore; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import 
org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.util.StringUtils; -import org.apache.thrift.TException; -import org.junit.Assert; -import org.junit.Test; - -import com.google.common.collect.Lists; - -public abstract class TestHiveMetaStore extends TestCase { - private static final Log LOG = LogFactory.getLog(TestHiveMetaStore.class); - protected static HiveMetaStoreClient client; - protected static HiveConf hiveConf; - protected static Warehouse warehouse; - protected static boolean isThriftClient = false; - - private
[3/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)
HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/24b366f0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/24b366f0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/24b366f0

Branch: refs/heads/branch-1
Commit: 24b366f0bbe785b6f478881e8224e43d7aa33094
Parents: f7c8fb5
Author: Wei
Authored: Thu Mar 10 13:58:42 2016 -0800
Committer: Wei
Committed: Thu Mar 10 13:58:42 2016 -0800

--
 .../hive/metastore/TestHiveMetaStore.java      |    1 +
 .../hive/metastore/TestHiveMetaStore.java.orig | 3224 ++
 .../TransactionalValidationListener.java       |   11 +
 .../clientnegative/alter_external_acid.q       |    9 +
 .../clientnegative/create_external_acid.q      |    6 +
 .../clientnegative/alter_external_acid.q.out   |   13 +
 .../clientnegative/create_external_acid.q.out  |    5 +
 7 files changed, 3269 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index b005759..605dc9d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2941,6 +2941,7 @@ public abstract class TestHiveMetaStore extends TestCase {
     tbl.setSd(sd);
     tbl.setLastAccessTime(lastAccessTime);
+    tbl.setTableType(TableType.MANAGED_TABLE.toString());
     client.createTable(tbl);
[2/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)
http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig -- diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig new file mode 100644 index 000..b005759 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java.orig @@ -0,0 +1,3224 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import junit.framework.TestCase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import 
org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.io.HiveInputFormat; +import org.apache.hadoop.hive.ql.io.HiveOutputFormat; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TException; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public abstract class TestHiveMetaStore extends TestCase { + private static final Log LOG = LogFactory.getLog(TestHiveMetaStore.class); + protected static HiveMetaStoreClient client; + protected static HiveConf hiveConf; + protected static Warehouse warehouse; + protected static boolean isThriftClient = false; + + private static final String TEST_DB1_NAME = "testdb1"; + private static final String
[1/3] hive git commit: HIVE-13175 : Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-1 f7c8fb527 -> 24b366f0b http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java -- diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index 96158f8..3e74675 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -86,6 +86,12 @@ final class TransactionalValidationListener extends MetaStorePreEventListener { throw new MetaException("The table must be bucketed and stored using an ACID compliant" + " format (such as ORC)"); } + + if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { +throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() + +" cannot be declared transactional because it's an external table"); + } + return; } Table oldTable = context.getOldTable(); @@ -144,6 +150,11 @@ final class TransactionalValidationListener extends MetaStorePreEventListener { " format (such as ORC)"); } + if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { +throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() + +" cannot be declared transactional because it's an external table"); + } + // normalize prop name parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString()); return; http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/queries/clientnegative/alter_external_acid.q -- diff --git a/ql/src/test/queries/clientnegative/alter_external_acid.q b/ql/src/test/queries/clientnegative/alter_external_acid.q new file mode 100644 index 000..7807278 --- /dev/null +++ b/ql/src/test/queries/clientnegative/alter_external_acid.q @@ -0,0 +1,9 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + + +create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc; + +alter table acid_external set TBLPROPERTIES ('transactional'='true'); + +drop table acid_external; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/queries/clientnegative/create_external_acid.q -- diff --git a/ql/src/test/queries/clientnegative/create_external_acid.q b/ql/src/test/queries/clientnegative/create_external_acid.q new file mode 100644 index 000..d6b2d84 --- /dev/null +++ b/ql/src/test/queries/clientnegative/create_external_acid.q @@ -0,0 +1,6 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + + +create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/results/clientnegative/alter_external_acid.q.out -- diff --git a/ql/src/test/results/clientnegative/alter_external_acid.q.out b/ql/src/test/results/clientnegative/alter_external_acid.q.out new file mode 100644 index 000..69bba3b --- /dev/null +++ b/ql/src/test/results/clientnegative/alter_external_acid.q.out @@ -0,0 +1,13 @@ +PREHOOK: query: create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: 
Output: database:default
+PREHOOK: Output: default@acid_external
+POSTHOOK: query: create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_external
+PREHOOK: query: alter table acid_external set TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@acid_external
+PREHOOK: Output: default@acid_external
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table

http://git-wip-us.apache.org/repos/asf/hive/blob/24b366f0/ql/src/test/results/clientnegative/create_external_acid.q.out
--
diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out
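Both the create-table and alter-table paths in TransactionalValidationListener now apply the same rule: a table whose type is EXTERNAL_TABLE may not carry transactional=true. A compact sketch of that rule in isolation; class and method names here are illustrative, and the listener itself throws MetaException from a MetaStorePreEventListener rather than this unchecked exception:

import java.util.Map;

public class ExternalAcidRuleSketch {

  // Reject the combination "external table" + "transactional=true", mirroring the
  // error surfaced in the negative test output above.
  static void validate(String dbName, String tableName, String tableType,
      Map<String, String> newProperties) {
    boolean wantsTransactional = "true".equalsIgnoreCase(newProperties.get("transactional"));
    if (wantsTransactional && "EXTERNAL_TABLE".equals(tableType)) {
      throw new IllegalStateException(dbName + "." + tableName
          + " cannot be declared transactional because it's an external table");
    }
  }
}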
hive git commit: HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master 1e8a31e8f -> ff55d0a67 HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff55d0a6 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff55d0a6 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff55d0a6 Branch: refs/heads/master Commit: ff55d0a67e59c15b5ccfbdf1317bfd60cf057a30 Parents: 1e8a31e Author: WeiAuthored: Thu Mar 10 13:39:13 2016 -0800 Committer: Wei Committed: Thu Mar 10 13:39:13 2016 -0800 -- .../hadoop/hive/metastore/TestHiveMetaStore.java | 1 + .../metastore/TransactionalValidationListener.java | 11 +++ .../test/queries/clientnegative/alter_external_acid.q | 9 + .../test/queries/clientnegative/create_external_acid.q | 6 ++ .../results/clientnegative/alter_external_acid.q.out | 13 + .../results/clientnegative/create_external_acid.q.out | 5 + 6 files changed, 45 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java -- diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index a55c186..5da4165 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -2944,6 +2944,7 @@ public abstract class TestHiveMetaStore extends TestCase { tbl.setSd(sd); tbl.setLastAccessTime(lastAccessTime); +tbl.setTableType(TableType.MANAGED_TABLE.toString()); client.createTable(tbl); http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java -- diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index 96158f8..3e74675 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -86,6 +86,12 @@ final class TransactionalValidationListener extends MetaStorePreEventListener { throw new MetaException("The table must be bucketed and stored using an ACID compliant" + " format (such as ORC)"); } + + if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { +throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() + +" cannot be declared transactional because it's an external table"); + } + return; } Table oldTable = context.getOldTable(); @@ -144,6 +150,11 @@ final class TransactionalValidationListener extends MetaStorePreEventListener { " format (such as ORC)"); } + if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { +throw new MetaException(newTable.getDbName() + "." 
+ newTable.getTableName() +
+            " cannot be declared transactional because it's an external table");
+      }
+
       // normalize prop name
       parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/alter_external_acid.q
--
diff --git a/ql/src/test/queries/clientnegative/alter_external_acid.q b/ql/src/test/queries/clientnegative/alter_external_acid.q
new file mode 100644
index 000..7807278
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_external_acid.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc;
+
+alter table acid_external set TBLPROPERTIES ('transactional'='true');
+
+drop table acid_external;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/create_external_acid.q