This is an automated email from the ASF dual-hosted git repository.
zhangbutao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 6ab9b14c2c4 HIVE-27984: Support backward compatibility of hms thrift struct about column stats (#4984)(Butao Zhang, reviewed by okumin, Zhihua Deng)
6ab9b14c2c4 is described below
commit 6ab9b14c2c45e139a30ab78d41cefe5f1ab64d22
Author: Butao Zhang <[email protected]>
AuthorDate: Thu Feb 1 14:27:43 2024 +0800
HIVE-27984: Support backward compatibility of hms thrift struct about column stats (#4984)(Butao Zhang, reviewed by okumin, Zhihua Deng)
---
.../cache/TestCachedStoreUpdateUsingEvents.java | 18 ++--
.../hadoop/hive/ql/exec/ColumnStatsUpdateTask.java | 2 +-
.../hadoop/hive/ql/stats/ColStatsProcessor.java | 2 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 51 +++++-----
.../src/gen/thrift/gen-cpp/hive_metastore_types.h | 105 +++++++++++----------
.../hive/metastore/api/ColumnStatistics.java | 5 +-
.../metastore/api/GetPartitionsByNamesRequest.java | 5 +-
.../hadoop/hive/metastore/api/GetTableRequest.java | 5 +-
.../hive/metastore/api/PartitionsStatsRequest.java | 60 +++++++-----
.../metastore/api/SetPartitionsStatsRequest.java | 60 +++++++-----
.../hive/metastore/api/TableStatsRequest.java | 62 ++++++------
.../thrift/gen-php/metastore/ColumnStatistics.php | 2 +-
.../metastore/GetPartitionsByNamesRequest.php | 2 +-
.../thrift/gen-php/metastore/GetTableRequest.php | 2 +-
.../gen-php/metastore/PartitionsStatsRequest.php | 4 +-
.../metastore/SetPartitionsStatsRequest.php | 4 +-
.../thrift/gen-php/metastore/TableStatsRequest.php | 4 +-
.../src/gen/thrift/gen-py/hive_metastore/ttypes.py | 30 +++---
.../src/gen/thrift/gen-rb/hive_metastore_types.rb | 15 ++-
.../hadoop/hive/metastore/HiveMetaStoreClient.java | 18 ++--
.../src/main/thrift/hive_metastore.thrift | 12 +--
.../metastore/HiveMetaStoreClientPreCatalog.java | 22 +++--
22 files changed, 262 insertions(+), 228 deletions(-)
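In short: the engine field on the column-stats related Thrift structs (ColumnStatistics, SetPartitionsStatsRequest, TableStatsRequest, PartitionsStatsRequest, GetTableRequest, GetPartitionsByNamesRequest) is relaxed from required to optional with a default of "hive", and the engine argument is dropped from the generated Java constructors, so a request from an older client that omits the field deserializes with engine = "hive" instead of failing required-field validation. A minimal caller-side sketch against the updated metastore-common API follows; the EngineDefaultSketch class name and the commented-out "impala" override are illustrative assumptions, not part of this commit:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

public class EngineDefaultSketch {

  static SetPartitionsStatsRequest buildRequest(List<ColumnStatistics> colStats) {
    // Before HIVE-27984, engine was a required constructor argument:
    //   new SetPartitionsStatsRequest(colStats, Constants.HIVE_ENGINE);
    // Now the field is optional and defaults to "hive", so the engine-less
    // constructor is sufficient for Hive itself.
    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);

    // Other engines can still set the field explicitly (value is illustrative):
    // request.setEngine("impala");
    return request;
  }

  public static void main(String[] args) {
    SetPartitionsStatsRequest req =
        buildRequest(Collections.singletonList(new ColumnStatistics()));
    System.out.println("engine defaults to: " + req.getEngine()); // prints "hive"
  }
}

The field ids are unchanged (5 in SetPartitionsStatsRequest, 6 in TableStatsRequest, 7 in PartitionsStatsRequest), so clients that still write the field explicitly are unaffected. The full generated-code changes follow.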
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
index dd08d8aa109..1ad1349b10b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
@@ -585,7 +585,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
@@ -627,7 +627,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
@@ -940,7 +940,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
@@ -1007,7 +1007,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
@@ -1056,7 +1056,7 @@ public class TestCachedStoreUpdateUsingEvents {
// This will update the cache for non txn table.
PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName,
- Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE);
+ Collections.singletonList(colName[0]), partitions);
request.setCatName(DEFAULT_CATALOG_NAME);
request.setValidWriteIdList(validWriteIds);
AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request);
@@ -1123,7 +1123,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
hmsHandler.update_partition_column_statistics_req(setTblColStat);
@@ -1136,7 +1136,7 @@ public class TestCachedStoreUpdateUsingEvents {
// keep the txn open and verify that the stats got is not compliant.
PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName,
- Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE);
+ Collections.singletonList(colName[0]), partitions);
request.setCatName(DEFAULT_CATALOG_NAME);
request.setValidWriteIdList(validWriteIds);
AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request);
@@ -1175,7 +1175,7 @@ public class TestCachedStoreUpdateUsingEvents {
colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20));
colStats.setEngine(Constants.HIVE_ENGINE);
- SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
setTblColStat.setWriteId(writeId);
setTblColStat.setValidWriteIdList(validWriteIds);
hmsHandler.update_partition_column_statistics_req(setTblColStat);
@@ -1191,7 +1191,7 @@ public class TestCachedStoreUpdateUsingEvents {
// keep the txn open and verify that the stats got is not compliant.
PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName,
- Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE);
+ Collections.singletonList(colName[0]), partitions);
request.setCatName(DEFAULT_CATALOG_NAME);
request.setValidWriteIdList(validWriteIds);
AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index d8492a16f17..8b6c8d6b1bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -317,7 +317,7 @@ public class ColumnStatsUpdateTask extends Task<ColumnStatsUpdateWork> {
private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException {
ColumnStatistics colStats = constructColumnStatsFromInput();
SetPartitionsStatsRequest request =
- new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE);
+ new SetPartitionsStatsRequest(Collections.singletonList(colStats));
// Set writeId and validWriteId list for replicated statistics. getColStats() will return
// non-null value only during replication.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index e6f945bc864..285175414d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -211,7 +211,7 @@ public class ColStatsProcessor implements IStatsProcessor {
if (colStats.isEmpty()) {
continue;
}
- SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats, Constants.HIVE_ENGINE);
+ SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
request.setNeedMerge(colStatDesc.isNeedMerge());
if (txnMgr != null) {
request.setWriteId(writeId);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 2a7a8d65b5e..1ec6bcee3f1 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -13933,6 +13933,7 @@ __isset.validWriteIdList = true;
void SetPartitionsStatsRequest::__set_engine(const std::string& val) {
this->engine = val;
+__isset.engine = true;
}
std::ostream& operator<<(std::ostream& out, const SetPartitionsStatsRequest&
obj)
{
@@ -13954,7 +13955,6 @@ uint32_t
SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
using ::apache::thrift::protocol::TProtocolException;
bool isset_colStats = false;
- bool isset_engine = false;
while (true)
{
@@ -14011,7 +14011,7 @@ uint32_t
SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
case 5:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readString(this->engine);
- isset_engine = true;
+ this->__isset.engine = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -14027,8 +14027,6 @@ uint32_t
SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
if (!isset_colStats)
throw TProtocolException(TProtocolException::INVALID_DATA);
- if (!isset_engine)
- throw TProtocolException(TProtocolException::INVALID_DATA);
return xfer;
}
@@ -14064,10 +14062,11 @@ uint32_t
SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeString(this->validWriteIdList);
xfer += oprot->writeFieldEnd();
}
- xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 5);
- xfer += oprot->writeString(this->engine);
- xfer += oprot->writeFieldEnd();
-
+ if (this->__isset.engine) {
+ xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 5);
+ xfer += oprot->writeString(this->engine);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -14107,7 +14106,7 @@ void SetPartitionsStatsRequest::printTo(std::ostream&
out) const {
out << ", " << "needMerge="; (__isset.needMerge ? (out <<
to_string(needMerge)) : (out << "<null>"));
out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) :
(out << "<null>"));
out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out <<
to_string(validWriteIdList)) : (out << "<null>"));
- out << ", " << "engine=" << to_string(engine);
+ out << ", " << "engine="; (__isset.engine ? (out << to_string(engine)) :
(out << "<null>"));
out << ")";
}
@@ -18221,6 +18220,7 @@ __isset.validWriteIdList = true;
void TableStatsRequest::__set_engine(const std::string& val) {
this->engine = val;
+__isset.engine = true;
}
void TableStatsRequest::__set_id(const int64_t val) {
@@ -18249,7 +18249,6 @@ uint32_t
TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
bool isset_dbName = false;
bool isset_tblName = false;
bool isset_colNames = false;
- bool isset_engine = false;
while (true)
{
@@ -18314,7 +18313,7 @@ uint32_t
TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
case 6:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readString(this->engine);
- isset_engine = true;
+ this->__isset.engine = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -18342,8 +18341,6 @@ uint32_t
TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_colNames)
throw TProtocolException(TProtocolException::INVALID_DATA);
- if (!isset_engine)
- throw TProtocolException(TProtocolException::INVALID_DATA);
return xfer;
}
@@ -18382,10 +18379,11 @@ uint32_t
TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeString(this->validWriteIdList);
xfer += oprot->writeFieldEnd();
}
- xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 6);
- xfer += oprot->writeString(this->engine);
- xfer += oprot->writeFieldEnd();
-
+ if (this->__isset.engine) {
+ xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 6);
+ xfer += oprot->writeString(this->engine);
+ xfer += oprot->writeFieldEnd();
+ }
if (this->__isset.id) {
xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_I64, 7);
xfer += oprot->writeI64(this->id);
@@ -18437,7 +18435,7 @@ void TableStatsRequest::printTo(std::ostream& out)
const {
out << ", " << "colNames=" << to_string(colNames);
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) :
(out << "<null>"));
out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out <<
to_string(validWriteIdList)) : (out << "<null>"));
- out << ", " << "engine=" << to_string(engine);
+ out << ", " << "engine="; (__isset.engine ? (out << to_string(engine)) :
(out << "<null>"));
out << ", " << "id="; (__isset.id ? (out << to_string(id)) : (out <<
"<null>"));
out << ")";
}
@@ -18475,6 +18473,7 @@ __isset.validWriteIdList = true;
void PartitionsStatsRequest::__set_engine(const std::string& val) {
this->engine = val;
+__isset.engine = true;
}
std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest& obj)
{
@@ -18499,7 +18498,6 @@ uint32_t
PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr
bool isset_tblName = false;
bool isset_colNames = false;
bool isset_partNames = false;
- bool isset_engine = false;
while (true)
{
@@ -18584,7 +18582,7 @@ uint32_t
PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr
case 7:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readString(this->engine);
- isset_engine = true;
+ this->__isset.engine = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -18606,8 +18604,6 @@ uint32_t
PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr
throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_partNames)
throw TProtocolException(TProtocolException::INVALID_DATA);
- if (!isset_engine)
- throw TProtocolException(TProtocolException::INVALID_DATA);
return xfer;
}
@@ -18658,10 +18654,11 @@ uint32_t
PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op
xfer += oprot->writeString(this->validWriteIdList);
xfer += oprot->writeFieldEnd();
}
- xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 7);
- xfer += oprot->writeString(this->engine);
- xfer += oprot->writeFieldEnd();
-
+ if (this->__isset.engine) {
+ xfer += oprot->writeFieldBegin("engine",
::apache::thrift::protocol::T_STRING, 7);
+ xfer += oprot->writeString(this->engine);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -18709,7 +18706,7 @@ void PartitionsStatsRequest::printTo(std::ostream& out)
const {
out << ", " << "partNames=" << to_string(partNames);
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) :
(out << "<null>"));
out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out <<
to_string(validWriteIdList)) : (out << "<null>"));
- out << ", " << "engine=" << to_string(engine);
+ out << ", " << "engine="; (__isset.engine ? (out << to_string(engine)) :
(out << "<null>"));
out << ")";
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 4e0a567345b..2a65c40f531 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -5012,7 +5012,7 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc
&b);
std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& obj);
typedef struct _ColumnStatistics__isset {
- _ColumnStatistics__isset() : isStatsCompliant(false), engine(false) {}
+ _ColumnStatistics__isset() : isStatsCompliant(false), engine(true) {}
bool isStatsCompliant :1;
bool engine :1;
} _ColumnStatistics__isset;
@@ -5022,9 +5022,8 @@ class ColumnStatistics : public virtual
::apache::thrift::TBase {
ColumnStatistics(const ColumnStatistics&);
ColumnStatistics& operator=(const ColumnStatistics&);
- ColumnStatistics() noexcept
- : isStatsCompliant(0),
- engine() {
+ ColumnStatistics() : isStatsCompliant(0),
+ engine("hive") {
}
virtual ~ColumnStatistics() noexcept;
@@ -5938,10 +5937,11 @@ void swap(AggrStats &a, AggrStats &b);
std::ostream& operator<<(std::ostream& out, const AggrStats& obj);
typedef struct _SetPartitionsStatsRequest__isset {
- _SetPartitionsStatsRequest__isset() : needMerge(false), writeId(true),
validWriteIdList(false) {}
+ _SetPartitionsStatsRequest__isset() : needMerge(false), writeId(true),
validWriteIdList(false), engine(true) {}
bool needMerge :1;
bool writeId :1;
bool validWriteIdList :1;
+ bool engine :1;
} _SetPartitionsStatsRequest__isset;
class SetPartitionsStatsRequest : public virtual ::apache::thrift::TBase {
@@ -5949,11 +5949,10 @@ class SetPartitionsStatsRequest : public virtual
::apache::thrift::TBase {
SetPartitionsStatsRequest(const SetPartitionsStatsRequest&);
SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&);
- SetPartitionsStatsRequest() noexcept
- : needMerge(0),
- writeId(-1LL),
- validWriteIdList(),
- engine() {
+ SetPartitionsStatsRequest() : needMerge(0),
+ writeId(-1LL),
+ validWriteIdList(),
+ engine("hive") {
}
virtual ~SetPartitionsStatsRequest() noexcept;
@@ -5991,7 +5990,9 @@ class SetPartitionsStatsRequest : public virtual
::apache::thrift::TBase {
return false;
else if (__isset.validWriteIdList && !(validWriteIdList ==
rhs.validWriteIdList))
return false;
- if (!(engine == rhs.engine))
+ if (__isset.engine != rhs.__isset.engine)
+ return false;
+ else if (__isset.engine && !(engine == rhs.engine))
return false;
return true;
}
@@ -7473,9 +7474,10 @@ void swap(PartitionsStatsResult &a,
PartitionsStatsResult &b);
std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult& obj);
typedef struct _TableStatsRequest__isset {
- _TableStatsRequest__isset() : catName(false), validWriteIdList(false),
id(true) {}
+ _TableStatsRequest__isset() : catName(false), validWriteIdList(false),
engine(true), id(true) {}
bool catName :1;
bool validWriteIdList :1;
+ bool engine :1;
bool id :1;
} _TableStatsRequest__isset;
@@ -7484,13 +7486,12 @@ class TableStatsRequest : public virtual
::apache::thrift::TBase {
TableStatsRequest(const TableStatsRequest&);
TableStatsRequest& operator=(const TableStatsRequest&);
- TableStatsRequest() noexcept
- : dbName(),
- tblName(),
- catName(),
- validWriteIdList(),
- engine(),
- id(-1LL) {
+ TableStatsRequest() : dbName(),
+ tblName(),
+ catName(),
+ validWriteIdList(),
+ engine("hive"),
+ id(-1LL) {
}
virtual ~TableStatsRequest() noexcept;
@@ -7534,7 +7535,9 @@ class TableStatsRequest : public virtual
::apache::thrift::TBase {
return false;
else if (__isset.validWriteIdList && !(validWriteIdList ==
rhs.validWriteIdList))
return false;
- if (!(engine == rhs.engine))
+ if (__isset.engine != rhs.__isset.engine)
+ return false;
+ else if (__isset.engine && !(engine == rhs.engine))
return false;
if (__isset.id != rhs.__isset.id)
return false;
@@ -7559,9 +7562,10 @@ void swap(TableStatsRequest &a, TableStatsRequest &b);
std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj);
typedef struct _PartitionsStatsRequest__isset {
- _PartitionsStatsRequest__isset() : catName(false), validWriteIdList(false) {}
+ _PartitionsStatsRequest__isset() : catName(false), validWriteIdList(false),
engine(true) {}
bool catName :1;
bool validWriteIdList :1;
+ bool engine :1;
} _PartitionsStatsRequest__isset;
class PartitionsStatsRequest : public virtual ::apache::thrift::TBase {
@@ -7569,12 +7573,11 @@ class PartitionsStatsRequest : public virtual
::apache::thrift::TBase {
PartitionsStatsRequest(const PartitionsStatsRequest&);
PartitionsStatsRequest& operator=(const PartitionsStatsRequest&);
- PartitionsStatsRequest() noexcept
- : dbName(),
- tblName(),
- catName(),
- validWriteIdList(),
- engine() {
+ PartitionsStatsRequest() : dbName(),
+ tblName(),
+ catName(),
+ validWriteIdList(),
+ engine("hive") {
}
virtual ~PartitionsStatsRequest() noexcept;
@@ -7620,7 +7623,9 @@ class PartitionsStatsRequest : public virtual
::apache::thrift::TBase {
return false;
else if (__isset.validWriteIdList && !(validWriteIdList ==
rhs.validWriteIdList))
return false;
- if (!(engine == rhs.engine))
+ if (__isset.engine != rhs.__isset.engine)
+ return false;
+ else if (__isset.engine && !(engine == rhs.engine))
return false;
return true;
}
@@ -8254,7 +8259,7 @@ void swap(PartitionValuesResponse &a,
PartitionValuesResponse &b);
std::ostream& operator<<(std::ostream& out, const PartitionValuesResponse&
obj);
typedef struct _GetPartitionsByNamesRequest__isset {
- _GetPartitionsByNamesRequest__isset() : names(false), get_col_stats(false),
processorCapabilities(false), processorIdentifier(false), engine(false),
validWriteIdList(false), getFileMetadata(false), id(true),
skipColumnSchemaForPartition(false), includeParamKeyPattern(false),
excludeParamKeyPattern(false) {}
+ _GetPartitionsByNamesRequest__isset() : names(false), get_col_stats(false),
processorCapabilities(false), processorIdentifier(false), engine(true),
validWriteIdList(false), getFileMetadata(false), id(true),
skipColumnSchemaForPartition(false), includeParamKeyPattern(false),
excludeParamKeyPattern(false) {}
bool names :1;
bool get_col_stats :1;
bool processorCapabilities :1;
@@ -8273,18 +8278,17 @@ class GetPartitionsByNamesRequest : public virtual
::apache::thrift::TBase {
GetPartitionsByNamesRequest(const GetPartitionsByNamesRequest&);
GetPartitionsByNamesRequest& operator=(const GetPartitionsByNamesRequest&);
- GetPartitionsByNamesRequest() noexcept
- : db_name(),
- tbl_name(),
- get_col_stats(0),
- processorIdentifier(),
- engine(),
- validWriteIdList(),
- getFileMetadata(0),
- id(-1LL),
- skipColumnSchemaForPartition(0),
- includeParamKeyPattern(),
- excludeParamKeyPattern() {
+ GetPartitionsByNamesRequest() : db_name(),
+ tbl_name(),
+ get_col_stats(0),
+ processorIdentifier(),
+ engine("hive"),
+ validWriteIdList(),
+ getFileMetadata(0),
+ id(-1LL),
+ skipColumnSchemaForPartition(0),
+ includeParamKeyPattern(),
+ excludeParamKeyPattern() {
}
virtual ~GetPartitionsByNamesRequest() noexcept;
@@ -13888,7 +13892,7 @@ void swap(GetProjectionsSpec &a, GetProjectionsSpec &b);
std::ostream& operator<<(std::ostream& out, const GetProjectionsSpec& obj);
typedef struct _GetTableRequest__isset {
- _GetTableRequest__isset() : capabilities(false), catName(false),
validWriteIdList(false), getColumnStats(false), processorCapabilities(false),
processorIdentifier(false), engine(false), id(true) {}
+ _GetTableRequest__isset() : capabilities(false), catName(false),
validWriteIdList(false), getColumnStats(false), processorCapabilities(false),
processorIdentifier(false), engine(true), id(true) {}
bool capabilities :1;
bool catName :1;
bool validWriteIdList :1;
@@ -13904,15 +13908,14 @@ class GetTableRequest : public virtual
::apache::thrift::TBase {
GetTableRequest(const GetTableRequest&);
GetTableRequest& operator=(const GetTableRequest&);
- GetTableRequest() noexcept
- : dbName(),
- tblName(),
- catName(),
- validWriteIdList(),
- getColumnStats(0),
- processorIdentifier(),
- engine(),
- id(-1LL) {
+ GetTableRequest() : dbName(),
+ tblName(),
+ catName(),
+ validWriteIdList(),
+ getColumnStats(0),
+ processorIdentifier(),
+ engine("hive"),
+ id(-1LL) {
}
virtual ~GetTableRequest() noexcept;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
index 782586acece..3fffbce5d10 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@ -114,6 +114,8 @@ package org.apache.hadoop.hive.metastore.api;
}
public ColumnStatistics() {
+ this.engine = "hive";
+
}
public ColumnStatistics(
@@ -156,7 +158,8 @@ package org.apache.hadoop.hive.metastore.api;
this.statsObj = null;
setIsStatsCompliantIsSet(false);
this.isStatsCompliant = false;
- this.engine = null;
+ this.engine = "hive";
+
}
@org.apache.thrift.annotation.Nullable
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
index 2ecd9aa0c1a..2654f09d4f4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
@@ -181,6 +181,8 @@ package org.apache.hadoop.hive.metastore.api;
}
public GetPartitionsByNamesRequest() {
+ this.engine = "hive";
+
this.id = -1L;
}
@@ -247,7 +249,8 @@ package org.apache.hadoop.hive.metastore.api;
this.get_col_stats = false;
this.processorCapabilities = null;
this.processorIdentifier = null;
- this.engine = null;
+ this.engine = "hive";
+
this.validWriteIdList = null;
setGetFileMetadataIsSet(false);
this.getFileMetadata = false;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 50d04e78653..a333674af92 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -157,6 +157,8 @@ package org.apache.hadoop.hive.metastore.api;
}
public GetTableRequest() {
+ this.engine = "hive";
+
this.id = -1L;
}
@@ -219,7 +221,8 @@ package org.apache.hadoop.hive.metastore.api;
this.getColumnStats = false;
this.processorCapabilities = null;
this.processorIdentifier = null;
- this.engine = null;
+ this.engine = "hive";
+
this.id = -1L;
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index bdddf8844bd..7ef14ac7706 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -28,7 +28,7 @@ package org.apache.hadoop.hive.metastore.api;
private @org.apache.thrift.annotation.Nullable
java.util.List<java.lang.String> partNames; // required
private @org.apache.thrift.annotation.Nullable java.lang.String catName; //
optional
private @org.apache.thrift.annotation.Nullable java.lang.String
validWriteIdList; // optional
- private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
required
+ private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
optional
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -109,7 +109,7 @@ package org.apache.hadoop.hive.metastore.api;
}
// isset id assignments
- private static final _Fields optionals[] =
{_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
+ private static final _Fields optionals[] =
{_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE};
public static final java.util.Map<_Fields,
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields,
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -127,28 +127,28 @@ package org.apache.hadoop.hive.metastore.api;
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new
org.apache.thrift.meta_data.FieldMetaData("validWriteIdList",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.REQUIRED,
+ tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class,
metaDataMap);
}
public PartitionsStatsRequest() {
+ this.engine = "hive";
+
}
public PartitionsStatsRequest(
java.lang.String dbName,
java.lang.String tblName,
java.util.List<java.lang.String> colNames,
- java.util.List<java.lang.String> partNames,
- java.lang.String engine)
+ java.util.List<java.lang.String> partNames)
{
this();
this.dbName = dbName;
this.tblName = tblName;
this.colNames = colNames;
this.partNames = partNames;
- this.engine = engine;
}
/**
@@ -192,7 +192,8 @@ package org.apache.hadoop.hive.metastore.api;
this.partNames = null;
this.catName = null;
this.validWriteIdList = null;
- this.engine = null;
+ this.engine = "hive";
+
}
@org.apache.thrift.annotation.Nullable
@@ -773,14 +774,16 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
- if (!first) sb.append(", ");
- sb.append("engine:");
- if (this.engine == null) {
- sb.append("null");
- } else {
- sb.append(this.engine);
+ if (isSetEngine()) {
+ if (!first) sb.append(", ");
+ sb.append("engine:");
+ if (this.engine == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.engine);
+ }
+ first = false;
}
- first = false;
sb.append(")");
return sb.toString();
}
@@ -803,10 +806,6 @@ package org.apache.hadoop.hive.metastore.api;
throw new org.apache.thrift.protocol.TProtocolException("Required field
'partNames' is unset! Struct:" + toString());
}
- if (!isSetEngine()) {
- throw new org.apache.thrift.protocol.TProtocolException("Required field
'engine' is unset! Struct:" + toString());
- }
-
// check for sub-struct validity
}
@@ -982,9 +981,11 @@ package org.apache.hadoop.hive.metastore.api;
}
}
if (struct.engine != null) {
- oprot.writeFieldBegin(ENGINE_FIELD_DESC);
- oprot.writeString(struct.engine);
- oprot.writeFieldEnd();
+ if (struct.isSetEngine()) {
+ oprot.writeFieldBegin(ENGINE_FIELD_DESC);
+ oprot.writeString(struct.engine);
+ oprot.writeFieldEnd();
+ }
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -1019,7 +1020,6 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeString(_iter627);
}
}
- oprot.writeString(struct.engine);
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetCatName()) {
optionals.set(0);
@@ -1027,13 +1027,19 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetValidWriteIdList()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetEngine()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
if (struct.isSetValidWriteIdList()) {
oprot.writeString(struct.validWriteIdList);
}
+ if (struct.isSetEngine()) {
+ oprot.writeString(struct.engine);
+ }
}
@Override
@@ -1065,9 +1071,7 @@ package org.apache.hadoop.hive.metastore.api;
}
}
struct.setPartNamesIsSet(true);
- struct.engine = iprot.readString();
- struct.setEngineIsSet(true);
- java.util.BitSet incoming = iprot.readBitSet(2);
+ java.util.BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
@@ -1076,6 +1080,10 @@ package org.apache.hadoop.hive.metastore.api;
struct.validWriteIdList = iprot.readString();
struct.setValidWriteIdListIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.engine = iprot.readString();
+ struct.setEngineIsSet(true);
+ }
}
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index 1c373d92da2..c10e726c6e7 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@ -24,7 +24,7 @@ package org.apache.hadoop.hive.metastore.api;
private boolean needMerge; // optional
private long writeId; // optional
private @org.apache.thrift.annotation.Nullable java.lang.String
validWriteIdList; // optional
- private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
required
+ private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
optional
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -102,7 +102,7 @@ package org.apache.hadoop.hive.metastore.api;
private static final int __NEEDMERGE_ISSET_ID = 0;
private static final int __WRITEID_ISSET_ID = 1;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] =
{_Fields.NEED_MERGE,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+ private static final _Fields optionals[] =
{_Fields.NEED_MERGE,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE};
public static final java.util.Map<_Fields,
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields,
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -115,7 +115,7 @@ package org.apache.hadoop.hive.metastore.api;
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new
org.apache.thrift.meta_data.FieldMetaData("validWriteIdList",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.REQUIRED,
+ tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class,
metaDataMap);
@@ -124,15 +124,15 @@ package org.apache.hadoop.hive.metastore.api;
public SetPartitionsStatsRequest() {
this.writeId = -1L;
+ this.engine = "hive";
+
}
public SetPartitionsStatsRequest(
- java.util.List<ColumnStatistics> colStats,
- java.lang.String engine)
+ java.util.List<ColumnStatistics> colStats)
{
this();
this.colStats = colStats;
- this.engine = engine;
}
/**
@@ -169,7 +169,8 @@ package org.apache.hadoop.hive.metastore.api;
this.writeId = -1L;
this.validWriteIdList = null;
- this.engine = null;
+ this.engine = "hive";
+
}
public int getColStatsSize() {
@@ -588,14 +589,16 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
- if (!first) sb.append(", ");
- sb.append("engine:");
- if (this.engine == null) {
- sb.append("null");
- } else {
- sb.append(this.engine);
+ if (isSetEngine()) {
+ if (!first) sb.append(", ");
+ sb.append("engine:");
+ if (this.engine == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.engine);
+ }
+ first = false;
}
- first = false;
sb.append(")");
return sb.toString();
}
@@ -606,10 +609,6 @@ package org.apache.hadoop.hive.metastore.api;
throw new org.apache.thrift.protocol.TProtocolException("Required field
'colStats' is unset! Struct:" + toString());
}
- if (!isSetEngine()) {
- throw new org.apache.thrift.protocol.TProtocolException("Required field
'engine' is unset! Struct:" + toString());
- }
-
// check for sub-struct validity
}
@@ -743,9 +742,11 @@ package org.apache.hadoop.hive.metastore.api;
}
}
if (struct.engine != null) {
- oprot.writeFieldBegin(ENGINE_FIELD_DESC);
- oprot.writeString(struct.engine);
- oprot.writeFieldEnd();
+ if (struct.isSetEngine()) {
+ oprot.writeFieldBegin(ENGINE_FIELD_DESC);
+ oprot.writeString(struct.engine);
+ oprot.writeFieldEnd();
+ }
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -771,7 +772,6 @@ package org.apache.hadoop.hive.metastore.api;
_iter450.write(oprot);
}
}
- oprot.writeString(struct.engine);
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetNeedMerge()) {
optionals.set(0);
@@ -782,7 +782,10 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetValidWriteIdList()) {
optionals.set(2);
}
- oprot.writeBitSet(optionals, 3);
+ if (struct.isSetEngine()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetNeedMerge()) {
oprot.writeBool(struct.needMerge);
}
@@ -792,6 +795,9 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetValidWriteIdList()) {
oprot.writeString(struct.validWriteIdList);
}
+ if (struct.isSetEngine()) {
+ oprot.writeString(struct.engine);
+ }
}
@Override
@@ -809,9 +815,7 @@ package org.apache.hadoop.hive.metastore.api;
}
}
struct.setColStatsIsSet(true);
- struct.engine = iprot.readString();
- struct.setEngineIsSet(true);
- java.util.BitSet incoming = iprot.readBitSet(3);
+ java.util.BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.needMerge = iprot.readBool();
struct.setNeedMergeIsSet(true);
@@ -824,6 +828,10 @@ package org.apache.hadoop.hive.metastore.api;
struct.validWriteIdList = iprot.readString();
struct.setValidWriteIdListIsSet(true);
}
+ if (incoming.get(3)) {
+ struct.engine = iprot.readString();
+ struct.setEngineIsSet(true);
+ }
}
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index ea6626b78fe..685fe9d6982 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@ -27,7 +27,7 @@ package org.apache.hadoop.hive.metastore.api;
private @org.apache.thrift.annotation.Nullable
java.util.List<java.lang.String> colNames; // required
private @org.apache.thrift.annotation.Nullable java.lang.String catName; //
optional
private @org.apache.thrift.annotation.Nullable java.lang.String
validWriteIdList; // optional
- private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
required
+ private @org.apache.thrift.annotation.Nullable java.lang.String engine; //
optional
private long id; // optional
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
@@ -111,7 +111,7 @@ package org.apache.hadoop.hive.metastore.api;
// isset id assignments
private static final int __ID_ISSET_ID = 0;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] =
{_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ID};
+ private static final _Fields optionals[] =
{_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE,_Fields.ID};
public static final java.util.Map<_Fields,
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields,
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -126,7 +126,7 @@ package org.apache.hadoop.hive.metastore.api;
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new
org.apache.thrift.meta_data.FieldMetaData("validWriteIdList",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.REQUIRED,
+ tmpMap.put(_Fields.ENGINE, new
org.apache.thrift.meta_data.FieldMetaData("engine",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
@@ -135,6 +135,8 @@ package org.apache.hadoop.hive.metastore.api;
}
public TableStatsRequest() {
+ this.engine = "hive";
+
this.id = -1L;
}
@@ -142,14 +144,12 @@ package org.apache.hadoop.hive.metastore.api;
public TableStatsRequest(
java.lang.String dbName,
java.lang.String tblName,
- java.util.List<java.lang.String> colNames,
- java.lang.String engine)
+ java.util.List<java.lang.String> colNames)
{
this();
this.dbName = dbName;
this.tblName = tblName;
this.colNames = colNames;
- this.engine = engine;
}
/**
@@ -190,7 +190,8 @@ package org.apache.hadoop.hive.metastore.api;
this.colNames = null;
this.catName = null;
this.validWriteIdList = null;
- this.engine = null;
+ this.engine = "hive";
+
this.id = -1L;
}
@@ -747,14 +748,16 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
- if (!first) sb.append(", ");
- sb.append("engine:");
- if (this.engine == null) {
- sb.append("null");
- } else {
- sb.append(this.engine);
+ if (isSetEngine()) {
+ if (!first) sb.append(", ");
+ sb.append("engine:");
+ if (this.engine == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.engine);
+ }
+ first = false;
}
- first = false;
if (isSetId()) {
if (!first) sb.append(", ");
sb.append("id:");
@@ -779,10 +782,6 @@ package org.apache.hadoop.hive.metastore.api;
throw new org.apache.thrift.protocol.TProtocolException("Required field
'colNames' is unset! Struct:" + toString());
}
- if (!isSetEngine()) {
- throw new org.apache.thrift.protocol.TProtocolException("Required field
'engine' is unset! Struct:" + toString());
- }
-
// check for sub-struct validity
}
@@ -938,9 +937,11 @@ package org.apache.hadoop.hive.metastore.api;
}
}
if (struct.engine != null) {
- oprot.writeFieldBegin(ENGINE_FIELD_DESC);
- oprot.writeString(struct.engine);
- oprot.writeFieldEnd();
+ if (struct.isSetEngine()) {
+ oprot.writeFieldBegin(ENGINE_FIELD_DESC);
+ oprot.writeString(struct.engine);
+ oprot.writeFieldEnd();
+ }
}
if (struct.isSetId()) {
oprot.writeFieldBegin(ID_FIELD_DESC);
@@ -973,7 +974,6 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeString(_iter614);
}
}
- oprot.writeString(struct.engine);
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetCatName()) {
optionals.set(0);
@@ -981,16 +981,22 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetValidWriteIdList()) {
optionals.set(1);
}
- if (struct.isSetId()) {
+ if (struct.isSetEngine()) {
optionals.set(2);
}
- oprot.writeBitSet(optionals, 3);
+ if (struct.isSetId()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
if (struct.isSetValidWriteIdList()) {
oprot.writeString(struct.validWriteIdList);
}
+ if (struct.isSetEngine()) {
+ oprot.writeString(struct.engine);
+ }
if (struct.isSetId()) {
oprot.writeI64(struct.id);
}
@@ -1014,9 +1020,7 @@ package org.apache.hadoop.hive.metastore.api;
}
}
struct.setColNamesIsSet(true);
- struct.engine = iprot.readString();
- struct.setEngineIsSet(true);
- java.util.BitSet incoming = iprot.readBitSet(3);
+ java.util.BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
@@ -1026,6 +1030,10 @@ package org.apache.hadoop.hive.metastore.api;
struct.setValidWriteIdListIsSet(true);
}
if (incoming.get(2)) {
+ struct.engine = iprot.readString();
+ struct.setEngineIsSet(true);
+ }
+ if (incoming.get(3)) {
struct.id = iprot.readI64();
struct.setIdIsSet(true);
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php
index 0387862ec87..e5f9553ef95 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php
@@ -64,7 +64,7 @@ class ColumnStatistics
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
public function __construct($vals = null)
{
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php
index c06aa5ac27e..e21e5fa5ad9 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php
@@ -123,7 +123,7 @@ class GetPartitionsByNamesRequest
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
/**
* @var string
*/
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php
index 3c56331bb0f..217f5a377b2 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php
@@ -113,7 +113,7 @@ class GetTableRequest
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
/**
* @var int
*/
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php
index c7b3f053ae7..2c039313bd5 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php
@@ -61,7 +61,7 @@ class PartitionsStatsRequest
),
7 => array(
'var' => 'engine',
- 'isRequired' => true,
+ 'isRequired' => false,
'type' => TType::STRING,
),
);
@@ -93,7 +93,7 @@ class PartitionsStatsRequest
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
public function __construct($vals = null)
{
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php
index dae3c80aa27..ca7bad0cfb2 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php
@@ -48,7 +48,7 @@ class SetPartitionsStatsRequest
),
5 => array(
'var' => 'engine',
- 'isRequired' => true,
+ 'isRequired' => false,
'type' => TType::STRING,
),
);
@@ -72,7 +72,7 @@ class SetPartitionsStatsRequest
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
public function __construct($vals = null)
{
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php
index 00c26651ff4..00d3ebb33d9 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php
@@ -52,7 +52,7 @@ class TableStatsRequest
),
6 => array(
'var' => 'engine',
- 'isRequired' => true,
+ 'isRequired' => false,
'type' => TType::STRING,
),
7 => array(
@@ -85,7 +85,7 @@ class TableStatsRequest
/**
* @var string
*/
- public $engine = null;
+ public $engine = "hive";
/**
* @var int
*/
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index c7fe5de1aa0..7f3c0e949ff 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -6478,7 +6478,7 @@ class ColumnStatistics(object):
"""
- def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None,
engine=None,):
+ def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None,
engine="hive",):
self.statsDesc = statsDesc
self.statsObj = statsObj
self.isStatsCompliant = isStatsCompliant
@@ -7955,7 +7955,7 @@ class SetPartitionsStatsRequest(object):
"""
- def __init__(self, colStats=None, needMerge=None, writeId=-1,
validWriteIdList=None, engine=None,):
+ def __init__(self, colStats=None, needMerge=None, writeId=-1,
validWriteIdList=None, engine="hive",):
self.colStats = colStats
self.needMerge = needMerge
self.writeId = writeId
@@ -8041,8 +8041,6 @@ class SetPartitionsStatsRequest(object):
def validate(self):
if self.colStats is None:
raise TProtocolException(message='Required field colStats is
unset!')
- if self.engine is None:
- raise TProtocolException(message='Required field engine is unset!')
return
def __repr__(self):
@@ -10466,7 +10464,7 @@ class TableStatsRequest(object):
"""
- def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,
validWriteIdList=None, engine=None, id=-1,):
+ def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,
validWriteIdList=None, engine="hive", id=-1,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
@@ -10575,8 +10573,6 @@ class TableStatsRequest(object):
raise TProtocolException(message='Required field tblName is
unset!')
if self.colNames is None:
raise TProtocolException(message='Required field colNames is
unset!')
- if self.engine is None:
- raise TProtocolException(message='Required field engine is unset!')
return
def __repr__(self):
@@ -10605,7 +10601,7 @@ class PartitionsStatsRequest(object):
"""
- def __init__(self, dbName=None, tblName=None, colNames=None,
partNames=None, catName=None, validWriteIdList=None, engine=None,):
+ def __init__(self, dbName=None, tblName=None, colNames=None,
partNames=None, catName=None, validWriteIdList=None, engine="hive",):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
@@ -10724,8 +10720,6 @@ class PartitionsStatsRequest(object):
raise TProtocolException(message='Required field colNames is
unset!')
if self.partNames is None:
raise TProtocolException(message='Required field partNames is
unset!')
- if self.engine is None:
- raise TProtocolException(message='Required field engine is unset!')
return
def __repr__(self):
@@ -11728,7 +11722,7 @@ class GetPartitionsByNamesRequest(object):
"""
- def __init__(self, db_name=None, tbl_name=None, names=None,
get_col_stats=None, processorCapabilities=None, processorIdentifier=None,
engine=None, validWriteIdList=None, getFileMetadata=None, id=-1,
skipColumnSchemaForPartition=None, includeParamKeyPattern=None,
excludeParamKeyPattern=None,):
+ def __init__(self, db_name=None, tbl_name=None, names=None,
get_col_stats=None, processorCapabilities=None, processorIdentifier=None,
engine="hive", validWriteIdList=None, getFileMetadata=None, id=-1,
skipColumnSchemaForPartition=None, includeParamKeyPattern=None,
excludeParamKeyPattern=None,):
self.db_name = db_name
self.tbl_name = tbl_name
self.names = names
@@ -20259,7 +20253,7 @@ class GetTableRequest(object):
"""
- def __init__(self, dbName=None, tblName=None, capabilities=None,
catName=None, validWriteIdList=None, getColumnStats=None,
processorCapabilities=None, processorIdentifier=None, engine=None, id=-1,):
+ def __init__(self, dbName=None, tblName=None, capabilities=None,
catName=None, validWriteIdList=None, getColumnStats=None,
processorCapabilities=None, processorIdentifier=None, engine="hive", id=-1,):
self.dbName = dbName
self.tblName = tblName
self.capabilities = capabilities
@@ -31727,7 +31721,7 @@ ColumnStatistics.thrift_spec = (
(1, TType.STRUCT, 'statsDesc', [ColumnStatisticsDesc, None], None, ), # 1
(2, TType.LIST, 'statsObj', (TType.STRUCT, [ColumnStatisticsObj, None],
False), None, ), # 2
(3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3
- (4, TType.STRING, 'engine', 'UTF8', None, ), # 4
+ (4, TType.STRING, 'engine', 'UTF8', "hive", ), # 4
)
all_structs.append(FileMetadata)
FileMetadata.thrift_spec = (
@@ -31845,7 +31839,7 @@ SetPartitionsStatsRequest.thrift_spec = (
(2, TType.BOOL, 'needMerge', None, None, ), # 2
(3, TType.I64, 'writeId', None, -1, ), # 3
(4, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 4
- (5, TType.STRING, 'engine', 'UTF8', None, ), # 5
+ (5, TType.STRING, 'engine', 'UTF8', "hive", ), # 5
)
all_structs.append(SetPartitionsStatsResponse)
SetPartitionsStatsResponse.thrift_spec = (
@@ -32044,7 +32038,7 @@ TableStatsRequest.thrift_spec = (
(3, TType.LIST, 'colNames', (TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.STRING, 'catName', 'UTF8', None, ), # 4
(5, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 5
- (6, TType.STRING, 'engine', 'UTF8', None, ), # 6
+ (6, TType.STRING, 'engine', 'UTF8', "hive", ), # 6
(7, TType.I64, 'id', None, -1, ), # 7
)
all_structs.append(PartitionsStatsRequest)
@@ -32056,7 +32050,7 @@ PartitionsStatsRequest.thrift_spec = (
(4, TType.LIST, 'partNames', (TType.STRING, 'UTF8', False), None, ), # 4
(5, TType.STRING, 'catName', 'UTF8', None, ), # 5
(6, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 6
- (7, TType.STRING, 'engine', 'UTF8', None, ), # 7
+ (7, TType.STRING, 'engine', 'UTF8', "hive", ), # 7
)
all_structs.append(AddPartitionsResult)
AddPartitionsResult.thrift_spec = (
@@ -32142,7 +32136,7 @@ GetPartitionsByNamesRequest.thrift_spec = (
(4, TType.BOOL, 'get_col_stats', None, None, ), # 4
(5, TType.LIST, 'processorCapabilities', (TType.STRING, 'UTF8', False),
None, ), # 5
(6, TType.STRING, 'processorIdentifier', 'UTF8', None, ), # 6
- (7, TType.STRING, 'engine', 'UTF8', None, ), # 7
+ (7, TType.STRING, 'engine', 'UTF8', "hive", ), # 7
(8, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 8
(9, TType.BOOL, 'getFileMetadata', None, None, ), # 9
(10, TType.I64, 'id', None, -1, ), # 10
@@ -32834,7 +32828,7 @@ GetTableRequest.thrift_spec = (
(7, TType.BOOL, 'getColumnStats', None, None, ), # 7
(8, TType.LIST, 'processorCapabilities', (TType.STRING, 'UTF8', False),
None, ), # 8
(9, TType.STRING, 'processorIdentifier', 'UTF8', None, ), # 9
- (10, TType.STRING, 'engine', 'UTF8', None, ), # 10
+ (10, TType.STRING, 'engine', 'UTF8', "hive", ), # 10
(11, TType.I64, 'id', None, -1, ), # 11
)
all_structs.append(GetTableResult)
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 634f58351bb..b5fcabee250 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -2342,7 +2342,7 @@ class ColumnStatistics
STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc',
:class => ::ColumnStatisticsDesc},
STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element
=> {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name =>
'isStatsCompliant', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional
=> true}
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true}
}
def struct_fields; FIELDS; end
@@ -2656,14 +2656,13 @@ class SetPartitionsStatsRequest
NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge',
:optional => true},
WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default =>
-1, :optional => true},
VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name =>
'validWriteIdList', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'}
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true}
}
def struct_fields; FIELDS; end
def validate
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field colStats is unset!') unless @colStats
- raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field engine is unset!') unless @engine
end
::Thrift::Struct.generate_accessors self
@@ -3274,7 +3273,7 @@ class TableStatsRequest
COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element
=> {:type => ::Thrift::Types::STRING}},
CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName',
:optional => true},
VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name =>
'validWriteIdList', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'},
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true},
ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1,
:optional => true}
}
@@ -3284,7 +3283,6 @@ class TableStatsRequest
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field dbName is unset!') unless @dbName
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field tblName is unset!') unless @tblName
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field colNames is unset!') unless @colNames
- raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field engine is unset!') unless @engine
end
::Thrift::Struct.generate_accessors self
@@ -3307,7 +3305,7 @@ class PartitionsStatsRequest
PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames',
:element => {:type => ::Thrift::Types::STRING}},
CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName',
:optional => true},
VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name =>
'validWriteIdList', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'}
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true}
}
def struct_fields; FIELDS; end
@@ -3317,7 +3315,6 @@ class PartitionsStatsRequest
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field tblName is unset!') unless @tblName
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field colNames is unset!') unless @colNames
raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field partNames is unset!') unless @partNames
- raise
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required
field engine is unset!') unless @engine
end
::Thrift::Struct.generate_accessors self
@@ -3574,7 +3571,7 @@ class GetPartitionsByNamesRequest
GET_COL_STATS => {:type => ::Thrift::Types::BOOL, :name =>
'get_col_stats', :optional => true},
PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name =>
'processorCapabilities', :element => {:type => ::Thrift::Types::STRING},
:optional => true},
PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name =>
'processorIdentifier', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional
=> true},
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true},
VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name =>
'validWriteIdList', :optional => true},
GETFILEMETADATA => {:type => ::Thrift::Types::BOOL, :name =>
'getFileMetadata', :optional => true},
ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1,
:optional => true},
@@ -5705,7 +5702,7 @@ class GetTableRequest
GETCOLUMNSTATS => {:type => ::Thrift::Types::BOOL, :name =>
'getColumnStats', :optional => true},
PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name =>
'processorCapabilities', :element => {:type => ::Thrift::Types::STRING},
:optional => true},
PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name =>
'processorIdentifier', :optional => true},
- ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional
=> true},
+ ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default
=> %q"hive", :optional => true},
ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1,
:optional => true}
}
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 9b7cbb82df6..7d484bf44cd 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1272,7 +1272,8 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
List<String> colNames, String engine, String validWriteIdList)
throws NoSuchObjectException, MetaException, TException {
PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName,
tableName, colNames,
- partNames == null ? new ArrayList<String>() : partNames, engine);
+ partNames == null ? new ArrayList<String>() : partNames);
+ rqst.setEngine(engine);
rqst.setCatName(catName);
rqst.setValidWriteIdList(validWriteIdList);
return client.get_partitions_statistics_req(rqst).getPartStats();
@@ -1297,7 +1298,8 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
LOG.debug("Columns is empty or partNames is empty : Short-circuiting
stats eval on client side.");
return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
}
- PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames, engine);
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames);
+ req.setEngine(engine);
req.setCatName(catName);
req.setValidWriteIdList(writeIdList);
@@ -3446,7 +3448,8 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
if (colNames.isEmpty()) {
return Collections.emptyList();
}
- TableStatsRequest rqst = new TableStatsRequest(dbName, tableName,
colNames, engine);
+ TableStatsRequest rqst = new TableStatsRequest(dbName, tableName,
colNames);
+ rqst.setEngine(engine);
rqst.setCatName(catName);
rqst.setEngine(engine);
return getTableColumnStatisticsInternal(rqst).getTableStats();
@@ -3479,7 +3482,7 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
if (colNames.isEmpty()) {
return Collections.emptyList();
}
- TableStatsRequest rqst = new TableStatsRequest(dbName, tableName,
colNames, engine);
+ TableStatsRequest rqst = new TableStatsRequest(dbName, tableName,
colNames);
rqst.setEngine(engine);
rqst.setCatName(catName);
rqst.setValidWriteIdList(validWriteIdList);
@@ -3505,8 +3508,8 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
String catName, String dbName, String tableName, List<String> partNames,
List<String> colNames, String engine) throws TException {
- PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName,
tableName, colNames,
- partNames, engine);
+ PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName,
tableName, colNames, partNames);
+ rqst.setEngine(engine);
rqst.setCatName(catName);
rqst.setValidWriteIdList(getValidWriteIdList(dbName, tableName));
return client.get_partitions_statistics_req(rqst).getPartStats();
@@ -4659,7 +4662,8 @@ public class HiveMetaStoreClient implements
IMetaStoreClient, AutoCloseable {
LOG.debug("Columns is empty or partNames is empty : Short-circuiting
stats eval on client side.");
return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
}
- PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames, engine);
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames);
+ req.setEngine(engine);
req.setCatName(catName);
req.setValidWriteIdList(getValidWriteIdList(dbName, tblName));
diff --git
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 422f23fdfff..cbe1d5c96b3 100644
---
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -600,7 +600,7 @@ struct ColumnStatistics {
2: required list<ColumnStatisticsObj> statsObj,
3: optional bool isStatsCompliant, // Are the stats isolation-level-compliant
with the
// the calling query?
-4: optional string engine
+4: optional string engine = "hive"
}
// FileMetadata represents the table-level (in case of unpartitioned) or
partition-level
@@ -725,7 +725,7 @@ struct SetPartitionsStatsRequest {
2: optional bool needMerge, //stats need to be merged with the existing stats
3: optional i64 writeId=-1, // writeId for the current query that
updates the stats
4: optional string validWriteIdList, // valid write id list for the table for
which this struct is being sent
-5: required string engine //engine creating the current request
+5: optional string engine = "hive" //engine creating the current request
}
struct SetPartitionsStatsResponse {
@@ -901,7 +901,7 @@ struct TableStatsRequest {
3: required list<string> colNames
4: optional string catName,
5: optional string validWriteIdList, // valid write id list for the table
for which this struct is being sent
- 6: required string engine, //engine creating the current request
+ 6: optional string engine = "hive", //engine creating the current request
7: optional i64 id=-1 // table id
}
@@ -912,7 +912,7 @@ struct PartitionsStatsRequest {
4: required list<string> partNames,
5: optional string catName,
6: optional string validWriteIdList, // valid write id list for the table for
which this struct is being sent
- 7: required string engine //engine creating the current request
+ 7: optional string engine = "hive" //engine creating the current request
}
// Return type for add_partitions_req
@@ -993,7 +993,7 @@ struct GetPartitionsByNamesRequest {
4: optional bool get_col_stats,
5: optional list<string> processorCapabilities,
6: optional string processorIdentifier,
- 7: optional string engine,
+ 7: optional string engine = "hive",
8: optional string validWriteIdList,
9: optional bool getFileMetadata,
10: optional i64 id=-1, // table id
@@ -1710,7 +1710,7 @@ struct GetTableRequest {
7: optional bool getColumnStats,
8: optional list<string> processorCapabilities,
9: optional string processorIdentifier,
- 10: optional string engine,
+ 10: optional string engine = "hive",
11: optional i64 id=-1 // table id
}
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 96695ee1eaa..07b4f25c3bc 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1918,15 +1918,17 @@ public class HiveMetaStoreClientPreCatalog implements
IMetaStoreClient, AutoClos
public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName,
String tableName,
List<String> colNames, String engine) throws NoSuchObjectException,
MetaException, TException,
InvalidInputException, InvalidObjectException {
- return client.get_table_statistics_req(
- new TableStatsRequest(dbName, tableName, colNames,
engine)).getTableStats();
+ TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
+ tsr.setEngine(engine);
+ return client.get_table_statistics_req(new
TableStatsRequest(tsr)).getTableStats();
}
@Override
public List<ColumnStatisticsObj> getTableColumnStatistics(
String dbName, String tableName, List<String> colNames, String engine,
String validWriteIdList)
throws NoSuchObjectException, MetaException, TException {
- TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames,
engine);
+ TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
+ tsr.setEngine(engine);
tsr.setValidWriteIdList(validWriteIdList);
return client.get_table_statistics_req(tsr).getTableStats();
@@ -1937,8 +1939,9 @@ public class HiveMetaStoreClientPreCatalog implements
IMetaStoreClient, AutoClos
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
String dbName, String tableName, List<String> partNames, List<String>
colNames, String engine)
throws NoSuchObjectException, MetaException, TException {
- return client.get_partitions_statistics_req(
- new PartitionsStatsRequest(dbName, tableName, colNames, partNames,
engine)).getPartStats();
+ PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName,
colNames, partNames);
+ psr.setEngine(engine);
+ return client.get_partitions_statistics_req(new
PartitionsStatsRequest(psr)).getPartStats();
}
@Override
@@ -1946,7 +1949,8 @@ public class HiveMetaStoreClientPreCatalog implements
IMetaStoreClient, AutoClos
String dbName, String tableName, List<String> partNames,
List<String> colNames, String engine, String validWriteIdList)
throws NoSuchObjectException, MetaException, TException {
- PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName,
colNames, partNames, engine);
+ PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName,
colNames, partNames);
+ psr.setEngine(engine);
psr.setValidWriteIdList(validWriteIdList);
return client.get_partitions_statistics_req(
psr).getPartStats();
@@ -2945,7 +2949,8 @@ public class HiveMetaStoreClientPreCatalog implements
IMetaStoreClient, AutoClos
LOG.debug("Columns is empty or partNames is empty : Short-circuiting
stats eval on client side.");
return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
}
- PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames, engine);
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partNames);
+ req.setEngine(engine);
return client.get_aggr_stats_for(req);
}
@@ -2958,7 +2963,8 @@ public class HiveMetaStoreClientPreCatalog implements
IMetaStoreClient, AutoClos
LOG.debug("Columns is empty or partNames is empty : Short-circuiting
stats eval on client side.");
return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
}
- PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partName, engine);
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName,
colNames, partName);
+ req.setEngine(engine);
req.setValidWriteIdList(writeIdList);
return client.get_aggr_stats_for(req);
}
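
For reference, a minimal sketch (not part of the commit) of the client-side pattern this change enables, using only the thrift-generated TableStatsRequest API touched above. The table/column names and the "impala" engine value are hypothetical, and it assumes the regenerated three-argument constructor picks up the new field default via the no-arg constructor it delegates to:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class EngineDefaultSketch {
      public static void main(String[] args) {
        // Engine is no longer a constructor argument; leaving it unset
        // falls back to the thrift default of "hive".
        TableStatsRequest defaulted =
            new TableStatsRequest("default", "web_logs", Arrays.asList("ip", "bytes"));
        System.out.println(defaulted.getEngine()); // "hive"

        // Non-default engines are set explicitly, mirroring the
        // rqst.setEngine(engine) calls added in HiveMetaStoreClient.
        TableStatsRequest overridden =
            new TableStatsRequest("default", "web_logs", Arrays.asList("ip", "bytes"));
        overridden.setEngine("impala"); // hypothetical non-default engine
        System.out.println(overridden.getEngine()); // "impala"
      }
    }

Because the engine field is now optional with a default rather than required, older clients that never populate it should no longer trip the required-field validation removed above and are treated as "hive" requests.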