This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 6aff0d006e8 HIVE-25366: Reduce number of Table calls in
updatePartitonColStatsInternal (Dmitriy Fingerman, reviewed by Denys Kuzmenko)
6aff0d006e8 is described below
commit 6aff0d006e81d26cc8dc68dec546eb9650a8e941
Author: Dmitriy Fingerman <[email protected]>
AuthorDate: Mon May 22 02:35:50 2023 -0400
HIVE-25366: Reduce number of Table calls in updatePartitonColStatsInternal
(Dmitriy Fingerman, reviewed by Denys Kuzmenko)
Closes #4319
---
.../hcatalog/listener/DummyRawStoreFailEvent.java | 13 +++++++++
.../apache/hadoop/hive/metastore/AlterHandler.java | 5 ++--
.../apache/hadoop/hive/metastore/HMSHandler.java | 26 +++++++++--------
.../hadoop/hive/metastore/HiveAlterHandler.java | 14 +++++----
.../apache/hadoop/hive/metastore/ObjectStore.java | 19 +++++++++----
.../org/apache/hadoop/hive/metastore/RawStore.java | 11 ++++++--
.../hadoop/hive/metastore/cache/CachedStore.java | 33 ++++++++++++++++++----
.../metastore/DummyRawStoreControlledCommit.java | 14 +++++++++
.../metastore/DummyRawStoreForJdoConnection.java | 16 +++++++++--
.../hadoop/hive/metastore/TestObjectStore.java | 4 ++-
.../hadoop/hive/metastore/TestOldSchema.java | 4 ++-
.../hive/metastore/cache/TestCachedStore.java | 4 ++-
12 files changed, 127 insertions(+), 36 deletions(-)
diff --git
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 464555da4d6..a242284250e 100644
---
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -117,6 +117,7 @@ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -884,6 +885,13 @@ public class DummyRawStoreFailEvent implements RawStore,
Configurable {
return objectStore.updateTableColumnStatistics(statsObj, validWriteIds,
writeId);
}
+ @Override
+ public Map<String, String> updatePartitionColumnStatistics(Table table,
MTable mTable, ColumnStatistics statsObj,
+ List<String> partVals, String validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
+ return objectStore.updatePartitionColumnStatistics(table, mTable,
statsObj, partVals, validWriteIds, writeId);
+ }
+
@Override
public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
statsObj,
List<String> partVals, String validWriteIds, long writeId)
@@ -1592,6 +1600,11 @@ public class DummyRawStoreFailEvent implements RawStore,
Configurable {
objectStore.dropPackage(request);
}
+ @Override
+ public MTable ensureGetMTable(String s, String s1, String s2) throws
NoSuchObjectException {
+ return objectStore.ensureGetMTable(s, s1, s2);
+ }
+
@Override
public Map<String, Map<String, String>>
updatePartitionColumnStatisticsInBatch(
Map<String, ColumnStatistics> partColStatsMap,
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index d903a912b91..6edbef39397 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -25,6 +25,7 @@ import
org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
@@ -118,7 +119,7 @@ public interface AlterHandler extends Configurable {
Partition alterPartition(final RawStore msdb, Warehouse wh, final String
dbname,
final String name, final List<String> part_vals, final Partition new_part,
EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException;
+ throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException, NoSuchObjectException;
/**
* handles alter partition
@@ -147,7 +148,7 @@ public interface AlterHandler extends Configurable {
final String dbname, final String name, final
List<String> part_vals,
final Partition new_part, EnvironmentContext
environmentContext,
IHMSHandler handler, String validWriteIds)
- throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException;
+ throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException, NoSuchObjectException;
/**
* @deprecated As of release 3.0.0. Replaced by {@link
#alterPartitions(RawStore, Warehouse, String,
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index 9033358a7cd..66303f12ef8 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -54,6 +54,7 @@ import
org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.txn.*;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
@@ -4106,7 +4107,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
+ TableName.getQualified(catName, dbName, tblName) +
" does not exist");
}
-
+ MTable mTable = getMS().ensureGetMTable(catName, dbName, tblName);
db = ms.getDatabase(catName, dbName);
if (!parts.isEmpty()) {
@@ -4187,7 +4188,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
new long[0], new BitSet(), writeId);
validWriteIds = validWriteIdList.toString();
}
- updatePartitonColStatsInternal(tbl, partColStats, validWriteIds,
writeId);
+ updatePartitonColStatsInternal(tbl, mTable, partColStats,
validWriteIds, writeId);
}
success = ms.commitTransaction();
@@ -7021,7 +7022,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
colStats.setStatsObj(colStats.getStatsObj());
}
- private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics
colStats,
+ private boolean updatePartitonColStatsInternal(Table tbl, MTable mTable,
ColumnStatistics colStats,
String validWriteIds, long
writeId)
throws MetaException, InvalidObjectException, NoSuchObjectException,
InvalidInputException {
normalizeColStatsInput(colStats);
@@ -7037,13 +7038,12 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
List<String> partVals;
boolean committed = false;
getMS().openTransaction();
-
+
try {
- if (tbl == null) {
- tbl = getTable(catName, dbName, tableName);
- }
+ tbl = Optional.ofNullable(tbl).orElse(getTable(catName, dbName,
tableName));
+ mTable =
Optional.ofNullable(mTable).orElse(getMS().ensureGetMTable(catName, dbName,
tableName));
partVals = getPartValsFromName(tbl, csd.getPartName());
- parameters = getMS().updatePartitionColumnStatistics(colStats, partVals,
validWriteIds, writeId);
+ parameters = getMS().updatePartitionColumnStatistics(tbl, mTable,
colStats, partVals, validWriteIds, writeId);
if (parameters != null) {
if (transactionalListeners != null &&
!transactionalListeners.isEmpty()) {
MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
@@ -7139,7 +7139,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
@Override
public boolean update_partition_column_statistics(ColumnStatistics colStats)
throws TException {
// Deprecated API.
- return updatePartitonColStatsInternal(null, colStats, null, -1);
+ return updatePartitonColStatsInternal(null, null, colStats, null, -1);
}
@@ -7155,7 +7155,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
throw new InvalidInputException("Merge is not supported for
non-aggregate stats");
}
ColumnStatistics colStats = req.getColStatsIterator().next();
- boolean ret = updatePartitonColStatsInternal(null, colStats,
+ boolean ret = updatePartitonColStatsInternal(null, null, colStats,
req.getValidWriteIdList(), req.getWriteId());
return new SetPartitionsStatsResponse(ret);
}
@@ -9169,6 +9169,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
colNames, newStatsMap, request);
} else { // No merge.
Table t = getTable(catName, dbName, tableName);
+ MTable mTable = getMS().ensureGetMTable(catName, dbName, tableName);
// We don't short-circuit on errors here anymore. That can leave acid
stats invalid.
if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) {
ret = updatePartitionColStatsInBatch(t, newStatsMap,
@@ -9176,7 +9177,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
} else {
for (Map.Entry<String, ColumnStatistics> entry :
newStatsMap.entrySet()) {
// We don't short-circuit on errors here anymore. That can leave
acid stats invalid.
- ret = updatePartitonColStatsInternal(t, entry.getValue(),
+ ret = updatePartitonColStatsInternal(t, mTable, entry.getValue(),
request.getValidWriteIdList(), request.getWriteId()) &&
ret;
}
}
@@ -9214,6 +9215,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
}
Table t = getTable(catName, dbName, tableName);
+ MTable mTable = getMS().ensureGetMTable(catName, dbName, tableName);
Map<String, ColumnStatistics> statsMap = new HashMap<>();
boolean useDirectSql = MetastoreConf.getBoolVar(getConf(),
ConfVars.TRY_DIRECT_SQL);
for (Map.Entry<String, ColumnStatistics> entry : newStatsMap.entrySet())
{
@@ -9239,7 +9241,7 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
if (useDirectSql) {
statsMap.put(csNew.getStatsDesc().getPartName(), csNew);
} else {
- result = updatePartitonColStatsInternal(t, csNew,
+ result = updatePartitonColStatsInternal(t, mTable, csNew,
request.getValidWriteIdList(), request.getWriteId()) &&
result;
}
} else if (isInvalidTxnStats) {
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index fc413504ec1..4f03e5d5b58 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -390,12 +391,14 @@ public class HiveAlterHandler implements AlterHandler {
}
}
Deadline.checkTimeout();
+ Table table = msdb.getTable(catName, newDbName, newTblName);
+ MTable mTable = msdb.ensureGetMTable(catName, newDbName, newTblName);
for (Entry<Partition, ColumnStatistics> partColStats :
columnStatsNeedUpdated.entries()) {
ColumnStatistics newPartColStats = partColStats.getValue();
newPartColStats.getStatsDesc().setDbName(newDbName);
newPartColStats.getStatsDesc().setTableName(newTblName);
- msdb.updatePartitionColumnStatistics(newPartColStats,
partColStats.getKey().getValues(),
- writeIdList, newt.getWriteId());
+ msdb.updatePartitionColumnStatistics(table, mTable,
newPartColStats,
+ partColStats.getKey().getValues(), writeIdList,
newt.getWriteId());
}
} else {
msdb.alterTable(catName, dbname, name, newt, writeIdList);
@@ -554,7 +557,7 @@ public class HiveAlterHandler implements AlterHandler {
public Partition alterPartition(final RawStore msdb, Warehouse wh, final
String dbname,
final String name, final List<String> part_vals, final Partition new_part,
EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException {
+ throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException, NoSuchObjectException {
return alterPartition(msdb, wh, MetaStoreUtils.getDefaultCatalog(conf),
dbname, name, part_vals, new_part,
environmentContext, null, null);
}
@@ -563,7 +566,7 @@ public class HiveAlterHandler implements AlterHandler {
public Partition alterPartition(RawStore msdb, Warehouse wh, String catName,
String dbname,
String name, List<String> part_vals, final Partition new_part,
EnvironmentContext environmentContext, IHMSHandler handler, String
validWriteIds)
- throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException {
+ throws InvalidOperationException, InvalidObjectException,
AlreadyExistsException, MetaException, NoSuchObjectException {
boolean success = false;
Partition oldPart;
List<TransactionalMetaStoreEventListener> transactionalListeners = null;
@@ -648,6 +651,7 @@ public class HiveAlterHandler implements AlterHandler {
throw new InvalidObjectException(
"Unable to alter partition because table or database does not
exist.");
}
+ MTable mTable = msdb.ensureGetMTable(catName, dbname, name);
try {
oldPart = msdb.getPartition(catName, dbname, name, part_vals);
} catch (NoSuchObjectException e) {
@@ -766,7 +770,7 @@ public class HiveAlterHandler implements AlterHandler {
for (ColumnStatistics cs : multiColumnStats) {
cs.getStatsDesc().setPartName(newPartName);
try {
- msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
+ msdb.updatePartitionColumnStatistics(tbl, mTable, cs,
new_part.getValues(),
validWriteIds, new_part.getWriteId());
} catch (InvalidInputException iie) {
throw new InvalidOperationException("Unable to update partition
stats in table rename." + iie);
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 1fbe24eec37..d6f6e165ace 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2064,7 +2064,7 @@ public class ObjectStore implements RawStore,
Configurable {
Query query = null;
try {
openTransaction();
- catName = normalizeIdentifier(catName);
+ catName =
normalizeIdentifier(Optional.ofNullable(catName).orElse(getDefaultCatalog(conf)));
db = normalizeIdentifier(db);
table = normalizeIdentifier(table);
query = pm.newQuery(MTable.class,
@@ -4874,7 +4874,8 @@ public class ObjectStore implements RawStore,
Configurable {
* @param tblName Table name.
* @return Table object.
*/
- private MTable ensureGetMTable(String catName, String dbName, String tblName)
+ @Override
+ public MTable ensureGetMTable(String catName, String dbName, String tblName)
throws NoSuchObjectException {
MTable mtable = getMTable(catName, dbName, tblName);
if (mtable == null) {
@@ -9924,7 +9925,7 @@ public class ObjectStore implements RawStore,
Configurable {
}
@Override
- public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
colStats,
+ public Map<String, String> updatePartitionColumnStatistics(Table table,
MTable mTable, ColumnStatistics colStats,
List<String> partVals, String validWriteIds, long writeId)
throws MetaException, NoSuchObjectException, InvalidObjectException,
InvalidInputException {
boolean committed = false;
@@ -9934,8 +9935,6 @@ public class ObjectStore implements RawStore,
Configurable {
List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
String catName = statsDesc.isSetCatName() ? statsDesc.getCatName() :
getDefaultCatalog(conf);
- MTable mTable = ensureGetMTable(catName, statsDesc.getDbName(),
statsDesc.getTableName());
- Table table = convertToTable(mTable);
Partition partition = convertToPart(getMPartition(
catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals,
mTable), false);
List<String> colNames = new ArrayList<>();
@@ -9994,6 +9993,16 @@ public class ObjectStore implements RawStore,
Configurable {
}
}
+ @Override
+ public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
colStats,
+ List<String> partVals, String validWriteIds, long writeId)
+ throws MetaException, NoSuchObjectException, InvalidObjectException,
InvalidInputException {
+ ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+ Table table = getTable(statsDesc.getCatName(), statsDesc.getDbName(),
statsDesc.getTableName());
+ MTable mTable = ensureGetMTable(statsDesc.getCatName(),
statsDesc.getDbName(), statsDesc.getTableName());
+ return updatePartitionColumnStatistics(table, mTable, colStats, partVals,
validWriteIds, writeId);
+ }
+
@Override
public Map<String, Map<String, String>>
updatePartitionColumnStatisticsInBatch(
Map<String,
ColumnStatistics> partColStatsMap,
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index b0f2df9c998..69073707f25 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -1151,8 +1152,13 @@ public interface RawStore extends Configurable {
* @throws InvalidInputException unable to record the stats for the table
*/
Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
statsObj,
- List<String> partVals, String validWriteIds, long writeId)
- throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException;
+ List<String> partVals, String validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException;
+
+ Map<String, String> updatePartitionColumnStatistics(Table table, MTable
mTable,
+ ColumnStatistics statsObj, List<String> partVals,
+ String validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException;
/**
* Returns the relevant column statistics for a given column in a given
table in a given database
@@ -2201,4 +2207,5 @@ public interface RawStore extends Configurable {
Package findPackage(GetPackageRequest request);
List<String> listPackages(ListPackageRequest request);
void dropPackage(DropPackageRequest request);
+ public MTable ensureGetMTable(String catName, String dbName, String tblName)
throws NoSuchObjectException;
}
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 1d475605b3b..38deb9f8a5c 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -27,6 +27,7 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
import java.util.Stack;
import java.util.concurrent.Executors;
@@ -64,6 +65,7 @@ import
org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFa
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.messaging.*;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
@@ -1276,7 +1278,7 @@ public class CachedStore implements RawStore,
Configurable {
@Override public Table getTable(String catName, String dbName, String
tblName, String validWriteIds, long tableId)
throws MetaException {
- catName = normalizeIdentifier(catName);
+ catName =
normalizeIdentifier(Optional.ofNullable(catName).orElse(getDefaultCatalog(conf)));
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents &&
rawStore.isActiveTransaction())) {
@@ -2226,10 +2228,26 @@ public class CachedStore implements RawStore,
Configurable {
}
@Override public Map<String, String>
updatePartitionColumnStatistics(ColumnStatistics colStats, List<String>
partVals,
- String validWriteIds, long writeId)
- throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
- Map<String, String> newParams =
- rawStore.updatePartitionColumnStatistics(colStats, partVals,
validWriteIds, writeId);
+ String
validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
+ return updatePartitionColumnStatisticsInternal(null, null, colStats,
partVals, validWriteIds, writeId);
+ }
+
+ @Override public Map<String, String> updatePartitionColumnStatistics(Table
table, MTable mTable,
+
ColumnStatistics colStats, List<String> partVals,
+ String
validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
+ return updatePartitionColumnStatisticsInternal(table, mTable, colStats,
partVals, validWriteIds, writeId);
+ }
+
+ private Map<String, String> updatePartitionColumnStatisticsInternal(Table
table, MTable mTable,
+
ColumnStatistics colStats, List<String> partVals,
+ String
validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
+ ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+ table = Optional.ofNullable(table).orElse(getTable(statsDesc.getCatName(),
statsDesc.getDbName(), statsDesc.getTableName()));
+ mTable =
Optional.ofNullable(mTable).orElse(ensureGetMTable(statsDesc.getCatName(),
statsDesc.getDbName(), statsDesc.getTableName()));
+ Map<String, String> newParams =
rawStore.updatePartitionColumnStatistics(table, mTable, colStats, partVals,
validWriteIds, writeId);
// in case of event based cache update, cache is updated during commit txn
if (newParams != null && !canUseEvents) {
updatePartitionColumnStatisticsInCache(colStats, newParams, partVals);
@@ -3340,6 +3358,11 @@ public class CachedStore implements RawStore,
Configurable {
rawStore.dropPackage(request);
}
+ @Override
+ public MTable ensureGetMTable(String catName, String dbName, String tblName)
throws NoSuchObjectException {
+ return rawStore.ensureGetMTable(catName, dbName, tblName);
+ }
+
private boolean shouldGetConstraintFromRawStore(String catName, String
dbName, String tblName) {
return !shouldCacheTable(catName, dbName, tblName) || (canUseEvents &&
rawStore.isActiveTransaction())
|| !sharedCache.isTableConstraintValid(catName, dbName, tblName);
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index bc19b961e91..46a638fe468 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -827,6 +828,14 @@ public class DummyRawStoreControlledCommit implements
RawStore, Configurable {
return objectStore.updateTableColumnStatistics(statsObj, validWriteIds,
writeId);
}
+ @Override
+ public Map<String, String> updatePartitionColumnStatistics(Table table,
MTable mTable, ColumnStatistics statsObj,
+ List<String>
partVals, String validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.updatePartitionColumnStatistics(table, mTable,
statsObj, partVals, validWriteIds, writeId);
+ }
+
@Override
public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
statsObj,
List<String> partVals, String validWriteIds, long writeId)
@@ -1532,6 +1541,11 @@ public class DummyRawStoreControlledCommit implements
RawStore, Configurable {
objectStore.dropPackage(request);
}
+ @Override
+ public MTable ensureGetMTable(String catName, String dbName, String tblName)
throws NoSuchObjectException {
+ return objectStore.ensureGetMTable(catName, dbName, tblName);
+ }
+
@Override
public Map<String, Map<String, String>>
updatePartitionColumnStatisticsInBatch(
Map<String, ColumnStatistics> partColStatsMap,
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 52ae4b8b12c..db443e12811 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -128,6 +128,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
import org.apache.thrift.TException;
@@ -898,12 +899,18 @@ public class DummyRawStoreForJdoConnection implements
RawStore {
}
@Override
- public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics
statsObj,List<String> partVals,
- String validWriteIds, long writeId)
+ public Map<String, String> updatePartitionColumnStatistics(Table table,
MTable mTable,
+ ColumnStatistics statsObj, List<String> partVals, String validWriteIds,
long writeId)
throws NoSuchObjectException, MetaException, InvalidObjectException {
return null;
}
+ @Override public Map<String, String>
updatePartitionColumnStatistics(ColumnStatistics statsObj, List<String>
partVals,
+ String validWriteIds, long writeId)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
InvalidInputException {
+ return null;
+ }
+
@Override
public void verifySchema() throws MetaException {
}
@@ -1532,6 +1539,11 @@ public class DummyRawStoreForJdoConnection implements
RawStore {
public void dropPackage(DropPackageRequest request) {
}
+ @Override
+ public MTable ensureGetMTable(String catName, String dbName, String tblName)
throws NoSuchObjectException {
+ return null;
+ }
+
@Override
public Map<String, Map<String, String>>
updatePartitionColumnStatisticsInBatch(
Map<String, ColumnStatistics> partColStatsMap,
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index bc9e04a227b..1a5d8715174 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
import org.apache.hadoop.hive.metastore.model.MNotificationLog;
import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
import org.junit.Assert;
@@ -819,6 +820,7 @@ public class TestObjectStore {
try (AutoCloseable c = deadline()) {
objectStore.createTable(tbl1);
}
+ MTable mTable1 = objectStore.ensureGetMTable(tbl1.getCatName(),
tbl1.getDbName(), tbl1.getTableName());
PrivilegeBag privilegeBag = new PrivilegeBag();
// Create partitions for the partitioned table
for(int i=0; i < 3; i++) {
@@ -877,7 +879,7 @@ public class TestObjectStore {
statsObjList.add(partStats);
try (AutoCloseable c = deadline()) {
- objectStore.updatePartitionColumnStatistics(stats, part.getValues(),
null, -1);
+ objectStore.updatePartitionColumnStatistics(tbl1, mTable1, stats,
part.getValues(), null, -1);
}
}
}
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 14110c6dbe1..6928aa55a96 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.junit.After;
import org.junit.Assert;
@@ -148,6 +149,7 @@ public class TestOldSchema {
Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0,
sd, partCols,
Collections.emptyMap(), null, null, null);
store.createTable(table);
+ MTable mTable = store.ensureGetMTable(table.getCatName(),
table.getDbName(), table.getTableName());
Deadline.startTimer("getPartition");
for (int i = 0; i < 10; i++) {
@@ -178,7 +180,7 @@ public class TestOldSchema {
obj.setStatsData(data);
cs.addToStatsObj(obj);
cs.setEngine(ENGINE);
- store.updatePartitionColumnStatistics(cs, partVal, null, -1);
+ store.updatePartitionColumnStatistics(table, mTable, cs, partVal, null,
-1);
}
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index fbd5c25f937..4b5e1813f0b 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -40,6 +40,7 @@ import
org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.columnstats.ColStatsBuilder;
import
org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.junit.After;
import org.junit.Assert;
@@ -345,6 +346,7 @@ import static
org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
Table salesTable =
createTable(tpcdsdb.getName(), "store_sales",
createStorageDescriptor(columns), partitionsColumns);
objectStore.createTable(salesTable);
+ MTable salesMTable = objectStore.ensureGetMTable(salesTable.getCatName(),
salesTable.getDbName(), salesTable.getTableName());
Map<String, ColumnStatisticsData> partitionStats = new HashMap<>();
ColumnStatisticsData data1 = new
ColStatsBuilder<>(long.class).numNulls(100).numDVs(50)
@@ -363,7 +365,7 @@ import static
org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
partNames.add(pName);
ColumnStatistics stats = createColumnStatistics(pStat.getValue(),
salesTable, soldDateCol, pName);
- objectStore.updatePartitionColumnStatistics(stats, partitionValue, null,
-1);
+ objectStore.updatePartitionColumnStatistics(salesTable, salesMTable,
stats, partitionValue, null, -1);
}
List<ColumnStatistics> rawStats = objectStore