This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new b0d3503eea0 HIVE-28166: Iceberg: Truncate on branch operates on the main table. (#5173). (Ayush Saxena, reviewed by Denys Kuzmenko)
b0d3503eea0 is described below
commit b0d3503eea03015d5be9f66e439bee016a176754
Author: Ayush Saxena <[email protected]>
AuthorDate: Thu Apr 4 17:24:55 2024 +0530
HIVE-28166: Iceberg: Truncate on branch operates on the main table. (#5173). (Ayush Saxena, reviewed by Denys Kuzmenko)
---
.../iceberg/mr/hive/HiveIcebergMetaHook.java | 5 +
.../iceberg/mr/hive/HiveIcebergStorageHandler.java | 6 +
.../queries/positive/truncate_iceberg_branch.q | 48 ++++++
.../results/positive/truncate_iceberg_branch.q.out | 188 +++++++++++++++++++++
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 2 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 12 ++
.../org/apache/hadoop/hive/ql/metadata/Table.java | 2 +-
.../apache/hadoop/hive/ql/parse/HiveTableName.java | 2 +-
.../hadoop/hive/metastore/HiveMetaStoreClient.java | 24 ++-
.../hadoop/hive/metastore/IMetaStoreClient.java | 5 +
.../metastore/HiveMetaStoreClientPreCatalog.java | 6 +
11 files changed, 290 insertions(+), 10 deletions(-)
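
For quick context, this is the branch-level truncate the patch fixes; a minimal sketch using the table and branch names from the new truncate_iceberg_branch.q test added below (before this change, such a statement emptied the main table instead of the branch):

    create external table ice01(id int) stored by iceberg stored as orc tblproperties ('format-version'='2');
    insert into ice01 values (1), (2), (3), (4);
    alter table ice01 create branch branch1;
    -- with this fix, only the branch1 ref is emptied; data on the main table stays intact
    truncate table default.ice01.branch_branch1;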
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 0703eea9dd9..c3b64554776 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.ql.parse.PartitionTransform;
import org.apache.hadoop.hive.ql.parse.TransformSpec;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -679,6 +680,10 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
}
DeleteFiles delete = icebergTable.newDelete();
+ String branchName = context.getProperties().get(Catalogs.SNAPSHOT_REF);
+ if (branchName != null) {
+ delete.toBranch(HiveUtils.getTableSnapshotRef(branchName));
+ }
delete.deleteFromRowFilter(finalExp);
delete.commit();
context.putToProperties("truncateSkipDataDeletion", "true");
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index f704a5b81c3..8aa833c4c18 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -1772,6 +1772,10 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
public void validatePartSpec(org.apache.hadoop.hive.ql.metadata.Table hmsTable, Map<String, String> partitionSpec)
throws SemanticException {
Table table = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
+ if (hmsTable.getSnapshotRef() != null && hasUndergonePartitionEvolution(table)) {
+ // for this case we rewrite the query as delete query, so validations would be done as part of delete.
+ return;
+ }
if (table.spec().isUnpartitioned() && MapUtils.isNotEmpty(partitionSpec)) {
throw new SemanticException("Writing data into a partition fails when the Iceberg table is unpartitioned.");
@@ -1817,6 +1821,8 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
Table table = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
if (MapUtils.isEmpty(partitionSpec) || !hasUndergonePartitionEvolution(table)) {
return true;
+ } else if (hmsTable.getSnapshotRef() != null) {
+ return false;
}
Expression finalExp = generateExpressionFromPartitionSpec(table, partitionSpec);
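
A note on the partition-evolution path handled above: once the table's partition spec has evolved, a partition-level truncate on a branch is rewritten as a delete, so partition-spec validation is deferred to the delete path. In the new test this corresponds to the statements below (taken from truncate_iceberg_branch.q); the .q.out further down records this truncate as a QUERY rather than a TRUNCATETABLE operation:

    alter table ice02 set partition spec (id);
    truncate table default.ice02.branch_branch1 partition (name='C');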
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/truncate_iceberg_branch.q b/iceberg/iceberg-handler/src/test/queries/positive/truncate_iceberg_branch.q
new file mode 100644
index 00000000000..8c45c1fe271
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/truncate_iceberg_branch.q
@@ -0,0 +1,48 @@
+-- SORT_QUERY_RESULTS
+set hive.explain.user=false;
+set hive.fetch.task.conversion=more;
+
+create external table ice01(id int) stored by iceberg stored as orc tblproperties ('format-version'='2');
+
+insert into ice01 values (1), (2), (3), (4);
+
+select * from ice01;
+
+-- create a branch named branch1
+alter table ice01 create branch branch1;
+select * from default.ice01.branch_branch1;
+
+-- insert some data to branch
+insert into ice01 values (5), (6);
+select * from default.ice01.branch_branch1;
+
+-- truncate the branch
+truncate table default.ice01.branch_branch1;
+select * from default.ice01.branch_branch1;
+
+-- create a partitioned iceberg table
+create external table ice02(id int) partitioned by (name string) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into ice02 values (1, 'A'), (2, 'B'), (3, 'A'), (4, 'B');
+
+select * from ice02;
+
+-- create a branch named branch1
+alter table ice02 create branch branch1;
+
+-- insert some data to branch
+insert into default.ice02.branch_branch1 values (5, 'A'), (6, 'C');
+select * from default.ice02.branch_branch1;
+
+-- truncate partition A
+truncate table default.ice02.branch_branch1 partition (name='A');
+
+select * from default.ice02.branch_branch1;
+
+-- check original table is intact.
+select * from ice02;
+
+-- partition evolution
+alter table ice02 set partition spec (id);
+
+truncate table default.ice02.branch_branch1 partition (name='C');
+select * from default.ice02.branch_branch1;
\ No newline at end of file
diff --git a/iceberg/iceberg-handler/src/test/results/positive/truncate_iceberg_branch.q.out b/iceberg/iceberg-handler/src/test/results/positive/truncate_iceberg_branch.q.out
new file mode 100644
index 00000000000..eda1d52ea49
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/truncate_iceberg_branch.q.out
@@ -0,0 +1,188 @@
+PREHOOK: query: create external table ice01(id int) stored by iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice01
+POSTHOOK: query: create external table ice01(id int) stored by iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice01
+PREHOOK: query: insert into ice01 values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice01
+POSTHOOK: query: insert into ice01 values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice01
+PREHOOK: query: select * from ice01
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice01
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice01
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice01
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1
+2
+3
+4
+PREHOOK: query: alter table ice01 create branch branch1
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@ice01
+POSTHOOK: query: alter table ice01 create branch branch1
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@ice01
+PREHOOK: query: select * from default.ice01.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice01
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice01.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice01
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1
+2
+3
+4
+PREHOOK: query: insert into ice01 values (5), (6)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice01
+POSTHOOK: query: insert into ice01 values (5), (6)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice01
+PREHOOK: query: select * from default.ice01.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice01
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice01.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice01
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1
+2
+3
+4
+PREHOOK: query: truncate table default.ice01.branch_branch1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@ice01
+POSTHOOK: query: truncate table default.ice01.branch_branch1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@ice01
+PREHOOK: query: select * from default.ice01.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice01
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice01.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice01
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: create external table ice02(id int) partitioned by (name string) stored by iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice02
+POSTHOOK: query: create external table ice02(id int) partitioned by (name string) stored by iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice02
+PREHOOK: query: insert into ice02 values (1, 'A'), (2, 'B'), (3, 'A'), (4, 'B')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice02
+POSTHOOK: query: insert into ice02 values (1, 'A'), (2, 'B'), (3, 'A'), (4, 'B')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice02
+PREHOOK: query: select * from ice02
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice02
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice02
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 A
+2 B
+3 A
+4 B
+PREHOOK: query: alter table ice02 create branch branch1
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@ice02
+POSTHOOK: query: alter table ice02 create branch branch1
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@ice02
+PREHOOK: query: insert into default.ice02.branch_branch1 values (5, 'A'), (6, 'C')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice02
+POSTHOOK: query: insert into default.ice02.branch_branch1 values (5, 'A'), (6, 'C')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice02
+PREHOOK: query: select * from default.ice02.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice02
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice02.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 A
+2 B
+3 A
+4 B
+5 A
+6 C
+PREHOOK: query: truncate table default.ice02.branch_branch1 partition (name='A')
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@ice02@name=A
+POSTHOOK: query: truncate table default.ice02.branch_branch1 partition (name='A')
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@ice02@name=A
+PREHOOK: query: select * from default.ice02.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice02
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice02.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2 B
+4 B
+6 C
+PREHOOK: query: select * from ice02
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice02
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice02
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 A
+2 B
+3 A
+4 B
+PREHOOK: query: alter table ice02 set partition spec (id)
+PREHOOK: type: ALTERTABLE_SETPARTSPEC
+PREHOOK: Input: default@ice02
+POSTHOOK: query: alter table ice02 set partition spec (id)
+POSTHOOK: type: ALTERTABLE_SETPARTSPEC
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: default@ice02
+PREHOOK: query: truncate table default.ice02.branch_branch1 partition (name='C')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@ice02@name=C
+POSTHOOK: query: truncate table default.ice02.branch_branch1 partition (name='C')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@ice02@name=C
+PREHOOK: query: select * from default.ice02.branch_branch1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice02
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice02.branch_branch1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice02
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2 B
+4 B
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 23aea1d3181..f44fb8f7f68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1530,7 +1530,7 @@ public class Hive {
List<String> partNames = ((null == partSpec)
? null : getPartitionNames(table.getDbName(), table.getTableName(), partSpec, (short) -1));
if (snapshot == null) {
- getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
+ getMSC().truncateTable(table.getFullTableName(), partNames);
} else {
boolean truncateUseBase = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE)
|| HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index ce725a5cdb3..d4b1f181869 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -210,6 +210,18 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClientWithLocalCach
super.truncateTable(dbName, tableName, partNames);
}
+ @Override
+ public void truncateTable(TableName tableName, List<String> partNames) throws TException {
+ // First try temp table
+ org.apache.hadoop.hive.metastore.api.Table table = getTempTable(tableName.getDb(), tableName.getTable());
+ if (table != null) {
+ truncateTempTable(table);
+ return;
+ }
+ // Try underlying client
+ super.truncateTable(tableName, partNames);
+ }
+
@Override
public void truncateTable(String dbName, String tableName,
List<String> partNames, String validWriteIds, long writeId)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 21752177e63..c9ed3f9b643 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -323,7 +323,7 @@ public class Table implements Serializable {
}
public TableName getFullTableName() {
- return new TableName(getCatName(), getDbName(), getTableName());
+ return new TableName(getCatName(), getDbName(), getTableName(), getSnapshotRef());
}
final public Path getDataLocation() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java
index 166baf14924..d0a723669ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java
@@ -38,7 +38,7 @@ public final class HiveTableName extends TableName {
* @throws SemanticException
*/
public static TableName of(Table table) throws SemanticException {
- return ofNullable(table.getTableName(), table.getDbName());
+ return ofNullable(table.getTableName(), table.getDbName(), table.getSnapshotRef());
}
/**
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 862096cb4d8..ec7d8bec37d 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -121,6 +121,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
public static final String MANUALLY_INITIATED_COMPACTION = "manual";
public static final String TRUNCATE_SKIP_DATA_DELETION = "truncateSkipDataDeletion";
public static final String SKIP_DROP_PARTITION = "dropPartitionSkip";
+
+ public static final String SNAPSHOT_REF = "snapshot_ref";
public static final String RENAME_PARTITION_MAKE_COPY = "renamePartitionMakeCopy";
/**
@@ -2079,34 +2081,42 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
@Override
public void truncateTable(String dbName, String tableName, List<String> partNames,
String validWriteIds, long writeId, boolean deleteData) throws TException {
- truncateTableInternal(getDefaultCatalog(conf),
- dbName, tableName, partNames, validWriteIds, writeId, deleteData);
+ truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, null, partNames, validWriteIds, writeId,
+ deleteData);
}
@Override
public void truncateTable(String dbName, String tableName, List<String> partNames,
String validWriteIds, long writeId) throws TException {
- truncateTableInternal(getDefaultCatalog(conf),
- dbName, tableName, partNames, validWriteIds, writeId, true);
+ truncateTable(dbName, tableName, partNames, validWriteIds, writeId, true);
}
@Override
public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
- truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, null, -1, true);
+ truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames);
+ }
+
+ @Override
+ public void truncateTable(TableName table, List<String> partNames) throws TException {
+ truncateTableInternal(table.getCat(), table.getDb(), table.getTable(), table.getTableMetaRef(), partNames,
+ null, -1, true);
}
@Override
public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
throws TException {
- truncateTableInternal(catName, dbName, tableName, partNames, null, -1, true);
+ truncateTable(TableName.fromString(tableName, catName, dbName), partNames);
}
- private void truncateTableInternal(String catName, String dbName, String tableName,
+ private void truncateTableInternal(String catName, String dbName, String tableName, String ref,
List<String> partNames, String validWriteIds, long writeId, boolean deleteData)
throws TException {
Table table = getTable(catName, dbName, tableName);
HiveMetaHook hook = getHook(table);
EnvironmentContext context = new EnvironmentContext();
+ if (ref != null) {
+ context.putToProperties(SNAPSHOT_REF, ref);
+ }
context.putToProperties(TRUNCATE_SKIP_DATA_DELETION, Boolean.toString(!deleteData));
if (hook != null) {
hook.preTruncateTable(table, context, partNames);
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index d247c91c918..2dd0e1c823f 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -29,6 +29,7 @@ import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
@@ -555,8 +556,11 @@ public interface IMetaStoreClient extends AutoCloseable {
* @throws MetaException Failure in the RDBMS or storage
* @throws TException Thrift transport exception
*/
+ @Deprecated
void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
+ void truncateTable(TableName table, List<String> partNames) throws TException;
+
void truncateTable(String dbName, String tableName, List<String> partNames,
String validWriteIds, long writeId) throws TException;
@@ -574,6 +578,7 @@ public interface IMetaStoreClient extends AutoCloseable {
* @throws MetaException Failure in the RDBMS or storage
* @throws TException Thrift transport exception
*/
+ @Deprecated
void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
throws MetaException, TException;
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 07b4f25c3bc..a26c0381f3f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.api.*;
@@ -3904,6 +3905,11 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
throw new UnsupportedOperationException();
}
+ @Override
+ public void truncateTable(TableName table, List<String> partNames) throws MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public void truncateTable(String dbName, String tableName,
List<String> partNames, String validWriteIds, long writeId, boolean deleteData)