This is an automated email from the ASF dual-hosted git repository.
zhangbutao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new b75a59779dc HIVE-27914: Fix the missing partitions judgement in drop_partitions_req (#4905)(Wechar Yu, reviewed by Butao Zhang)
b75a59779dc is described below
commit b75a59779dcf9c5ef74890c8916dbe9a2c13aef4
Author: Wechar Yu <[email protected]>
AuthorDate: Thu Jan 25 18:38:53 2024 +0800
HIVE-27914: Fix the missing partitions judgement in drop_partitions_req (#4905)(Wechar Yu, reviewed by Butao Zhang)
---
.../apache/hadoop/hive/ql/metadata/TestHive.java | 43 ++++++++++++++++++++++
.../apache/hadoop/hive/metastore/HMSHandler.java | 16 +++++---
2 files changed, 54 insertions(+), 5 deletions(-)
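For context when reading the diffs below: the old check compared the total number of matched partitions against a minimum count (one per drop expression, or the number of requested names), so an expression that matched nothing could be masked by another expression that matched several partitions. The new check flags the request as soon as any single expression resolves to no partitions at all. A minimal standalone sketch of the difference follows; the class and method names are illustrative only and do not exist in Hive.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative sketch: 'matchesPerExpr' stands in for the per-expression results
// returned by the metastore for a drop_partitions_req by expression.
public class MissingPartitionCheckSketch {

  // Old-style check: total matches vs. "at least one partition per expression".
  static boolean oldCheckMissing(List<List<String>> matchesPerExpr) {
    int minCount = matchesPerExpr.size();
    int total = matchesPerExpr.stream().mapToInt(List::size).sum();
    return total < minCount;
  }

  // New-style check: any expression that matches nothing counts as missing.
  static boolean newCheckMissing(List<List<String>> matchesPerExpr) {
    return matchesPerExpr.stream().anyMatch(List::isEmpty);
  }

  public static void main(String[] args) {
    // One expression matches three partitions, the other matches none
    // (the scenario exercised by testDropMissingPartitionsByFilter below).
    List<List<String>> results = Arrays.asList(
        Arrays.asList("ds=20231129/hr=10", "ds=20231129/hr=11", "ds=20231129/hr=12"),
        Collections.emptyList());
    System.out.println("old check reports missing: " + oldCheckMissing(results)); // false
    System.out.println("new check reports missing: " + newCheckMissing(results)); // true
  }
}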
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index f823b324b0c..bb7f754fc50 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -32,6 +32,7 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -51,7 +52,10 @@ import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.events.InsertEvent;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
@@ -682,6 +686,45 @@ public class TestHive {
    }
  }
+  @Test
+  public void testDropMissingPartitionsByFilter() throws Throwable {
+    String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+    String tableName = "table_for_testDropMissingPartitionsByFilter";
+
+    Table table = createPartitionedTable(dbName, tableName);
+    for (int i = 10; i <= 12; i++) {
+      Map<String, String> partitionSpec = new ImmutableMap.Builder<String, String>()
+          .put("ds", "20231129")
+          .put("hr", String.valueOf(i))
+          .build();
+      hm.createPartition(table, partitionSpec);
+    }
+
+    List<Partition> partitions = hm.getPartitions(table);
+    assertEquals(3, partitions.size());
+
+    // drop partitions by filter with missing predicate
+    try {
+      List<Pair<Integer, byte[]>> partExprs = new ArrayList<>();
+      ExprNodeColumnDesc column = new ExprNodeColumnDesc(
+          TypeInfoFactory.stringTypeInfo, "ds", null, true);
+      List<String> values = Arrays.asList("20231130", "20231129");
+      for (int i = 0; i < values.size(); i++) {
+        ExprNodeGenericFuncDesc expr = PartitionUtils.makeBinaryPredicate(
+            "=", column, new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, values.get(i)));
+        partExprs.add(Pair.of(i, SerializationUtilities.serializeObjectWithTypeInformation(expr)));
+      }
+      hm.dropPartitions(dbName, tableName, partExprs, PartitionDropOptions.instance());
+      fail("Expected exception");
+    } catch (HiveException e) {
+      // expected
+      assertEquals("Some partitions to drop are missing", e.getCause().getMessage());
+      assertEquals(3, hm.getPartitions(table).size());
+    } finally {
+      cleanUpTableQuietly(dbName, tableName);
+    }
+  }
+
  /**
   * Test that tables set up with auto-purge skip trash-directory when tables/partitions are dropped.
   * @throws Throwable
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index a4367d5716a..3573ed23a7a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -5183,8 +5183,8 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
      mustPurge = isMustPurge(envContext, tbl);
      tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
      writeId = getWriteId(envContext);
-
-      int minCount = 0;
+
+      boolean hasMissingParts = false;
      RequestPartsSpec spec = request.getParts();
      List<String> partNames = null;
@@ -5192,7 +5192,6 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
        // Dropping by expressions.
        parts = new ArrayList<>(spec.getExprs().size());
        for (DropPartitionsExpr expr : spec.getExprs()) {
-          ++minCount; // At least one partition per expression, if not ifExists
          List<Partition> result = new ArrayList<>();
          boolean hasUnknown = ms.getPartitionsByExpr(catName, dbName, tblName, result,
              new GetPartitionsArgs.GetPartitionsArgsBuilder()
@@ -5213,20 +5212,27 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
              }
            }
          }
+          if (result.isEmpty()) {
+            hasMissingParts = true;
+            if (!ifExists) {
+              // fail-fast for missing partition expr
+              break;
+            }
+          }
          parts.addAll(result);
        }
      } else if (spec.isSetNames()) {
        partNames = spec.getNames();
-        minCount = partNames.size();
        parts = ms.getPartitionsByNames(catName, dbName, tblName,
            new GetPartitionsArgs.GetPartitionsArgsBuilder()
                .partNames(partNames).skipColumnSchemaForPartition(request.isSkipColumnSchemaForPartition())
                .build());
+        hasMissingParts = (parts.size() != partNames.size());
      } else {
        throw new MetaException("Partition spec is not set");
      }
-      if ((parts.size() < minCount) && !ifExists) {
+      if (hasMissingParts && !ifExists) {
        throw new NoSuchObjectException("Some partitions to drop are missing");
      }
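As a caller-facing summary of the new control flow (a sketch based only on what this diff shows, not on metastore documentation): when ifExists is false, the first expression that matches no partitions aborts the request with NoSuchObjectException("Some partitions to drop are missing"); when ifExists is true, empty expressions are tolerated and the partitions that did match are still dropped. The hypothetical helper below mirrors that flow and is not part of HMSHandler.

import java.util.ArrayList;
import java.util.List;

final class DropDecisionSketch {

  // Returns the partitions that would be dropped, or throws when a predicate
  // matched nothing and ifExists is false, mirroring the logic added above.
  // IllegalStateException stands in for the metastore's NoSuchObjectException.
  static <P> List<P> collect(List<List<P>> matchesPerExpr, boolean ifExists) {
    List<P> toDrop = new ArrayList<>();
    boolean hasMissingParts = false;
    for (List<P> result : matchesPerExpr) {
      if (result.isEmpty()) {
        hasMissingParts = true;
        if (!ifExists) {
          break; // fail-fast for the missing partition expression
        }
      }
      toDrop.addAll(result);
    }
    if (hasMissingParts && !ifExists) {
      throw new IllegalStateException("Some partitions to drop are missing");
    }
    return toDrop;
  }
}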