This is an automated email from the ASF dual-hosted git repository.
ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new c917aa9 HIVE-25321: Advance writeid during AlterTableDropPartition (Kishen Das reviewed by Sourabh and Yu-wen)
c917aa9 is described below
commit c917aa96ebdd6991d7ad6ac4409b077976a2e490
Author: Kishen Das <[email protected]>
AuthorDate: Fri Jul 9 12:54:38 2021 -0700
HIVE-25321: Advance writeid during AlterTableDropPartition (Kishen Das reviewed by Sourabh and Yu-wen)
---
.../add/AbstractAddPartitionAnalyzer.java | 15 ------
.../drop/AbstractDropPartitionAnalyzer.java | 6 +++
.../drop/AlterTableDropPartitionAnalyzer.java | 14 ++++++
.../drop/AlterViewDropPartitionAnalyzer.java | 12 +++++
.../org/apache/hadoop/hive/ql/TestTxnCommands.java | 54 ++++++++++++++++++++++
5 files changed, 86 insertions(+), 15 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
index 0736f16..9da2daa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
@@ -132,19 +132,4 @@ abstract class AbstractAddPartitionAnalyzer extends AbstractAlterTableAnalyzer {
protected abstract void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc,
Task<DDLWork> ddlTask) throws SemanticException;
- // Equivalent to acidSinks, but for DDL operations that change data.
- private DDLDescWithWriteId ddlDescWithWriteId;
-
- protected void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) {
- if (this.ddlDescWithWriteId != null) {
- throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId);
- }
- this.ddlDescWithWriteId = descWithWriteId;
- }
-
- @Override
- public DDLDescWithWriteId getAcidDdlDesc() {
- return ddlDescWithWriteId;
- }
-
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java
index 34667e0..b727492 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -107,10 +108,15 @@ abstract class AbstractDropPartitionAnalyzer extends AbstractAlterTableAnalyzer
AlterTableDropPartitionDesc desc =
new AlterTableDropPartitionDesc(tableName, partitionSpecs, mustPurge, replicationSpec);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+
+ postProcess(tableName, table, desc);
+
}
protected abstract boolean expectView();
+ protected abstract void postProcess(TableName tableName, Table table, AlterTableDropPartitionDesc desc);
+
/**
* Add the table partitions to be modified in the output, so that it is
available for the
* pre-execution hook. If the partition does not exist, throw an error if
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java
index cbb622e..f8632c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java
@@ -18,8 +18,13 @@
package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -36,4 +41,13 @@ public class AlterTableDropPartitionAnalyzer extends AbstractDropPartitionAnalyz
protected boolean expectView() {
return false;
}
+
+ @Override
+ protected void postProcess(TableName tableName, Table table, AlterTableDropPartitionDesc desc) {
+ if (!AcidUtils.isTransactionalTable(table)) {
+ return;
+ }
+
+ setAcidDdlDesc(desc);
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java
index ff77da0..9f8961e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java
@@ -18,8 +18,11 @@
package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -33,6 +36,15 @@ public class AlterViewDropPartitionAnalyzer extends AbstractDropPartitionAnalyze
}
@Override
+ protected void postProcess(TableName tableName, Table table, AlterTableDropPartitionDesc desc) {
+ if (!AcidUtils.isTransactionalTable(table)) {
+ return;
+ }
+
+ setAcidDdlDesc(desc);
+ }
+
+ @Override
protected boolean expectView() {
return true;
}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 01435f9..29e488b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -432,6 +432,60 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
}
+ /**
+ * If you are disabling or removing this test case, it probably means now we support exchange partition for
+ * transactional tables. If that is the case, we also have to make sure we advance the Write IDs during exchange
+ * partition DDL for transactional tables. You can look at https://github.com/apache/hive/pull/2465 as an example.
+ * @throws Exception
+ */
+ @Test
+ public void exchangePartitionShouldNotWorkForTransactionalTables() throws Exception {
+ runStatementOnDriver("create database IF NOT EXISTS db1");
+ runStatementOnDriver("create database IF NOT EXISTS db2");
+
+ runStatementOnDriver("CREATE TABLE db1.exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING)");
+
+ String tableName = "db2.exchange_part_test2";
+ runStatementOnDriver(String.format("CREATE TABLE %s (f1 string) PARTITIONED BY (ds STRING) " +
+ "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')" ,tableName));
+
+ runStatementOnDriver("ALTER TABLE db2.exchange_part_test2 ADD PARTITION (ds='2013-04-05')");
+
+ try {
+ runStatementOnDriver("ALTER TABLE db1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') " +
+ "WITH TABLE db2.exchange_part_test2");
+ Assert.fail("Exchange partition should not be allowed for transaction tables" );
+ }catch(Exception e) {
+ Assert.assertTrue(e.getMessage().contains("Exchange partition is not allowed with transactional tables"));
+ }
+ }
+
+ @Test
+ public void testAddAndDropPartitionAdvancingWriteIds() throws Exception {
+ runStatementOnDriver("create database IF NOT EXISTS db1");
+
+ String tableName = "db1.add_drop_partition";
+ IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
+
+ runStatementOnDriver(String.format("CREATE TABLE %s (f1 string) PARTITIONED BY (ds STRING) " +
+ "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')" ,tableName));
+
+ String validWriteIds = msClient.getValidWriteIds(tableName).toString();
+ LOG.info("ValidWriteIds before add partition::"+ validWriteIds);
+ Assert.assertEquals("db1.add_drop_partition:0:9223372036854775807::", validWriteIds);
+ validWriteIds = msClient.getValidWriteIds(tableName).toString();
+ runStatementOnDriver("ALTER TABLE db1.add_drop_partition ADD PARTITION (ds='2013-04-05')");
+ validWriteIds = msClient.getValidWriteIds(tableName).toString();
+ LOG.info("ValidWriteIds after add partition::"+ validWriteIds);
+ Assert.assertEquals("db1.add_drop_partition:1:9223372036854775807::", validWriteIds);
+ runStatementOnDriver("ALTER TABLE db1.add_drop_partition DROP PARTITION (ds='2013-04-05')");
+ validWriteIds = msClient.getValidWriteIds(tableName).toString();
+ LOG.info("ValidWriteIds after drop partition::"+ validWriteIds);
+ Assert.assertEquals("db1.add_drop_partition:2:9223372036854775807::", validWriteIds);
+ }
+
@Test
public void testParallelInsertAnalyzeStats() throws Exception {
String tableName = "mm_table";