[
https://issues.apache.org/jira/browse/HIVE-26804?focusedWorklogId=840961&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-840961
]
ASF GitHub Bot logged work on HIVE-26804:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 23/Jan/23 06:17
Start Date: 23/Jan/23 06:17
Worklog Time Spent: 10m
Work Description: rkirtir commented on code in PR #3880:
URL: https://github.com/apache/hive/pull/3880#discussion_r1083678902
##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java:
##########
@@ -3411,6 +3415,73 @@ public void testShowCompactionOrder() throws Exception {
Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(5).getState());
}
+
+ @Test
+ public void testAbortCompaction() throws Exception {
+
+ d.destroy();
+ hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ d = new Driver(hiveConf);
+ //generate some compaction history
+ runStatementOnDriver("drop database if exists mydb1 cascade");
+ runStatementOnDriver("create database mydb1");
+
+ runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int)
partitioned by (p string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1')
compact 'MAJOR'");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ TestTxnCommands2.runCleaner(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3')
compact 'MAJOR'");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+ TestTxnCommands2.runWorker(hiveConf);
+ TestTxnCommands2.runCleaner(hiveConf);
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(11,12,'p1'),(13,14,'p1'),(11,12,'p2'),(13,14,'p2'),(11,12,'p3'),(13,14,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1')
compact 'MINOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ runStatementOnDriver("create table mydb1.tbl1 " + "(a int, b int)
partitioned by (ds string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl1" + " PARTITION(ds) " +
+ "
values(1,2,'today'),(3,4,'today'),(1,2,'tomorrow'),(3,4,'tomorrow'),(1,2,'yesterday'),(3,4,'yesterday')");
+ runStatementOnDriver("alter table mydb1.tbl1" + " PARTITION(ds='today')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ runStatementOnDriver("drop table if exists T1");
+ runStatementOnDriver("create table T1 (a int, b int) stored as orc
TBLPROPERTIES ('transactional'='true')");
+ runStatementOnDriver("insert into T1 values(0,2)");//makes delta_1_1 in T1
+ runStatementOnDriver("insert into T1 values(1,4)");//makes delta_2_2 in T1
+
+ //create failed compaction attempt so that compactor txn is aborted
+ HiveConf.setBoolVar(hiveConf,
HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
+ runStatementOnDriver("alter table T1 compact 'minor'");
+ TestTxnCommands2.runWorker(hiveConf);
+ // Verify compaction order
+ List<ShowCompactResponseElement> compacts =
+ txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
+ Assert.assertEquals(6, compacts.size());
+ Assert.assertEquals(TxnStore.INITIATED_RESPONSE,
compacts.get(0).getState());
+ Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(1).getState());
+ Assert.assertEquals(TxnStore.CLEANING_RESPONSE,
compacts.get(2).getState());
+ Assert.assertEquals(TxnStore.CLEANING_RESPONSE,
compacts.get(3).getState());
+ Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE,
compacts.get(4).getState());
+ Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(5).getState());
+
+ List<Long> compactionsToAbort =
Arrays.asList(compacts.get(0).getId(),compacts.get(1).getId(),compacts.get(3).getId());
+ AbortCompactionRequest rqst= new AbortCompactionRequest();
+ rqst.setCompactionIds(compactionsToAbort);
+ AbortCompactResponse resp = txnHandler.abortCompactions(rqst);
+ Assert.assertEquals(3,resp.getAbortedcompactsSize());
+ Map<Long,AbortCompactionResponseElement> res = resp.getAbortedcompacts();
+ List<AbortCompactionResponseElement> respList =
res.values().stream().collect(Collectors.toList());
+ Assert.assertEquals("Not Eligible",respList.get(0).getMessage());
+ Assert.assertEquals("Not Eligible",respList.get(1).getMessage());
+ Assert.assertEquals("Successfully aborted
Compaction",respList.get(2).getMessage());
Review Comment:
fixed
##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java:
##########
@@ -3411,6 +3415,73 @@ public void testShowCompactionOrder() throws Exception {
Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(5).getState());
}
+
+ @Test
+ public void testAbortCompaction() throws Exception {
+
+ d.destroy();
+ hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ d = new Driver(hiveConf);
+ //generate some compaction history
+ runStatementOnDriver("drop database if exists mydb1 cascade");
+ runStatementOnDriver("create database mydb1");
+
+ runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int)
partitioned by (p string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1')
compact 'MAJOR'");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ TestTxnCommands2.runCleaner(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3')
compact 'MAJOR'");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+ TestTxnCommands2.runWorker(hiveConf);
+ TestTxnCommands2.runCleaner(hiveConf);
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(11,12,'p1'),(13,14,'p1'),(11,12,'p2'),(13,14,'p2'),(11,12,'p3'),(13,14,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1')
compact 'MINOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ runStatementOnDriver("create table mydb1.tbl1 " + "(a int, b int)
partitioned by (ds string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl1" + " PARTITION(ds) " +
+ "
values(1,2,'today'),(3,4,'today'),(1,2,'tomorrow'),(3,4,'tomorrow'),(1,2,'yesterday'),(3,4,'yesterday')");
+ runStatementOnDriver("alter table mydb1.tbl1" + " PARTITION(ds='today')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ runStatementOnDriver("drop table if exists T1");
+ runStatementOnDriver("create table T1 (a int, b int) stored as orc
TBLPROPERTIES ('transactional'='true')");
+ runStatementOnDriver("insert into T1 values(0,2)");//makes delta_1_1 in T1
+ runStatementOnDriver("insert into T1 values(1,4)");//makes delta_2_2 in T1
+
+ //create failed compaction attempt so that compactor txn is aborted
+ HiveConf.setBoolVar(hiveConf,
HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
+ runStatementOnDriver("alter table T1 compact 'minor'");
+ TestTxnCommands2.runWorker(hiveConf);
+ // Verify compaction order
+ List<ShowCompactResponseElement> compacts =
+ txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
+ Assert.assertEquals(6, compacts.size());
+ Assert.assertEquals(TxnStore.INITIATED_RESPONSE,
compacts.get(0).getState());
+ Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(1).getState());
+ Assert.assertEquals(TxnStore.CLEANING_RESPONSE,
compacts.get(2).getState());
+ Assert.assertEquals(TxnStore.CLEANING_RESPONSE,
compacts.get(3).getState());
+ Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE,
compacts.get(4).getState());
+ Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(5).getState());
+
+ List<Long> compactionsToAbort =
Arrays.asList(compacts.get(0).getId(),compacts.get(1).getId(),compacts.get(3).getId());
+ AbortCompactionRequest rqst= new AbortCompactionRequest();
+ rqst.setCompactionIds(compactionsToAbort);
+ AbortCompactResponse resp = txnHandler.abortCompactions(rqst);
+ Assert.assertEquals(3,resp.getAbortedcompactsSize());
+ Map<Long,AbortCompactionResponseElement> res = resp.getAbortedcompacts();
+ List<AbortCompactionResponseElement> respList =
res.values().stream().collect(Collectors.toList());
+ Assert.assertEquals("Not Eligible",respList.get(0).getMessage());
+ Assert.assertEquals("Not Eligible",respList.get(1).getMessage());
+ Assert.assertEquals("Successfully aborted
Compaction",respList.get(2).getMessage());
+ }
Review Comment:
fixed
Issue Time Tracking
-------------------
Worklog Id: (was: 840961)
Time Spent: 2h (was: 1h 50m)
> Cancel Compactions in initiated state
> -------------------------------------
>
> Key: HIVE-26804
> URL: https://issues.apache.org/jira/browse/HIVE-26804
> Project: Hive
> Issue Type: New Feature
> Components: Hive
> Reporter: KIRTI RUGE
> Assignee: KIRTI RUGE
> Priority: Major
> Labels: pull-request-available
> Time Spent: 2h
> Remaining Estimate: 0h
>
--
This message was sent by Atlassian Jira
(v8.20.10#820010)