rkirtir commented on code in PR #3608:
URL: https://github.com/apache/hive/pull/3608#discussion_r990898269
##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java:
##########
@@ -2117,4 +2121,168 @@ public void testIsRawFormatFile() throws Exception {
List<String> res = runStatementOnDriver("select * from file_formats");
Assert.assertEquals(3, res.size());
}
+ @Test
+ public void testShowCompactions() throws Exception {
+ d.destroy();
+ hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ d = new Driver(hiveConf);
+ //generate some compaction history
+ runStatementOnDriver("drop database if exists mydb1 cascade");
+ runStatementOnDriver("create database mydb1");
+ runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int)
partitioned by (p string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p3')
compact 'MAJOR' pool 'pool0'");
+ TestTxnCommands2.runWorker(hiveConf);
+ TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+ SessionState.get().setCurrentDatabase("mydb1");
+
+ //testing show compaction command
+ ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+ List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+ Assert.assertEquals(rsp.getCompacts().size()+1, r.size());//includes
Header row
+
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for
cleaning'");
+
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getState().equals("ready
for cleaning")).count() +1,
Review Comment:
`rsp` is the result of `txnHandler.showCompact(new ShowCompactRequest())`, and it is checked against `r`, which is the result of the driver command:

r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'poolx' TYPE 'MINOR' ");
//includes Header row
Assert.assertEquals(**rsp**.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
filter(x->x.getPoolName().equals("poolx")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1, **r**.size());
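In other words, each assertion derives the expected row count by applying the same filters to the metastore response and compares it against the driver output, plus one header row. A minimal sketch of that pattern, assuming the TestTxnCommands fixture from this patch (`runStatementOnDriver`, `hiveConf`) and the imports it already uses:

```java
// Sketch only; reuses names from the patch and assumes the surrounding test fixture.
TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);

// rsp: compaction history taken straight from the metastore, unfiltered.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());

// r: output of the SQL command under test, one string per row plus a header row.
List<String> r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 TYPE 'MAJOR'");

// Expected count = same filters applied to the metastore response.
long expected = rsp.getCompacts().stream()
    .filter(c -> c.getDbname().equals("mydb1"))
    .filter(c -> c.getType() == CompactionType.MAJOR)
    .count();
Assert.assertEquals(expected + 1, r.size()); // +1 for the header row
```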
##########
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java:
##########
@@ -2117,4 +2121,168 @@ public void testIsRawFormatFile() throws Exception {
List<String> res = runStatementOnDriver("select * from file_formats");
Assert.assertEquals(3, res.size());
}
+ @Test
+ public void testShowCompactions() throws Exception {
+ d.destroy();
+ hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ d = new Driver(hiveConf);
+ //generate some compaction history
+ runStatementOnDriver("drop database if exists mydb1 cascade");
+ runStatementOnDriver("create database mydb1");
+ runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int)
partitioned by (p string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p3')
compact 'MAJOR' pool 'pool0'");
+ TestTxnCommands2.runWorker(hiveConf);
+ TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+ SessionState.get().setCurrentDatabase("mydb1");
+
+ //testing show compaction command
+ ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+ List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+ Assert.assertEquals(rsp.getCompacts().size()+1, r.size());//includes
Header row
+
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for
cleaning'");
+
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getState().equals("ready
for cleaning")).count() +1,
+ r.size());//includes Header row
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 TYPE 'MAJOR' ");
+
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+ filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());//includes Header row
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'poolx' TYPE
'MINOR' ");
+ //includes Header row
+
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+
filter(x->x.getPoolName().equals("poolx")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'pool0' TYPE
'MAJOR'");
+ Assert.assertEquals(2, r.size());//includes Header row
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 POOL 'pool0'");
+
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
+ filter(x->x.getPoolName().equals("pool0")).count()+1,
r.size());//includes Header row
+
+ r = runStatementOnDriver("SHOW COMPACTIONS DATABASE mydb1 POOL 'pool0'");
+ Assert.assertEquals(2, r.size());//includes Header row
+
+ r = runStatementOnDriver("SHOW COMPACTIONS tbl0 TYPE 'MAJOR' ");
Review Comment:
We are following the syntax below:

SHOW COMPACTIONS <[DATABASE|SCHEMA <db>]|[[<db>.]<table> [PARTITION (<partition_spec>)]]> [POOL <pool_name>] [TYPE <type>] [STATE <state>] [ORDER BY <ob clause>] [LIMIT <number>]
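For reference, a few statements accepted by this grammar, taken from the queries already exercised in the test above (illustrative only, not an exhaustive list of the syntax):

```java
// Forms of SHOW COMPACTIONS covered by the grammar above; all of these
// appear in the test in this patch.
runStatementOnDriver("SHOW COMPACTIONS");                                           // no filter
runStatementOnDriver("SHOW COMPACTIONS DATABASE mydb1 POOL 'pool0'");               // database + pool
runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 STATUS 'ready for cleaning'");  // schema + state
runStatementOnDriver("SHOW COMPACTIONS tbl0 TYPE 'MAJOR'");                         // table + type
```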
##########
parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java:
##########
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.Is.is;
+
+public class TestParseShowCompactions {
+ ParseDriver parseDriver = new ParseDriver();
+
+ @Test
+ public void testShowCompactions() throws Exception {
+ ASTNode tree = parseDriver.parse(
+ "SHOW COMPACTIONS", null).getTree();
+
+ assertThat(tree.toStringTree(), is("tok_show_compactions <eof>"));
+ }
+
+ @Test
+ public void testShowCompactionsFilterDb() throws Exception {
+ ASTNode tree = parseDriver.parse(
+ "SHOW COMPACTIONS DATABASE db1", null).getTree();
+
+ assertThat(tree.toStringTree(), is("(tok_show_compactions db1)
<eof>"));
+ }
+
+ private static final String EXPECTED_WHEN_FILTER_BY_DB_AND_ALL_AND_ORDER_BY = "\n" +
+ "nil\n" +
+ " TOK_SHOW_COMPACTIONS\n" +
+ " db1\n" +
+ " TOK_COMPACT_POOL\n" +
+ " 'pool0'\n" +
+ " TOK_COMPACTION_TYPE\n" +
+ " 'minor'\n" +
+ " TOK_COMPACTION_STATUS\n" +
+ " 'ready for clean'\n" +
+ " TOK_ORDERBY\n" +
+ " TOK_TABSORTCOLNAMEDESC\n" +
+ " TOK_NULLS_FIRST\n" +
+ " TOK_TABLE_OR_COL\n" +
+ " cq_table\n" +
+ " TOK_TABSORTCOLNAMEASC\n" +
+ " TOK_NULLS_LAST\n" +
+ " TOK_TABLE_OR_COL\n" +
+ " cq_state\n" +
+ " TOK_LIMIT\n" +
+ " 42\n" +
+ " <EOF>\n";
+
+ @Test
+ public void testShowCompactionsFilterDbAndAllAndOrder() throws Exception {
+ ASTNode tree = parseDriver.parse(
+ "SHOW COMPACTIONS DATABASE db1 POOL 'pool0' TYPE 'minor'
STATUS 'ready for clean' ORDER BY cq_table DESC, cq_state LIMIT 42",
null).getTree();
Review Comment:
The ORDER BY and LIMIT functionality is part of a separate JIRA, [HIVE-26580](https://issues.apache.org/jira/browse/HIVE-26580): "SHOW COMPACTIONS should support ordering and limiting functionality in filtering options".