This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 97c2e45098a HIVE-26580: SHOW COMPACTIONS should support ordering and
limiting functionality (Kirti Ruge, reviewed by Denys Kuzmenko)
97c2e45098a is described below
commit 97c2e45098a194f4f4651e09f77e5e34dae122e4
Author: rkirtir <[email protected]>
AuthorDate: Tue Dec 6 19:23:15 2022 +0530
HIVE-26580: SHOW COMPACTIONS should support ordering and limiting
functionality (Kirti Ruge, reviewed by Denys Kuzmenko)
Closes #3830
---
.../ql/txn/compactor/CompactionPoolOnTezTest.java | 4 +-
.../apache/hadoop/hive/ql/parse/HiveLexerParent.g | 2 +-
.../org/apache/hadoop/hive/ql/parse/HiveParser.g | 6 +-
.../hadoop/hive/ql/parse/IdentifiersParser.g | 1 -
.../hive/ql/parse/TestParseShowCompactions.java | 2 +-
.../show/compactions/ShowCompactionsAnalyzer.java | 58 ++-
.../show/compactions/ShowCompactionsDesc.java | 16 +-
.../show/compactions/ShowCompactionsOperation.java | 16 +-
.../hadoop/hive/ql/parse/BaseSemanticAnalyzer.java | 28 ++
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 24 -
.../org/apache/hadoop/hive/ql/TestTxnCommands.java | 149 +++++-
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 16 +-
.../clientpositive/llap/dbtxnmgr_compact1.q.out | 2 +-
.../clientpositive/llap/dbtxnmgr_showlocks.q.out | 3 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 116 +++--
.../src/gen/thrift/gen-cpp/hive_metastore_types.h | 58 ++-
.../hive/metastore/api/ShowCompactRequest.java | 536 ++++++++++++++-------
.../gen-php/metastore/ShowCompactRequest.php | 96 +++-
.../src/gen/thrift/gen-py/hive_metastore/ttypes.py | 68 ++-
.../src/gen/thrift/gen-rb/hive_metastore_types.rb | 16 +-
.../src/main/thrift/hive_metastore.thrift | 10 +-
.../hadoop/hive/metastore/txn/TxnHandler.java | 26 +-
22 files changed, 891 insertions(+), 362 deletions(-)
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
index a366ea315fe..dd60c5eecca 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
@@ -139,7 +139,7 @@ public class CompactionPoolOnTezTest extends
CompactorOnTezTest {
driver.getResults(results);
Assert.assertEquals(3, results.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)" +
- "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteID", results.get(0));
+ "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteId", results.get(0));
Pattern p = Pattern.compile("(1|2)\tdefault\t(compaction_test|table2)\t
--- \tMAJOR\tinitiated.*(pool1|default).*");
for(int i = 1; i < results.size(); i++) {
Assert.assertTrue(p.matcher(results.get(i).toString()).matches());
@@ -164,7 +164,7 @@ public class CompactionPoolOnTezTest extends
CompactorOnTezTest {
driver.getResults(results);
Assert.assertEquals(2, results.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)" +
- "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteID",
+ "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteId",
results.get(0));
Pattern p = Pattern.compile("1|2\tdefault\tcompaction_test\t ---
\tMAJOR\tinitiated.*pool1.*");
Assert.assertTrue(p.matcher(results.get(1).toString()).matches());
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
index 706b001f2b7..f6d90940632 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
@@ -72,6 +72,7 @@ KW_DIRECTORY: 'DIRECTORY';
KW_LOCAL: 'LOCAL';
KW_TRANSFORM : 'TRANSFORM';
KW_USING: 'USING';
+KW_COMPACT_ID: 'COMPACTIONID';
KW_CLUSTER: 'CLUSTER';
KW_DISTRIBUTE: 'DISTRIBUTE';
KW_SORT: 'SORT';
@@ -354,7 +355,6 @@ KW_ACTIVATE: 'ACTIVATE';
KW_DEFAULT: 'DEFAULT';
KW_CHECK: 'CHECK';
KW_POOL: 'POOL';
-KW_ID: 'ID';
KW_MOVE: 'MOVE';
KW_DO: 'DO';
KW_ALLOC_FRACTION: 'ALLOC_FRACTION';
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index eab63ebd67e..3841a8f385c 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -696,7 +696,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
xlateMap.put("KW_DEFAULT", "DEFAULT");
xlateMap.put("KW_CHECK", "CHECK");
xlateMap.put("KW_POOL", "POOL");
- xlateMap.put("KW_ID", "ID");
+ xlateMap.put("KW_COMPACT_ID", "COMPACTIONID");
xlateMap.put("KW_MOVE", "MOVE");
xlateMap.put("KW_DO", "DO");
xlateMap.put("KW_ALLOC_FRACTION", "ALLOC_FRACTION");
@@ -1340,7 +1340,7 @@ showStatement
)
| KW_SHOW KW_COMPACTIONS
(
- (KW_ID) => compactionId -> ^(TOK_SHOW_COMPACTIONS compactionId)
+ (KW_COMPACT_ID) => compactionId -> ^(TOK_SHOW_COMPACTIONS compactionId)
|
(KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (dbName=identifier)
compactionPool? compactionType? compactionStatus? orderByClause? limitClause?
-> ^(TOK_SHOW_COMPACTIONS $dbName compactionPool? compactionType?
compactionStatus? orderByClause? limitClause?)
|
@@ -2951,7 +2951,7 @@ killQueryStatement
BEGIN SHOW COMPACTIONS statement
*/
compactionId
- : KW_ID EQUAL compactId=Number -> ^(TOK_COMPACT_ID $compactId)
+ : KW_COMPACT_ID EQUAL compactId=Number -> ^(TOK_COMPACT_ID $compactId)
;
compactionPool
: KW_POOL poolName=StringLiteral -> ^(TOK_COMPACT_POOL $poolName)
diff --git
a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index f7fa1f30a9e..921061a635a 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -976,7 +976,6 @@ nonReserved
| KW_SPEC
| KW_SYSTEM_TIME | KW_SYSTEM_VERSION
| KW_EXPIRE_SNAPSHOTS
- | KW_ID
;
//The following SQL2011 reserved keywords are used as function name only, but
not as identifiers.
diff --git
a/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java
b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java
index 5fa349b9f69..bc60eb5add3 100644
---
a/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java
+++
b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseShowCompactions.java
@@ -91,7 +91,7 @@ public class TestParseShowCompactions {
@Test
public void testShowCompactionsFilterID() throws Exception {
ASTNode tree = parseDriver.parse(
- "SHOW COMPACTIONS ID=1", null).getTree();
+ "SHOW COMPACTIONS compactionid =1", null).getTree();
assertThat(tree.toStringTree(), is("(tok_show_compactions
(tok_compact_id 1)) <eof>"));
}
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java
index 5fa8eb0b020..6fb7a499fd2 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.ddl.process.show.compactions;
+import org.apache.commons.lang3.EnumUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
@@ -28,9 +29,11 @@ import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec;
-
+import java.util.List;
import java.util.Map;
+import java.util.stream.Collectors;
/**
* Analyzer for show compactions commands.
@@ -50,6 +53,8 @@ public class ShowCompactionsAnalyzer extends
BaseSemanticAnalyzer {
String compactionType = null;
String compactionStatus = null;
long compactionId = 0;
+ String orderBy = null;
+ short limit = -1;
Map<String, String> partitionSpec = null;
if (root.getChildCount() > 6) {
throw new
SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(root.toStringTree()));
@@ -80,15 +85,64 @@ public class ShowCompactionsAnalyzer extends
BaseSemanticAnalyzer {
case HiveParser.TOK_COMPACT_ID:
compactionId = Long.parseLong(child.getChild(0).getText());
break;
+ case HiveParser.TOK_LIMIT:
+ limit = Short.valueOf((child.getChild(0)).getText());
+ break;
+ case HiveParser.TOK_ORDERBY:
+ orderBy = processSortOrderSpec(child);
+ break;
default:
dbName = stripQuotes(child.getText());
}
}
ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile(),
compactionId, dbName, tblName, poolName, compactionType,
- compactionStatus, partitionSpec);
+ compactionStatus, partitionSpec, limit, orderBy);
Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(),
getOutputs(), desc));
rootTasks.add(task);
task.setFetchSource(true);
setFetchTask(createFetchTask(ShowCompactionsDesc.SCHEMA));
}
+
+ private String processSortOrderSpec(ASTNode sortNode) {
+ List<PTFInvocationSpec.OrderExpression> orderExp =
processOrderSpec(sortNode).getExpressions();
+ Map<String, String> orderByAttributes = orderExp.stream().
+ collect(Collectors.toMap(x -> getDbColumnName(x.getExpression()), x ->
x.getOrder().toString()));
+ return orderByAttributes.entrySet().stream().map(e -> e.getKey() + "\t" +
e.getValue()).collect(Collectors.joining(","));
+ }
+
+ private String getDbColumnName(ASTNode expression) {
+ String dbColumnPrefix = "CC_";
+ String dbColumnName = expression.getChild(0) == null ?
expression.getText().replace("\'", "").toUpperCase() :
+ expression.getChild(0).getText().toUpperCase();
+ return EnumUtils.isValidEnum(CompactionColumn.class, dbColumnName) ?
CompactionColumn.valueOf(dbColumnName).toString() :
+ "\"" + dbColumnPrefix + dbColumnName + "\"";
+ }
+
+ private enum CompactionColumn {
+ COMPACTIONID("\"CC_ID\""),
+ DBNAME("\"CC_DATABASE\""),
+ TABNAME("\"CC_TABLE\""),
+ PARTNAME("\"CC_PARTITION\""),
+ ENQUEUETIME("\"CC_ENQUEUE_TIME\""),
+ STARTTIME("\"CC_START\""),
+ POOLNAME("\"CC_POOL_NAME\""),
+ NEXTTXNID("\"CC_NEXT_TXN_ID\""),
+ HADOOPJOBID("\"CC_HADOOP_JOB_ID\""),
+ WORKERHOST("\"CC_WORKER_ID\""),
+ WORKERID("\"CC_WORKER_ID\""),
+ DURATION("\"CC_END\""),
+ TXNID("\"CC_TXN_ID\""),
+ COMMITTIME("CC_COMMIT_TIME"),
+ HIGHESTWRITEID("CC_HIGHEST_WRITE_ID");
+ private final String colVal;
+
+ CompactionColumn(String colVal) {
+ this.colVal = colVal;
+ }
+
+ @Override
+ public String toString() {
+ return colVal;
+ }
+ }
}
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
index 55483f7f933..4e10800ec1b 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
@@ -45,10 +45,12 @@ public class ShowCompactionsDesc implements DDLDesc,
Serializable {
private final String compactionType;
private final String compactionStatus;
private final Map<String, String> partSpec;
+ private final short limit;
+ private final String orderBy;
public ShowCompactionsDesc(Path resFile, long compactionId, String dbName,
String tbName, String poolName, String compactionType,
- String compactionStatus, Map<String, String>
partSpec) {
+ String compactionStatus, Map<String, String>
partSpec, short limit, String orderBy) {
this.resFile = resFile.toString();
this.compactionId = compactionId;
this.poolName = poolName;
@@ -57,6 +59,8 @@ public class ShowCompactionsDesc implements DDLDesc,
Serializable {
this.compactionType = compactionType;
this.compactionStatus = compactionStatus;
this.partSpec = partSpec;
+ this.limit = limit;
+ this.orderBy = orderBy;
}
public String getResFile() {
@@ -94,5 +98,15 @@ public class ShowCompactionsDesc implements DDLDesc,
Serializable {
return partSpec;
}
+ @Explain(displayName = "limit", explainLevels = {Level.USER,
Level.DEFAULT, Level.EXTENDED})
+ public short getLimit() {
+ return limit;
+ }
+
+ @Explain(displayName = "orderBy", explainLevels = {Level.USER,
Level.DEFAULT, Level.EXTENDED})
+ public String getOrderBy() {
+ return orderBy;
+ }
+
}
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
index f173b72ff3f..cebe16176e6 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
@@ -80,12 +80,12 @@ public class ShowCompactionsOperation extends
DDLOperation<ShowCompactionsDesc>
private ShowCompactRequest getShowCompactioRequest(ShowCompactionsDesc desc)
throws SemanticException {
ShowCompactRequest request = new ShowCompactRequest();
if (isBlank(desc.getDbName()) && isNotBlank(desc.getTbName())) {
- request.setDbname(SessionState.get().getCurrentDatabase());
+ request.setDbName(SessionState.get().getCurrentDatabase());
} else {
- request.setDbname(desc.getDbName());
+ request.setDbName(desc.getDbName());
}
if (isNotBlank(desc.getTbName())) {
- request.setTablename(desc.getTbName());
+ request.setTbName(desc.getTbName());
}
if (isNotBlank(desc.getPoolName())) {
request.setPoolName(desc.getPoolName());
@@ -97,11 +97,17 @@ public class ShowCompactionsOperation extends
DDLOperation<ShowCompactionsDesc>
request.setState(compactionStateStr2Enum(desc.getCompactionStatus()).getSqlConst());
}
if (isNotEmpty(desc.getPartSpec())) {
- request.setPartitionname(AcidUtils.getPartitionName(desc.getPartSpec()));
+ request.setPartName(AcidUtils.getPartitionName(desc.getPartSpec()));
}
if(desc.getCompactionId()>0){
request.setId(desc.getCompactionId());
}
+ if (desc.getLimit()>0) {
+ request.setLimit(desc.getLimit());
+ }
+ if (isNotBlank(desc.getOrderBy())) {
+ request.setOrder(desc.getOrderBy());
+ }
return request;
}
@@ -144,7 +150,7 @@ public class ShowCompactionsOperation extends
DDLOperation<ShowCompactionsDesc>
os.write(Utilities.tabCode);
os.writeBytes("Commit Time");
os.write(Utilities.tabCode);
- os.writeBytes("Highest WriteID");
+ os.writeBytes("Highest WriteId");
os.write(Utilities.newLineCode);
}
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 8b58877e890..530b41a7056 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -116,6 +116,11 @@ import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
+import static
org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_FIRST;
+import static
org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_LAST;
+import static org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.ASC;
+import static org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC;
+
/**
* BaseSemanticAnalyzer.
*
@@ -1984,4 +1989,27 @@ public abstract class BaseSemanticAnalyzer {
return pCtx;
}
+ public PTFInvocationSpec.OrderSpec processOrderSpec(ASTNode sortNode) {
+ PTFInvocationSpec.OrderSpec oSpec = new PTFInvocationSpec.OrderSpec();
+ int exprCnt = sortNode.getChildCount();
+ for (int i = 0; i < exprCnt; i++) {
+ PTFInvocationSpec.OrderExpression exprSpec = new
PTFInvocationSpec.OrderExpression();
+ ASTNode orderSpec = (ASTNode) sortNode.getChild(i);
+ ASTNode nullOrderSpec = (ASTNode) orderSpec.getChild(0);
+ exprSpec.setExpression((ASTNode) nullOrderSpec.getChild(0));
+ if (orderSpec.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) {
+ exprSpec.setOrder(ASC);
+ } else {
+ exprSpec.setOrder(DESC);
+ }
+ if (nullOrderSpec.getType() == HiveParser.TOK_NULLS_FIRST) {
+ exprSpec.setNullOrder(NULLS_FIRST);
+ } else {
+ exprSpec.setNullOrder(NULLS_LAST);
+ }
+ oSpec.addExpression(exprSpec);
+ }
+ return oSpec;
+ }
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 7631861b6bf..3057f670880 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -14632,30 +14632,6 @@ public class SemanticAnalyzer extends
BaseSemanticAnalyzer {
return pSpec;
}
- private OrderSpec processOrderSpec(ASTNode sortNode) {
- OrderSpec oSpec = new OrderSpec();
- int exprCnt = sortNode.getChildCount();
- for(int i=0; i < exprCnt; i++) {
- OrderExpression exprSpec = new OrderExpression();
- ASTNode orderSpec = (ASTNode) sortNode.getChild(i);
- ASTNode nullOrderSpec = (ASTNode) orderSpec.getChild(0);
- exprSpec.setExpression((ASTNode) nullOrderSpec.getChild(0));
- if ( orderSpec.getType() == HiveParser.TOK_TABSORTCOLNAMEASC ) {
-
exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.ASC);
- }
- else {
-
exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC);
- }
- if ( nullOrderSpec.getType() == HiveParser.TOK_NULLS_FIRST ) {
-
exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_FIRST);
- } else {
-
exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_LAST);
- }
- oSpec.addExpression(exprSpec);
- }
- return oSpec;
- }
-
private PartitioningSpec processPTFPartitionSpec(ASTNode pSpecNode)
{
PartitioningSpec partitioning = new PartitioningSpec();
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index a829700b8b2..95b42dd5be3 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -34,6 +34,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.commons.lang3.StringUtils;
@@ -2161,17 +2162,17 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
Pattern p = Pattern.compile(".*mydb1.*\tready for cleaning.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
}
- r = runStatementOnDriver("SHOW COMPACTIONS ID=1");
+ r = runStatementOnDriver("SHOW COMPACTIONS COMPACTIONID=1");
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getId()==1).count()
+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile("1\t.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2180,8 +2181,8 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
- "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1.*\tMAJOR.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2197,8 +2198,8 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
filter(x->x.getPoolName().equals("pool0")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
- "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1.*\tMAJOR.*\tpool0.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2208,8 +2209,8 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getDbname().equals("mydb1")).
filter(x->x.getPoolName().equals("pool0")).count()+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
- "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1.*\tpool0.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2220,7 +2221,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getPoolName().equals("pool0")).count()+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1.*\tpool0.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i).toString()).matches());
@@ -2231,7 +2232,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());//includes Header row
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*tbl0.*\tMAJOR.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2243,7 +2244,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getTablename().equals("tbl0")).filter(x->x.getPartitionname().equals("p=p3")).count()
+ 1, r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1\ttbl0\tp=p3.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2255,7 +2256,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getPoolName().equals("pool0")).filter(x->x.getType().equals(CompactionType.MAJOR)).count()
+ 1, r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb1\ttbl0\tp=p3\tMAJOR.*\tpool0.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2280,7 +2281,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getState().equals("refused")).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
Pattern p = Pattern.compile(".*tbl2.*\trefused.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2291,7 +2292,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
Assert.assertEquals(rsp.getCompacts().stream().filter(x->x.getTablename().equals("tbl2")).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*tbl2.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2303,7 +2304,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getTablename().equals("tbl2")).filter(x->x.getPartitionname().equals("ds=mon")).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb\ttbl2\tds=mon.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2316,7 +2317,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb\ttbl2\tds=mon\tMAJOR.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2328,7 +2329,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getType().equals(CompactionType.MAJOR)).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb.*\tMAJOR.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2343,7 +2344,7 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
filter(x->x.getType().equals(CompactionType.MINOR)).count()+1,
r.size());
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
"\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
- "Highest WriteID", r.get(0));
+ "Highest WriteId", r.get(0));
p = Pattern.compile(".*mydb.*\tMINOR.*");
for(int i = 1; i < r.size(); i++) {
Assert.assertTrue(p.matcher(r.get(i)).matches());
@@ -2373,13 +2374,13 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
@Test
public void testShowCompactionInputValidation() throws Exception {
setUpCompactionRequestsData("mydb2","tbl2");
- executeCompactionRequest("mydb2","tbl2", "MAJOR","ds='mon'");
+ executeCompactionRequest("mydb2","tbl2", "MAJOR", "ds='mon'");
SessionState.get().setCurrentDatabase("mydb2");
//validation testing of paramters
expectedException.expect(RuntimeException.class);
List<String> r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb POOL
'pool0' TYPE 'MAJOR'");// validates db
- r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb2 TYPE 'MAJR'");//
validates compaction type
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb2 TYPE 'MAJR'");//
validates compaction type
r = runStatementOnDriver("SHOW COMPACTIONS mydb2.tbl1 PARTITION (ds='mon')
TYPE 'MINOR' " +
"STATUS 'ready for clean'");// validates table
r = runStatementOnDriver("SHOW COMPACTIONS mydb2.tbl2 PARTITION
(p=101,day='Monday') POOL 'pool0' TYPE 'minor' " +
@@ -2388,12 +2389,108 @@ public class TestTxnCommands extends
TxnCommandsBaseForTests {
"STATUS 'ready for clean'");//validates compaction status
}
+ @Test
+ public void testShowCompactionFilterSortingAndLimit() throws Exception {
+ runStatementOnDriver("drop database if exists mydb1 cascade");
+ runStatementOnDriver("create database mydb1");
+ runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int)
partitioned by (p string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+ "
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1')
compact 'MAJOR' pool 'poolx'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+
+ runStatementOnDriver("drop database if exists mydb cascade");
+ runStatementOnDriver("create database mydb");
+ runStatementOnDriver("create table mydb.tbl " + "(a int, b int)
partitioned by (ds string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb.tbl" + " PARTITION(ds) " +
+ "
values(1,2,'mon'),(3,4,'tue'),(1,2,'mon'),(3,4,'tue'),(1,2,'wed'),(3,4,'wed')");
+ runStatementOnDriver("alter table mydb.tbl" + " PARTITION(ds='mon')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb.tbl" + " PARTITION(ds='tue')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ runStatementOnDriver("create table mydb.tbl2 " + "(a int, b int)
partitioned by (dm string) clustered by (a) into " +
+ BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES
('transactional'='true')");
+ runStatementOnDriver("insert into mydb.tbl2" + " PARTITION(dm) " +
+ "
values(1,2,'xxx'),(3,4,'xxx'),(1,2,'yyy'),(3,4,'yyy'),(1,2,'zzz'),(3,4,'zzz')");
+ runStatementOnDriver("alter table mydb.tbl2" + " PARTITION(dm='yyy')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+ runStatementOnDriver("alter table mydb.tbl2" + " PARTITION(dm='zzz')
compact 'MAJOR'");
+ TestTxnCommands2.runWorker(hiveConf);
+
+ ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+
+ //includes Header row
+ List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+ Assert.assertEquals(rsp.getCompacts().size() + 1, r.size());
+ r = runStatementOnDriver("SHOW COMPACTIONS LIMIT 3");
+ Assert.assertEquals(4, r.size());
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' LIMIT
2");
+ Assert.assertEquals(3, r.size());
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' ORDER
BY 'tabname' DESC,'partname' ASC");
+ Assert.assertEquals(5, r.size());
+
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
+ Pattern p = Pattern.compile(".*mydb\ttbl2\tdm.*");
+ for (int i = 1; i < r.size() - 3; i++) {
+ Assert.assertTrue(p.matcher(r.get(i)).matches());
+ }
+ p = Pattern.compile(".*mydb\ttbl\tds.*");
+ for (int i = 3; i < r.size() - 1; i++) {
+ Assert.assertTrue(p.matcher(r.get(i)).matches());
+ }
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb1 TYPE 'MAJOR' ORDER
BY 'poolname' ASC");
+ Assert.assertEquals(3, r.size());
+
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
+ List<String> txnIdActualList = r.stream().skip(1).map(x ->
x.split("\t")[15]).collect(Collectors.toList());
+ List<String> txnIdExpectedList = r.stream().skip(1).map(x ->
x.split("\t")[15]).sorted(Collections.reverseOrder()).
+ collect(Collectors.toList());
+ Assert.assertEquals(txnIdExpectedList, txnIdActualList);
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' ORDER
BY 'txnid' DESC");
+ Assert.assertEquals(5, r.size());
+
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
+ txnIdActualList = r.stream().skip(1).map(x ->
x.split("\t")[16]).collect(Collectors.toList());
+ txnIdExpectedList = r.stream().skip(1).map(x ->
x.split("\t")[16]).sorted(Collections.reverseOrder()).
+ collect(Collectors.toList());
+ Collections.sort(txnIdExpectedList, Collections.reverseOrder());
+ Assert.assertEquals(txnIdExpectedList, txnIdActualList);
+
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' ORDER
BY TxnId DESC");
+ Assert.assertEquals(5, r.size());
+
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
host\tWorker\tEnqueue Time\tStart Time" +
+ "\tDuration(ms)\tHadoopJobId\tError message\tInitiator
host\tInitiator\tPool name\tTxnId\tNext TxnId\tCommit Time\t" +
+ "Highest WriteId", r.get(0));
+ txnIdActualList = r.stream().skip(1).map(x ->
x.split("\t")[16]).collect(Collectors.toList());
+ txnIdExpectedList = r.stream().skip(1).map(x ->
x.split("\t")[16]).sorted(Collections.reverseOrder()).
+ collect(Collectors.toList());
+ Assert.assertEquals(txnIdExpectedList, txnIdActualList);
+
+
+ expectedException.expect(RuntimeException.class);
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' ORDER
BY tbl DESC,PARTITIONS ASC");
+ expectedException.expect(RuntimeException.class);
+ r = runStatementOnDriver("SHOW COMPACTIONS SCHEMA mydb TYPE 'MAJOR' ORDER
BY tbl DESC,PARTITIONS ASC");
+
+ }
+
private void setUpCompactionRequestsData(String dbName, String tbName)
throws Exception {
- runStatementOnDriver("drop database if exists "+dbName);
- runStatementOnDriver("create database "+dbName);
- runStatementOnDriver("create table "+dbName+"."+tbName+" (a int, b int)
partitioned by (ds String) stored as orc " +
- "TBLPROPERTIES ('transactional'='true')");
- runStatementOnDriver("insert into "+dbName+"."+tbName+" PARTITION (ds) " +
+ runStatementOnDriver("drop database if exists " + dbName);
+ runStatementOnDriver("create database " + dbName);
+ runStatementOnDriver("create table " + dbName + "." + tbName + " (a int, b
int) partitioned by (ds String) stored as orc " +
+ "TBLPROPERTIES ('transactional'='true')");
+ runStatementOnDriver("insert into " + dbName + "." + tbName + " PARTITION
(ds) " +
"
values(1,2,'mon'),(3,4,'mon'),(1,2,'tue'),(3,4,'tue'),(1,2,'wed'),(3,4,'wed')");
}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index b81b574e2fc..ccf6bc1df14 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -3466,24 +3466,24 @@ public class TestTxnCommands2 extends
TxnCommandsBaseForTests {
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
Assert.assertEquals(4, compacts.size());
ShowCompactRequest scr = new ShowCompactRequest();
- scr.setDbname("bar");
+ scr.setDbName("bar");
Assert.assertEquals(1, txnHandler.showCompact(scr).getCompacts().size());
scr = new ShowCompactRequest();
- scr.setTablename("bar");
+ scr.setTbName("bar");
scr.setPoolName("mypool");
List<ShowCompactResponseElement> compRsp
=txnHandler.showCompact(scr).getCompacts();
Assert.assertEquals(1, compRsp.size());
Assert.assertEquals("mypool", compRsp.get(0).getPoolName());
scr = new ShowCompactRequest();
- scr.setTablename("bar1");
+ scr.setTbName("bar1");
Assert.assertEquals(2, txnHandler.showCompact(scr).getCompacts().size());
scr = new ShowCompactRequest();
- scr.setDbname("bar22");
- scr.setTablename("bar1");
+ scr.setDbName("bar22");
+ scr.setTbName("bar1");
Assert.assertEquals(0, txnHandler.showCompact(scr).getCompacts().size());
scr = new ShowCompactRequest();
- scr.setDbname("bar");
- scr.setTablename("bar1");
+ scr.setDbName("bar");
+ scr.setTbName("bar1");
Assert.assertEquals(1, txnHandler.showCompact(scr).getCompacts().size());
scr = new ShowCompactRequest();
scr.setState("i");
@@ -3499,7 +3499,7 @@ public class TestTxnCommands2 extends
TxnCommandsBaseForTests {
Assert.assertEquals(1, txnHandler.showCompact(scr).getCompacts().size());
scr = new ShowCompactRequest();
- scr.setPartitionname("ds=today");
+ scr.setPartName("ds=today");
Assert.assertEquals(4, txnHandler.showCompact(scr).getCompacts().size());
}
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
index 2a0f87b3932..6f0559197eb 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
@@ -53,7 +53,7 @@ PREHOOK: query: show compactions
PREHOOK: type: SHOW COMPACTIONS
POSTHOOK: query: show compactions
POSTHOOK: type: SHOW COMPACTIONS
-CompactionId Database Table Partition Type State Worker
host Worker Enqueue Time Start Time Duration(ms) HadoopJobId
Error message Initiator host Initiator Pool name TxnId Next
TxnId Commit Time Highest WriteID
+CompactionId Database Table Partition Type State Worker
host Worker Enqueue Time Start Time Duration(ms) HadoopJobId
Error message Initiator host Initiator Pool name TxnId Next
TxnId Commit Time Highest WriteId
1 default t1_n153 --- MAJOR initiated --- ---
#Masked# --- --- --- --- #Masked# manual default
0 0 0 ---
2 default t2_n153 --- MINOR initiated --- ---
#Masked# --- --- --- --- #Masked# manual default
0 0 0 ---
PREHOOK: query: drop table T1_n153
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
b/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
index d1756bb86bf..81c1e18688a 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
@@ -139,6 +139,7 @@ STAGE PLANS:
Stage: Stage-0
Show Compactions
compactionId: 0
+ limit:
Stage: Stage-1
Fetch Operator
@@ -150,7 +151,7 @@ PREHOOK: query: show compactions
PREHOOK: type: SHOW COMPACTIONS
POSTHOOK: query: show compactions
POSTHOOK: type: SHOW COMPACTIONS
-CompactionId Database Table Partition Type State Worker
host Worker Enqueue Time Start Time Duration(ms) HadoopJobId
Error message Initiator host Initiator Pool name TxnId Next
TxnId Commit Time Highest WriteID
+CompactionId Database Table Partition Type State Worker
host Worker Enqueue Time Start Time Duration(ms) HadoopJobId
Error message Initiator host Initiator Pool name TxnId Next
TxnId Commit Time Highest WriteId
1 default partitioned_acid_table p=abc MINOR initiated ---
--- #Masked# --- --- --- --- #Masked# manual
default 0 0 0 ---
PREHOOK: query: drop table partitioned_acid_table
PREHOOK: type: DROPTABLE
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index b963b22c607..9a043797ce3 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -28129,19 +28129,19 @@ void ShowCompactRequest::__set_poolName(const
std::string& val) {
__isset.poolName = true;
}
-void ShowCompactRequest::__set_dbname(const std::string& val) {
- this->dbname = val;
-__isset.dbname = true;
+void ShowCompactRequest::__set_dbName(const std::string& val) {
+ this->dbName = val;
+__isset.dbName = true;
}
-void ShowCompactRequest::__set_tablename(const std::string& val) {
- this->tablename = val;
-__isset.tablename = true;
+void ShowCompactRequest::__set_tbName(const std::string& val) {
+ this->tbName = val;
+__isset.tbName = true;
}
-void ShowCompactRequest::__set_partitionname(const std::string& val) {
- this->partitionname = val;
-__isset.partitionname = true;
+void ShowCompactRequest::__set_partName(const std::string& val) {
+ this->partName = val;
+__isset.partName = true;
}
void ShowCompactRequest::__set_type(const CompactionType::type val) {
@@ -28153,6 +28153,16 @@ void ShowCompactRequest::__set_state(const
std::string& val) {
this->state = val;
__isset.state = true;
}
+
+void ShowCompactRequest::__set_limit(const int64_t val) {
+ this->limit = val;
+__isset.limit = true;
+}
+
+void ShowCompactRequest::__set_order(const std::string& val) {
+ this->order = val;
+__isset.order = true;
+}
std::ostream& operator<<(std::ostream& out, const ShowCompactRequest& obj)
{
obj.printTo(out);
@@ -28199,24 +28209,24 @@ uint32_t
ShowCompactRequest::read(::apache::thrift::protocol::TProtocol* iprot)
break;
case 3:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->dbname);
- this->__isset.dbname = true;
+ xfer += iprot->readString(this->dbName);
+ this->__isset.dbName = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 4:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->tablename);
- this->__isset.tablename = true;
+ xfer += iprot->readString(this->tbName);
+ this->__isset.tbName = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 5:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->partitionname);
- this->__isset.partitionname = true;
+ xfer += iprot->readString(this->partName);
+ this->__isset.partName = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -28239,6 +28249,22 @@ uint32_t
ShowCompactRequest::read(::apache::thrift::protocol::TProtocol* iprot)
xfer += iprot->skip(ftype);
}
break;
+ case 8:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->limit);
+ this->__isset.limit = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 9:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->order);
+ this->__isset.order = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -28266,19 +28292,19 @@ uint32_t
ShowCompactRequest::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeString(this->poolName);
xfer += oprot->writeFieldEnd();
}
- if (this->__isset.dbname) {
- xfer += oprot->writeFieldBegin("dbname",
::apache::thrift::protocol::T_STRING, 3);
- xfer += oprot->writeString(this->dbname);
+ if (this->__isset.dbName) {
+ xfer += oprot->writeFieldBegin("dbName",
::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->dbName);
xfer += oprot->writeFieldEnd();
}
- if (this->__isset.tablename) {
- xfer += oprot->writeFieldBegin("tablename",
::apache::thrift::protocol::T_STRING, 4);
- xfer += oprot->writeString(this->tablename);
+ if (this->__isset.tbName) {
+ xfer += oprot->writeFieldBegin("tbName",
::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->tbName);
xfer += oprot->writeFieldEnd();
}
- if (this->__isset.partitionname) {
- xfer += oprot->writeFieldBegin("partitionname",
::apache::thrift::protocol::T_STRING, 5);
- xfer += oprot->writeString(this->partitionname);
+ if (this->__isset.partName) {
+ xfer += oprot->writeFieldBegin("partName",
::apache::thrift::protocol::T_STRING, 5);
+ xfer += oprot->writeString(this->partName);
xfer += oprot->writeFieldEnd();
}
if (this->__isset.type) {
@@ -28291,6 +28317,16 @@ uint32_t
ShowCompactRequest::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeString(this->state);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.limit) {
+ xfer += oprot->writeFieldBegin("limit", ::apache::thrift::protocol::T_I64,
8);
+ xfer += oprot->writeI64(this->limit);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.order) {
+ xfer += oprot->writeFieldBegin("order",
::apache::thrift::protocol::T_STRING, 9);
+ xfer += oprot->writeString(this->order);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -28300,32 +28336,38 @@ void swap(ShowCompactRequest &a, ShowCompactRequest
&b) {
using ::std::swap;
swap(a.id, b.id);
swap(a.poolName, b.poolName);
- swap(a.dbname, b.dbname);
- swap(a.tablename, b.tablename);
- swap(a.partitionname, b.partitionname);
+ swap(a.dbName, b.dbName);
+ swap(a.tbName, b.tbName);
+ swap(a.partName, b.partName);
swap(a.type, b.type);
swap(a.state, b.state);
+ swap(a.limit, b.limit);
+ swap(a.order, b.order);
swap(a.__isset, b.__isset);
}
ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other996) {
id = other996.id;
poolName = other996.poolName;
- dbname = other996.dbname;
- tablename = other996.tablename;
- partitionname = other996.partitionname;
+ dbName = other996.dbName;
+ tbName = other996.tbName;
+ partName = other996.partName;
type = other996.type;
state = other996.state;
+ limit = other996.limit;
+ order = other996.order;
__isset = other996.__isset;
}
ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest&
other997) {
id = other997.id;
poolName = other997.poolName;
- dbname = other997.dbname;
- tablename = other997.tablename;
- partitionname = other997.partitionname;
+ dbName = other997.dbName;
+ tbName = other997.tbName;
+ partName = other997.partName;
type = other997.type;
state = other997.state;
+ limit = other997.limit;
+ order = other997.order;
__isset = other997.__isset;
return *this;
}
@@ -28334,11 +28376,13 @@ void ShowCompactRequest::printTo(std::ostream& out)
const {
out << "ShowCompactRequest(";
out << "id="; (__isset.id ? (out << to_string(id)) : (out << "<null>"));
out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName))
: (out << "<null>"));
- out << ", " << "dbname="; (__isset.dbname ? (out << to_string(dbname)) :
(out << "<null>"));
- out << ", " << "tablename="; (__isset.tablename ? (out <<
to_string(tablename)) : (out << "<null>"));
- out << ", " << "partitionname="; (__isset.partitionname ? (out <<
to_string(partitionname)) : (out << "<null>"));
+ out << ", " << "dbName="; (__isset.dbName ? (out << to_string(dbName)) :
(out << "<null>"));
+ out << ", " << "tbName="; (__isset.tbName ? (out << to_string(tbName)) :
(out << "<null>"));
+ out << ", " << "partName="; (__isset.partName ? (out << to_string(partName))
: (out << "<null>"));
out << ", " << "type="; (__isset.type ? (out << to_string(type)) : (out <<
"<null>"));
out << ", " << "state="; (__isset.state ? (out << to_string(state)) : (out
<< "<null>"));
+ out << ", " << "limit="; (__isset.limit ? (out << to_string(limit)) : (out
<< "<null>"));
+ out << ", " << "order="; (__isset.order ? (out << to_string(order)) : (out
<< "<null>"));
out << ")";
}
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 9fada9c3723..b57f953746e 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -11155,14 +11155,16 @@ void swap(CompactionResponse &a, CompactionResponse
&b);
std::ostream& operator<<(std::ostream& out, const CompactionResponse& obj);
typedef struct _ShowCompactRequest__isset {
- _ShowCompactRequest__isset() : id(false), poolName(false), dbname(false),
tablename(false), partitionname(false), type(false), state(false) {}
+ _ShowCompactRequest__isset() : id(false), poolName(false), dbName(false),
tbName(false), partName(false), type(false), state(false), limit(false),
order(false) {}
bool id :1;
bool poolName :1;
- bool dbname :1;
- bool tablename :1;
- bool partitionname :1;
+ bool dbName :1;
+ bool tbName :1;
+ bool partName :1;
bool type :1;
bool state :1;
+ bool limit :1;
+ bool order :1;
} _ShowCompactRequest__isset;
class ShowCompactRequest : public virtual ::apache::thrift::TBase {
@@ -11173,25 +11175,29 @@ class ShowCompactRequest : public virtual
::apache::thrift::TBase {
ShowCompactRequest() noexcept
: id(0),
poolName(),
- dbname(),
- tablename(),
- partitionname(),
+ dbName(),
+ tbName(),
+ partName(),
type(static_cast<CompactionType::type>(0)),
- state() {
+ state(),
+ limit(0),
+ order() {
}
virtual ~ShowCompactRequest() noexcept;
int64_t id;
std::string poolName;
- std::string dbname;
- std::string tablename;
- std::string partitionname;
+ std::string dbName;
+ std::string tbName;
+ std::string partName;
/**
*
* @see CompactionType
*/
CompactionType::type type;
std::string state;
+ int64_t limit;
+ std::string order;
_ShowCompactRequest__isset __isset;
@@ -11199,16 +11205,20 @@ class ShowCompactRequest : public virtual
::apache::thrift::TBase {
void __set_poolName(const std::string& val);
- void __set_dbname(const std::string& val);
+ void __set_dbName(const std::string& val);
- void __set_tablename(const std::string& val);
+ void __set_tbName(const std::string& val);
- void __set_partitionname(const std::string& val);
+ void __set_partName(const std::string& val);
void __set_type(const CompactionType::type val);
void __set_state(const std::string& val);
+ void __set_limit(const int64_t val);
+
+ void __set_order(const std::string& val);
+
bool operator == (const ShowCompactRequest & rhs) const
{
if (__isset.id != rhs.__isset.id)
@@ -11219,17 +11229,17 @@ class ShowCompactRequest : public virtual
::apache::thrift::TBase {
return false;
else if (__isset.poolName && !(poolName == rhs.poolName))
return false;
- if (__isset.dbname != rhs.__isset.dbname)
+ if (__isset.dbName != rhs.__isset.dbName)
return false;
- else if (__isset.dbname && !(dbname == rhs.dbname))
+ else if (__isset.dbName && !(dbName == rhs.dbName))
return false;
- if (__isset.tablename != rhs.__isset.tablename)
+ if (__isset.tbName != rhs.__isset.tbName)
return false;
- else if (__isset.tablename && !(tablename == rhs.tablename))
+ else if (__isset.tbName && !(tbName == rhs.tbName))
return false;
- if (__isset.partitionname != rhs.__isset.partitionname)
+ if (__isset.partName != rhs.__isset.partName)
return false;
- else if (__isset.partitionname && !(partitionname == rhs.partitionname))
+ else if (__isset.partName && !(partName == rhs.partName))
return false;
if (__isset.type != rhs.__isset.type)
return false;
@@ -11239,6 +11249,14 @@ class ShowCompactRequest : public virtual
::apache::thrift::TBase {
return false;
else if (__isset.state && !(state == rhs.state))
return false;
+ if (__isset.limit != rhs.__isset.limit)
+ return false;
+ else if (__isset.limit && !(limit == rhs.limit))
+ return false;
+ if (__isset.order != rhs.__isset.order)
+ return false;
+ else if (__isset.order && !(order == rhs.order))
+ return false;
return true;
}
bool operator != (const ShowCompactRequest &rhs) const {
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
index cb99c591db0..4f12c519dab 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
@@ -13,36 +13,42 @@ package org.apache.hadoop.hive.metastore.api;
private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new
org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64,
(short)1);
private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC
= new org.apache.thrift.protocol.TField("poolName",
org.apache.thrift.protocol.TType.STRING, (short)2);
- private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC =
new org.apache.thrift.protocol.TField("dbname",
org.apache.thrift.protocol.TType.STRING, (short)3);
- private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC
= new org.apache.thrift.protocol.TField("tablename",
org.apache.thrift.protocol.TType.STRING, (short)4);
- private static final org.apache.thrift.protocol.TField
PARTITIONNAME_FIELD_DESC = new
org.apache.thrift.protocol.TField("partitionname",
org.apache.thrift.protocol.TType.STRING, (short)5);
+ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC =
new org.apache.thrift.protocol.TField("dbName",
org.apache.thrift.protocol.TType.STRING, (short)3);
+ private static final org.apache.thrift.protocol.TField TB_NAME_FIELD_DESC =
new org.apache.thrift.protocol.TField("tbName",
org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC
= new org.apache.thrift.protocol.TField("partName",
org.apache.thrift.protocol.TType.STRING, (short)5);
private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new
org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32,
(short)6);
private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC =
new org.apache.thrift.protocol.TField("state",
org.apache.thrift.protocol.TType.STRING, (short)7);
+ private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC =
new org.apache.thrift.protocol.TField("limit",
org.apache.thrift.protocol.TType.I64, (short)8);
+ private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC =
new org.apache.thrift.protocol.TField("order",
org.apache.thrift.protocol.TType.STRING, (short)9);
private static final org.apache.thrift.scheme.SchemeFactory
STANDARD_SCHEME_FACTORY = new ShowCompactRequestStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory
TUPLE_SCHEME_FACTORY = new ShowCompactRequestTupleSchemeFactory();
private long id; // optional
private @org.apache.thrift.annotation.Nullable java.lang.String poolName; //
optional
- private @org.apache.thrift.annotation.Nullable java.lang.String dbname; //
optional
- private @org.apache.thrift.annotation.Nullable java.lang.String tablename;
// optional
- private @org.apache.thrift.annotation.Nullable java.lang.String
partitionname; // optional
+ private @org.apache.thrift.annotation.Nullable java.lang.String dbName; //
optional
+ private @org.apache.thrift.annotation.Nullable java.lang.String tbName; //
optional
+ private @org.apache.thrift.annotation.Nullable java.lang.String partName; //
optional
private @org.apache.thrift.annotation.Nullable CompactionType type; //
optional
private @org.apache.thrift.annotation.Nullable java.lang.String state; //
optional
+ private long limit; // optional
+ private @org.apache.thrift.annotation.Nullable java.lang.String order; //
optional
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ID((short)1, "id"),
POOL_NAME((short)2, "poolName"),
- DBNAME((short)3, "dbname"),
- TABLENAME((short)4, "tablename"),
- PARTITIONNAME((short)5, "partitionname"),
+ DB_NAME((short)3, "dbName"),
+ TB_NAME((short)4, "tbName"),
+ PART_NAME((short)5, "partName"),
/**
*
* @see CompactionType
*/
TYPE((short)6, "type"),
- STATE((short)7, "state");
+ STATE((short)7, "state"),
+ LIMIT((short)8, "limit"),
+ ORDER((short)9, "order");
private static final java.util.Map<java.lang.String, _Fields> byName = new
java.util.HashMap<java.lang.String, _Fields>();
@@ -62,16 +68,20 @@ package org.apache.hadoop.hive.metastore.api;
return ID;
case 2: // POOL_NAME
return POOL_NAME;
- case 3: // DBNAME
- return DBNAME;
- case 4: // TABLENAME
- return TABLENAME;
- case 5: // PARTITIONNAME
- return PARTITIONNAME;
+ case 3: // DB_NAME
+ return DB_NAME;
+ case 4: // TB_NAME
+ return TB_NAME;
+ case 5: // PART_NAME
+ return PART_NAME;
case 6: // TYPE
return TYPE;
case 7: // STATE
return STATE;
+ case 8: // LIMIT
+ return LIMIT;
+ case 9: // ORDER
+ return ORDER;
default:
return null;
}
@@ -114,8 +124,9 @@ package org.apache.hadoop.hive.metastore.api;
// isset id assignments
private static final int __ID_ISSET_ID = 0;
+ private static final int __LIMIT_ISSET_ID = 1;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] =
{_Fields.ID,_Fields.POOL_NAME,_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTITIONNAME,_Fields.TYPE,_Fields.STATE};
+ private static final _Fields optionals[] =
{_Fields.ID,_Fields.POOL_NAME,_Fields.DB_NAME,_Fields.TB_NAME,_Fields.PART_NAME,_Fields.TYPE,_Fields.STATE,_Fields.LIMIT,_Fields.ORDER};
public static final java.util.Map<_Fields,
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields,
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -123,16 +134,20 @@ package org.apache.hadoop.hive.metastore.api;
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.POOL_NAME, new
org.apache.thrift.meta_data.FieldMetaData("poolName",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.DBNAME, new
org.apache.thrift.meta_data.FieldMetaData("dbname",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ tmpMap.put(_Fields.DB_NAME, new
org.apache.thrift.meta_data.FieldMetaData("dbName",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.TABLENAME, new
org.apache.thrift.meta_data.FieldMetaData("tablename",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ tmpMap.put(_Fields.TB_NAME, new
org.apache.thrift.meta_data.FieldMetaData("tbName",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.PARTITIONNAME, new
org.apache.thrift.meta_data.FieldMetaData("partitionname",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ tmpMap.put(_Fields.PART_NAME, new
org.apache.thrift.meta_data.FieldMetaData("partName",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.TYPE, new
org.apache.thrift.meta_data.FieldMetaData("type",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
CompactionType.class)));
tmpMap.put(_Fields.STATE, new
org.apache.thrift.meta_data.FieldMetaData("state",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.LIMIT, new
org.apache.thrift.meta_data.FieldMetaData("limit",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.ORDER, new
org.apache.thrift.meta_data.FieldMetaData("order",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactRequest.class,
metaDataMap);
}
@@ -149,14 +164,14 @@ package org.apache.hadoop.hive.metastore.api;
if (other.isSetPoolName()) {
this.poolName = other.poolName;
}
- if (other.isSetDbname()) {
- this.dbname = other.dbname;
+ if (other.isSetDbName()) {
+ this.dbName = other.dbName;
}
- if (other.isSetTablename()) {
- this.tablename = other.tablename;
+ if (other.isSetTbName()) {
+ this.tbName = other.tbName;
}
- if (other.isSetPartitionname()) {
- this.partitionname = other.partitionname;
+ if (other.isSetPartName()) {
+ this.partName = other.partName;
}
if (other.isSetType()) {
this.type = other.type;
@@ -164,6 +179,10 @@ package org.apache.hadoop.hive.metastore.api;
if (other.isSetState()) {
this.state = other.state;
}
+ this.limit = other.limit;
+ if (other.isSetOrder()) {
+ this.order = other.order;
+ }
}
public ShowCompactRequest deepCopy() {
@@ -175,11 +194,14 @@ package org.apache.hadoop.hive.metastore.api;
setIdIsSet(false);
this.id = 0;
this.poolName = null;
- this.dbname = null;
- this.tablename = null;
- this.partitionname = null;
+ this.dbName = null;
+ this.tbName = null;
+ this.partName = null;
this.type = null;
this.state = null;
+ setLimitIsSet(false);
+ this.limit = 0;
+ this.order = null;
}
public long getId() {
@@ -229,74 +251,74 @@ package org.apache.hadoop.hive.metastore.api;
}
@org.apache.thrift.annotation.Nullable
- public java.lang.String getDbname() {
- return this.dbname;
+ public java.lang.String getDbName() {
+ return this.dbName;
}
- public void setDbname(@org.apache.thrift.annotation.Nullable
java.lang.String dbname) {
- this.dbname = dbname;
+ public void setDbName(@org.apache.thrift.annotation.Nullable
java.lang.String dbName) {
+ this.dbName = dbName;
}
- public void unsetDbname() {
- this.dbname = null;
+ public void unsetDbName() {
+ this.dbName = null;
}
- /** Returns true if field dbname is set (has been assigned a value) and
false otherwise */
- public boolean isSetDbname() {
- return this.dbname != null;
+ /** Returns true if field dbName is set (has been assigned a value) and
false otherwise */
+ public boolean isSetDbName() {
+ return this.dbName != null;
}
- public void setDbnameIsSet(boolean value) {
+ public void setDbNameIsSet(boolean value) {
if (!value) {
- this.dbname = null;
+ this.dbName = null;
}
}
@org.apache.thrift.annotation.Nullable
- public java.lang.String getTablename() {
- return this.tablename;
+ public java.lang.String getTbName() {
+ return this.tbName;
}
- public void setTablename(@org.apache.thrift.annotation.Nullable
java.lang.String tablename) {
- this.tablename = tablename;
+ public void setTbName(@org.apache.thrift.annotation.Nullable
java.lang.String tbName) {
+ this.tbName = tbName;
}
- public void unsetTablename() {
- this.tablename = null;
+ public void unsetTbName() {
+ this.tbName = null;
}
- /** Returns true if field tablename is set (has been assigned a value) and
false otherwise */
- public boolean isSetTablename() {
- return this.tablename != null;
+ /** Returns true if field tbName is set (has been assigned a value) and
false otherwise */
+ public boolean isSetTbName() {
+ return this.tbName != null;
}
- public void setTablenameIsSet(boolean value) {
+ public void setTbNameIsSet(boolean value) {
if (!value) {
- this.tablename = null;
+ this.tbName = null;
}
}
@org.apache.thrift.annotation.Nullable
- public java.lang.String getPartitionname() {
- return this.partitionname;
+ public java.lang.String getPartName() {
+ return this.partName;
}
- public void setPartitionname(@org.apache.thrift.annotation.Nullable
java.lang.String partitionname) {
- this.partitionname = partitionname;
+ public void setPartName(@org.apache.thrift.annotation.Nullable
java.lang.String partName) {
+ this.partName = partName;
}
- public void unsetPartitionname() {
- this.partitionname = null;
+ public void unsetPartName() {
+ this.partName = null;
}
- /** Returns true if field partitionname is set (has been assigned a value)
and false otherwise */
- public boolean isSetPartitionname() {
- return this.partitionname != null;
+ /** Returns true if field partName is set (has been assigned a value) and
false otherwise */
+ public boolean isSetPartName() {
+ return this.partName != null;
}
- public void setPartitionnameIsSet(boolean value) {
+ public void setPartNameIsSet(boolean value) {
if (!value) {
- this.partitionname = null;
+ this.partName = null;
}
}
@@ -356,6 +378,52 @@ package org.apache.hadoop.hive.metastore.api;
}
}
+ public long getLimit() {
+ return this.limit;
+ }
+
+ public void setLimit(long limit) {
+ this.limit = limit;
+ setLimitIsSet(true);
+ }
+
+ public void unsetLimit() {
+ __isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __LIMIT_ISSET_ID);
+ }
+
+ /** Returns true if field limit is set (has been assigned a value) and false
otherwise */
+ public boolean isSetLimit() {
+ return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
__LIMIT_ISSET_ID);
+ }
+
+ public void setLimitIsSet(boolean value) {
+ __isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID,
value);
+ }
+
+ @org.apache.thrift.annotation.Nullable
+ public java.lang.String getOrder() {
+ return this.order;
+ }
+
+ public void setOrder(@org.apache.thrift.annotation.Nullable java.lang.String
order) {
+ this.order = order;
+ }
+
+ public void unsetOrder() {
+ this.order = null;
+ }
+
+ /** Returns true if field order is set (has been assigned a value) and false
otherwise */
+ public boolean isSetOrder() {
+ return this.order != null;
+ }
+
+ public void setOrderIsSet(boolean value) {
+ if (!value) {
+ this.order = null;
+ }
+ }
+
public void setFieldValue(_Fields field,
@org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case ID:
@@ -374,27 +442,27 @@ package org.apache.hadoop.hive.metastore.api;
}
break;
- case DBNAME:
+ case DB_NAME:
if (value == null) {
- unsetDbname();
+ unsetDbName();
} else {
- setDbname((java.lang.String)value);
+ setDbName((java.lang.String)value);
}
break;
- case TABLENAME:
+ case TB_NAME:
if (value == null) {
- unsetTablename();
+ unsetTbName();
} else {
- setTablename((java.lang.String)value);
+ setTbName((java.lang.String)value);
}
break;
- case PARTITIONNAME:
+ case PART_NAME:
if (value == null) {
- unsetPartitionname();
+ unsetPartName();
} else {
- setPartitionname((java.lang.String)value);
+ setPartName((java.lang.String)value);
}
break;
@@ -414,6 +482,22 @@ package org.apache.hadoop.hive.metastore.api;
}
break;
+ case LIMIT:
+ if (value == null) {
+ unsetLimit();
+ } else {
+ setLimit((java.lang.Long)value);
+ }
+ break;
+
+ case ORDER:
+ if (value == null) {
+ unsetOrder();
+ } else {
+ setOrder((java.lang.String)value);
+ }
+ break;
+
}
}
@@ -426,14 +510,14 @@ package org.apache.hadoop.hive.metastore.api;
case POOL_NAME:
return getPoolName();
- case DBNAME:
- return getDbname();
+ case DB_NAME:
+ return getDbName();
- case TABLENAME:
- return getTablename();
+ case TB_NAME:
+ return getTbName();
- case PARTITIONNAME:
- return getPartitionname();
+ case PART_NAME:
+ return getPartName();
case TYPE:
return getType();
@@ -441,6 +525,12 @@ package org.apache.hadoop.hive.metastore.api;
case STATE:
return getState();
+ case LIMIT:
+ return getLimit();
+
+ case ORDER:
+ return getOrder();
+
}
throw new java.lang.IllegalStateException();
}
@@ -456,16 +546,20 @@ package org.apache.hadoop.hive.metastore.api;
return isSetId();
case POOL_NAME:
return isSetPoolName();
- case DBNAME:
- return isSetDbname();
- case TABLENAME:
- return isSetTablename();
- case PARTITIONNAME:
- return isSetPartitionname();
+ case DB_NAME:
+ return isSetDbName();
+ case TB_NAME:
+ return isSetTbName();
+ case PART_NAME:
+ return isSetPartName();
case TYPE:
return isSetType();
case STATE:
return isSetState();
+ case LIMIT:
+ return isSetLimit();
+ case ORDER:
+ return isSetOrder();
}
throw new java.lang.IllegalStateException();
}
@@ -501,30 +595,30 @@ package org.apache.hadoop.hive.metastore.api;
return false;
}
- boolean this_present_dbname = true && this.isSetDbname();
- boolean that_present_dbname = true && that.isSetDbname();
- if (this_present_dbname || that_present_dbname) {
- if (!(this_present_dbname && that_present_dbname))
+ boolean this_present_dbName = true && this.isSetDbName();
+ boolean that_present_dbName = true && that.isSetDbName();
+ if (this_present_dbName || that_present_dbName) {
+ if (!(this_present_dbName && that_present_dbName))
return false;
- if (!this.dbname.equals(that.dbname))
+ if (!this.dbName.equals(that.dbName))
return false;
}
- boolean this_present_tablename = true && this.isSetTablename();
- boolean that_present_tablename = true && that.isSetTablename();
- if (this_present_tablename || that_present_tablename) {
- if (!(this_present_tablename && that_present_tablename))
+ boolean this_present_tbName = true && this.isSetTbName();
+ boolean that_present_tbName = true && that.isSetTbName();
+ if (this_present_tbName || that_present_tbName) {
+ if (!(this_present_tbName && that_present_tbName))
return false;
- if (!this.tablename.equals(that.tablename))
+ if (!this.tbName.equals(that.tbName))
return false;
}
- boolean this_present_partitionname = true && this.isSetPartitionname();
- boolean that_present_partitionname = true && that.isSetPartitionname();
- if (this_present_partitionname || that_present_partitionname) {
- if (!(this_present_partitionname && that_present_partitionname))
+ boolean this_present_partName = true && this.isSetPartName();
+ boolean that_present_partName = true && that.isSetPartName();
+ if (this_present_partName || that_present_partName) {
+ if (!(this_present_partName && that_present_partName))
return false;
- if (!this.partitionname.equals(that.partitionname))
+ if (!this.partName.equals(that.partName))
return false;
}
@@ -546,6 +640,24 @@ package org.apache.hadoop.hive.metastore.api;
return false;
}
+ boolean this_present_limit = true && this.isSetLimit();
+ boolean that_present_limit = true && that.isSetLimit();
+ if (this_present_limit || that_present_limit) {
+ if (!(this_present_limit && that_present_limit))
+ return false;
+ if (this.limit != that.limit)
+ return false;
+ }
+
+ boolean this_present_order = true && this.isSetOrder();
+ boolean that_present_order = true && that.isSetOrder();
+ if (this_present_order || that_present_order) {
+ if (!(this_present_order && that_present_order))
+ return false;
+ if (!this.order.equals(that.order))
+ return false;
+ }
+
return true;
}
@@ -561,17 +673,17 @@ package org.apache.hadoop.hive.metastore.api;
if (isSetPoolName())
hashCode = hashCode * 8191 + poolName.hashCode();
- hashCode = hashCode * 8191 + ((isSetDbname()) ? 131071 : 524287);
- if (isSetDbname())
- hashCode = hashCode * 8191 + dbname.hashCode();
+ hashCode = hashCode * 8191 + ((isSetDbName()) ? 131071 : 524287);
+ if (isSetDbName())
+ hashCode = hashCode * 8191 + dbName.hashCode();
- hashCode = hashCode * 8191 + ((isSetTablename()) ? 131071 : 524287);
- if (isSetTablename())
- hashCode = hashCode * 8191 + tablename.hashCode();
+ hashCode = hashCode * 8191 + ((isSetTbName()) ? 131071 : 524287);
+ if (isSetTbName())
+ hashCode = hashCode * 8191 + tbName.hashCode();
- hashCode = hashCode * 8191 + ((isSetPartitionname()) ? 131071 : 524287);
- if (isSetPartitionname())
- hashCode = hashCode * 8191 + partitionname.hashCode();
+ hashCode = hashCode * 8191 + ((isSetPartName()) ? 131071 : 524287);
+ if (isSetPartName())
+ hashCode = hashCode * 8191 + partName.hashCode();
hashCode = hashCode * 8191 + ((isSetType()) ? 131071 : 524287);
if (isSetType())
@@ -581,6 +693,14 @@ package org.apache.hadoop.hive.metastore.api;
if (isSetState())
hashCode = hashCode * 8191 + state.hashCode();
+ hashCode = hashCode * 8191 + ((isSetLimit()) ? 131071 : 524287);
+ if (isSetLimit())
+ hashCode = hashCode * 8191 +
org.apache.thrift.TBaseHelper.hashCode(limit);
+
+ hashCode = hashCode * 8191 + ((isSetOrder()) ? 131071 : 524287);
+ if (isSetOrder())
+ hashCode = hashCode * 8191 + order.hashCode();
+
return hashCode;
}
@@ -612,32 +732,32 @@ package org.apache.hadoop.hive.metastore.api;
return lastComparison;
}
}
- lastComparison = java.lang.Boolean.compare(isSetDbname(),
other.isSetDbname());
+ lastComparison = java.lang.Boolean.compare(isSetDbName(),
other.isSetDbName());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetDbname()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname,
other.dbname);
+ if (isSetDbName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName,
other.dbName);
if (lastComparison != 0) {
return lastComparison;
}
}
- lastComparison = java.lang.Boolean.compare(isSetTablename(),
other.isSetTablename());
+ lastComparison = java.lang.Boolean.compare(isSetTbName(),
other.isSetTbName());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetTablename()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename,
other.tablename);
+ if (isSetTbName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbName,
other.tbName);
if (lastComparison != 0) {
return lastComparison;
}
}
- lastComparison = java.lang.Boolean.compare(isSetPartitionname(),
other.isSetPartitionname());
+ lastComparison = java.lang.Boolean.compare(isSetPartName(),
other.isSetPartName());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetPartitionname()) {
- lastComparison =
org.apache.thrift.TBaseHelper.compareTo(this.partitionname,
other.partitionname);
+ if (isSetPartName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partName,
other.partName);
if (lastComparison != 0) {
return lastComparison;
}
@@ -662,6 +782,26 @@ package org.apache.hadoop.hive.metastore.api;
return lastComparison;
}
}
+ lastComparison = java.lang.Boolean.compare(isSetLimit(),
other.isSetLimit());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetLimit()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.limit,
other.limit);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = java.lang.Boolean.compare(isSetOrder(),
other.isSetOrder());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetOrder()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.order,
other.order);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -698,33 +838,33 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
- if (isSetDbname()) {
+ if (isSetDbName()) {
if (!first) sb.append(", ");
- sb.append("dbname:");
- if (this.dbname == null) {
+ sb.append("dbName:");
+ if (this.dbName == null) {
sb.append("null");
} else {
- sb.append(this.dbname);
+ sb.append(this.dbName);
}
first = false;
}
- if (isSetTablename()) {
+ if (isSetTbName()) {
if (!first) sb.append(", ");
- sb.append("tablename:");
- if (this.tablename == null) {
+ sb.append("tbName:");
+ if (this.tbName == null) {
sb.append("null");
} else {
- sb.append(this.tablename);
+ sb.append(this.tbName);
}
first = false;
}
- if (isSetPartitionname()) {
+ if (isSetPartName()) {
if (!first) sb.append(", ");
- sb.append("partitionname:");
- if (this.partitionname == null) {
+ sb.append("partName:");
+ if (this.partName == null) {
sb.append("null");
} else {
- sb.append(this.partitionname);
+ sb.append(this.partName);
}
first = false;
}
@@ -748,6 +888,22 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
+ if (isSetLimit()) {
+ if (!first) sb.append(", ");
+ sb.append("limit:");
+ sb.append(this.limit);
+ first = false;
+ }
+ if (isSetOrder()) {
+ if (!first) sb.append(", ");
+ sb.append("order:");
+ if (this.order == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.order);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -809,26 +965,26 @@ package org.apache.hadoop.hive.metastore.api;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
- case 3: // DBNAME
+ case 3: // DB_NAME
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.dbname = iprot.readString();
- struct.setDbnameIsSet(true);
+ struct.dbName = iprot.readString();
+ struct.setDbNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
- case 4: // TABLENAME
+ case 4: // TB_NAME
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.tablename = iprot.readString();
- struct.setTablenameIsSet(true);
+ struct.tbName = iprot.readString();
+ struct.setTbNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
- case 5: // PARTITIONNAME
+ case 5: // PART_NAME
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.partitionname = iprot.readString();
- struct.setPartitionnameIsSet(true);
+ struct.partName = iprot.readString();
+ struct.setPartNameIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
@@ -849,6 +1005,22 @@ package org.apache.hadoop.hive.metastore.api;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
+ case 8: // LIMIT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.limit = iprot.readI64();
+ struct.setLimitIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
+ }
+ break;
+ case 9: // ORDER
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.order = iprot.readString();
+ struct.setOrderIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
@@ -874,24 +1046,24 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeFieldEnd();
}
}
- if (struct.dbname != null) {
- if (struct.isSetDbname()) {
- oprot.writeFieldBegin(DBNAME_FIELD_DESC);
- oprot.writeString(struct.dbname);
+ if (struct.dbName != null) {
+ if (struct.isSetDbName()) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(struct.dbName);
oprot.writeFieldEnd();
}
}
- if (struct.tablename != null) {
- if (struct.isSetTablename()) {
- oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
- oprot.writeString(struct.tablename);
+ if (struct.tbName != null) {
+ if (struct.isSetTbName()) {
+ oprot.writeFieldBegin(TB_NAME_FIELD_DESC);
+ oprot.writeString(struct.tbName);
oprot.writeFieldEnd();
}
}
- if (struct.partitionname != null) {
- if (struct.isSetPartitionname()) {
- oprot.writeFieldBegin(PARTITIONNAME_FIELD_DESC);
- oprot.writeString(struct.partitionname);
+ if (struct.partName != null) {
+ if (struct.isSetPartName()) {
+ oprot.writeFieldBegin(PART_NAME_FIELD_DESC);
+ oprot.writeString(struct.partName);
oprot.writeFieldEnd();
}
}
@@ -909,6 +1081,18 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetLimit()) {
+ oprot.writeFieldBegin(LIMIT_FIELD_DESC);
+ oprot.writeI64(struct.limit);
+ oprot.writeFieldEnd();
+ }
+ if (struct.order != null) {
+ if (struct.isSetOrder()) {
+ oprot.writeFieldBegin(ORDER_FIELD_DESC);
+ oprot.writeString(struct.order);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -933,13 +1117,13 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetPoolName()) {
optionals.set(1);
}
- if (struct.isSetDbname()) {
+ if (struct.isSetDbName()) {
optionals.set(2);
}
- if (struct.isSetTablename()) {
+ if (struct.isSetTbName()) {
optionals.set(3);
}
- if (struct.isSetPartitionname()) {
+ if (struct.isSetPartName()) {
optionals.set(4);
}
if (struct.isSetType()) {
@@ -948,21 +1132,27 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetState()) {
optionals.set(6);
}
- oprot.writeBitSet(optionals, 7);
+ if (struct.isSetLimit()) {
+ optionals.set(7);
+ }
+ if (struct.isSetOrder()) {
+ optionals.set(8);
+ }
+ oprot.writeBitSet(optionals, 9);
if (struct.isSetId()) {
oprot.writeI64(struct.id);
}
if (struct.isSetPoolName()) {
oprot.writeString(struct.poolName);
}
- if (struct.isSetDbname()) {
- oprot.writeString(struct.dbname);
+ if (struct.isSetDbName()) {
+ oprot.writeString(struct.dbName);
}
- if (struct.isSetTablename()) {
- oprot.writeString(struct.tablename);
+ if (struct.isSetTbName()) {
+ oprot.writeString(struct.tbName);
}
- if (struct.isSetPartitionname()) {
- oprot.writeString(struct.partitionname);
+ if (struct.isSetPartName()) {
+ oprot.writeString(struct.partName);
}
if (struct.isSetType()) {
oprot.writeI32(struct.type.getValue());
@@ -970,12 +1160,18 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetState()) {
oprot.writeString(struct.state);
}
+ if (struct.isSetLimit()) {
+ oprot.writeI64(struct.limit);
+ }
+ if (struct.isSetOrder()) {
+ oprot.writeString(struct.order);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot,
ShowCompactRequest struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol iprot =
(org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet incoming = iprot.readBitSet(7);
+ java.util.BitSet incoming = iprot.readBitSet(9);
if (incoming.get(0)) {
struct.id = iprot.readI64();
struct.setIdIsSet(true);
@@ -985,16 +1181,16 @@ package org.apache.hadoop.hive.metastore.api;
struct.setPoolNameIsSet(true);
}
if (incoming.get(2)) {
- struct.dbname = iprot.readString();
- struct.setDbnameIsSet(true);
+ struct.dbName = iprot.readString();
+ struct.setDbNameIsSet(true);
}
if (incoming.get(3)) {
- struct.tablename = iprot.readString();
- struct.setTablenameIsSet(true);
+ struct.tbName = iprot.readString();
+ struct.setTbNameIsSet(true);
}
if (incoming.get(4)) {
- struct.partitionname = iprot.readString();
- struct.setPartitionnameIsSet(true);
+ struct.partName = iprot.readString();
+ struct.setPartNameIsSet(true);
}
if (incoming.get(5)) {
struct.type =
org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
@@ -1004,6 +1200,14 @@ package org.apache.hadoop.hive.metastore.api;
struct.state = iprot.readString();
struct.setStateIsSet(true);
}
+ if (incoming.get(7)) {
+ struct.limit = iprot.readI64();
+ struct.setLimitIsSet(true);
+ }
+ if (incoming.get(8)) {
+ struct.order = iprot.readString();
+ struct.setOrderIsSet(true);
+ }
}
}
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php
index 8e4571f6cfd..543e1c9749e 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php
@@ -32,17 +32,17 @@ class ShowCompactRequest
'type' => TType::STRING,
),
3 => array(
- 'var' => 'dbname',
+ 'var' => 'dbName',
'isRequired' => false,
'type' => TType::STRING,
),
4 => array(
- 'var' => 'tablename',
+ 'var' => 'tbName',
'isRequired' => false,
'type' => TType::STRING,
),
5 => array(
- 'var' => 'partitionname',
+ 'var' => 'partName',
'isRequired' => false,
'type' => TType::STRING,
),
@@ -57,6 +57,16 @@ class ShowCompactRequest
'isRequired' => false,
'type' => TType::STRING,
),
+ 8 => array(
+ 'var' => 'limit',
+ 'isRequired' => false,
+ 'type' => TType::I64,
+ ),
+ 9 => array(
+ 'var' => 'order',
+ 'isRequired' => false,
+ 'type' => TType::STRING,
+ ),
);
/**
@@ -70,15 +80,15 @@ class ShowCompactRequest
/**
* @var string
*/
- public $dbname = null;
+ public $dbName = null;
/**
* @var string
*/
- public $tablename = null;
+ public $tbName = null;
/**
* @var string
*/
- public $partitionname = null;
+ public $partName = null;
/**
* @var int
*/
@@ -87,6 +97,14 @@ class ShowCompactRequest
* @var string
*/
public $state = null;
+ /**
+ * @var int
+ */
+ public $limit = null;
+ /**
+ * @var string
+ */
+ public $order = null;
public function __construct($vals = null)
{
@@ -97,14 +115,14 @@ class ShowCompactRequest
if (isset($vals['poolName'])) {
$this->poolName = $vals['poolName'];
}
- if (isset($vals['dbname'])) {
- $this->dbname = $vals['dbname'];
+ if (isset($vals['dbName'])) {
+ $this->dbName = $vals['dbName'];
}
- if (isset($vals['tablename'])) {
- $this->tablename = $vals['tablename'];
+ if (isset($vals['tbName'])) {
+ $this->tbName = $vals['tbName'];
}
- if (isset($vals['partitionname'])) {
- $this->partitionname = $vals['partitionname'];
+ if (isset($vals['partName'])) {
+ $this->partName = $vals['partName'];
}
if (isset($vals['type'])) {
$this->type = $vals['type'];
@@ -112,6 +130,12 @@ class ShowCompactRequest
if (isset($vals['state'])) {
$this->state = $vals['state'];
}
+ if (isset($vals['limit'])) {
+ $this->limit = $vals['limit'];
+ }
+ if (isset($vals['order'])) {
+ $this->order = $vals['order'];
+ }
}
}
@@ -150,21 +174,21 @@ class ShowCompactRequest
break;
case 3:
if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->dbname);
+ $xfer += $input->readString($this->dbName);
} else {
$xfer += $input->skip($ftype);
}
break;
case 4:
if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->tablename);
+ $xfer += $input->readString($this->tbName);
} else {
$xfer += $input->skip($ftype);
}
break;
case 5:
if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->partitionname);
+ $xfer += $input->readString($this->partName);
} else {
$xfer += $input->skip($ftype);
}
@@ -183,6 +207,20 @@ class ShowCompactRequest
$xfer += $input->skip($ftype);
}
break;
+ case 8:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->limit);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 9:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->order);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -207,19 +245,19 @@ class ShowCompactRequest
$xfer += $output->writeString($this->poolName);
$xfer += $output->writeFieldEnd();
}
- if ($this->dbname !== null) {
- $xfer += $output->writeFieldBegin('dbname', TType::STRING, 3);
- $xfer += $output->writeString($this->dbname);
+ if ($this->dbName !== null) {
+ $xfer += $output->writeFieldBegin('dbName', TType::STRING, 3);
+ $xfer += $output->writeString($this->dbName);
$xfer += $output->writeFieldEnd();
}
- if ($this->tablename !== null) {
- $xfer += $output->writeFieldBegin('tablename', TType::STRING, 4);
- $xfer += $output->writeString($this->tablename);
+ if ($this->tbName !== null) {
+ $xfer += $output->writeFieldBegin('tbName', TType::STRING, 4);
+ $xfer += $output->writeString($this->tbName);
$xfer += $output->writeFieldEnd();
}
- if ($this->partitionname !== null) {
- $xfer += $output->writeFieldBegin('partitionname', TType::STRING,
5);
- $xfer += $output->writeString($this->partitionname);
+ if ($this->partName !== null) {
+ $xfer += $output->writeFieldBegin('partName', TType::STRING, 5);
+ $xfer += $output->writeString($this->partName);
$xfer += $output->writeFieldEnd();
}
if ($this->type !== null) {
@@ -232,6 +270,16 @@ class ShowCompactRequest
$xfer += $output->writeString($this->state);
$xfer += $output->writeFieldEnd();
}
+ if ($this->limit !== null) {
+ $xfer += $output->writeFieldBegin('limit', TType::I64, 8);
+ $xfer += $output->writeI64($this->limit);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->order !== null) {
+ $xfer += $output->writeFieldBegin('order', TType::STRING, 9);
+ $xfer += $output->writeString($this->order);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index cec613e5273..57ceaf231e7 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -16101,23 +16101,27 @@ class ShowCompactRequest(object):
Attributes:
- id
- poolName
- - dbname
- - tablename
- - partitionname
+ - dbName
+ - tbName
+ - partName
- type
- state
+ - limit
+ - order
"""
- def __init__(self, id=None, poolName=None, dbname=None, tablename=None,
partitionname=None, type=None, state=None,):
+ def __init__(self, id=None, poolName=None, dbName=None, tbName=None,
partName=None, type=None, state=None, limit=None, order=None,):
self.id = id
self.poolName = poolName
- self.dbname = dbname
- self.tablename = tablename
- self.partitionname = partitionname
+ self.dbName = dbName
+ self.tbName = tbName
+ self.partName = partName
self.type = type
self.state = state
+ self.limit = limit
+ self.order = order
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans,
TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -16140,17 +16144,17 @@ class ShowCompactRequest(object):
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
- self.dbname = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ self.dbName = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
- self.tablename = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ self.tbName = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
- self.partitionname = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ self.partName = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
@@ -16163,6 +16167,16 @@ class ShowCompactRequest(object):
self.state = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.I64:
+ self.limit = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 9:
+ if ftype == TType.STRING:
+ self.order = iprot.readString().decode('utf-8',
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -16181,17 +16195,17 @@ class ShowCompactRequest(object):
oprot.writeFieldBegin('poolName', TType.STRING, 2)
oprot.writeString(self.poolName.encode('utf-8') if
sys.version_info[0] == 2 else self.poolName)
oprot.writeFieldEnd()
- if self.dbname is not None:
- oprot.writeFieldBegin('dbname', TType.STRING, 3)
- oprot.writeString(self.dbname.encode('utf-8') if
sys.version_info[0] == 2 else self.dbname)
+ if self.dbName is not None:
+ oprot.writeFieldBegin('dbName', TType.STRING, 3)
+ oprot.writeString(self.dbName.encode('utf-8') if
sys.version_info[0] == 2 else self.dbName)
oprot.writeFieldEnd()
- if self.tablename is not None:
- oprot.writeFieldBegin('tablename', TType.STRING, 4)
- oprot.writeString(self.tablename.encode('utf-8') if
sys.version_info[0] == 2 else self.tablename)
+ if self.tbName is not None:
+ oprot.writeFieldBegin('tbName', TType.STRING, 4)
+ oprot.writeString(self.tbName.encode('utf-8') if
sys.version_info[0] == 2 else self.tbName)
oprot.writeFieldEnd()
- if self.partitionname is not None:
- oprot.writeFieldBegin('partitionname', TType.STRING, 5)
- oprot.writeString(self.partitionname.encode('utf-8') if
sys.version_info[0] == 2 else self.partitionname)
+ if self.partName is not None:
+ oprot.writeFieldBegin('partName', TType.STRING, 5)
+ oprot.writeString(self.partName.encode('utf-8') if
sys.version_info[0] == 2 else self.partName)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 6)
@@ -16201,6 +16215,14 @@ class ShowCompactRequest(object):
oprot.writeFieldBegin('state', TType.STRING, 7)
oprot.writeString(self.state.encode('utf-8') if
sys.version_info[0] == 2 else self.state)
oprot.writeFieldEnd()
+ if self.limit is not None:
+ oprot.writeFieldBegin('limit', TType.I64, 8)
+ oprot.writeI64(self.limit)
+ oprot.writeFieldEnd()
+ if self.order is not None:
+ oprot.writeFieldBegin('order', TType.STRING, 9)
+ oprot.writeString(self.order.encode('utf-8') if
sys.version_info[0] == 2 else self.order)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31179,11 +31201,13 @@ ShowCompactRequest.thrift_spec = (
None, # 0
(1, TType.I64, 'id', None, None, ), # 1
(2, TType.STRING, 'poolName', 'UTF8', None, ), # 2
- (3, TType.STRING, 'dbname', 'UTF8', None, ), # 3
- (4, TType.STRING, 'tablename', 'UTF8', None, ), # 4
- (5, TType.STRING, 'partitionname', 'UTF8', None, ), # 5
+ (3, TType.STRING, 'dbName', 'UTF8', None, ), # 3
+ (4, TType.STRING, 'tbName', 'UTF8', None, ), # 4
+ (5, TType.STRING, 'partName', 'UTF8', None, ), # 5
(6, TType.I32, 'type', None, None, ), # 6
(7, TType.STRING, 'state', 'UTF8', None, ), # 7
+ (8, TType.I64, 'limit', None, None, ), # 8
+ (9, TType.STRING, 'order', 'UTF8', None, ), # 9
)
all_structs.append(ShowCompactResponseElement)
ShowCompactResponseElement.thrift_spec = (
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 11ec76c2d3a..783259c6f10 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -4662,19 +4662,23 @@ class ShowCompactRequest
ID = 1
POOLNAME = 2
DBNAME = 3
- TABLENAME = 4
- PARTITIONNAME = 5
+ TBNAME = 4
+ PARTNAME = 5
TYPE = 6
STATE = 7
+ LIMIT = 8
+ ORDER = 9
FIELDS = {
ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true},
POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true},
- DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname', :optional => true},
- TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename', :optional => true},
- PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true},
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName', :optional => true},
+ TBNAME => {:type => ::Thrift::Types::STRING, :name => 'tbName', :optional => true},
+ PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partName', :optional => true},
TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :optional => true, :enum_class => ::CompactionType},
- STATE => {:type => ::Thrift::Types::STRING, :name => 'state', :optional => true}
+ STATE => {:type => ::Thrift::Types::STRING, :name => 'state', :optional => true},
+ LIMIT => {:type => ::Thrift::Types::I64, :name => 'limit', :optional => true},
+ ORDER => {:type => ::Thrift::Types::STRING, :name => 'order', :optional => true}
}
def struct_fields; FIELDS; end
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 131f86ac1da..d75a73ee2cb 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1347,11 +1347,13 @@ struct CompactionResponse {
struct ShowCompactRequest {
1: optional i64 id,
2: optional string poolName,
- 3: optional string dbname,
- 4: optional string tablename,
- 5: optional string partitionname,
+ 3: optional string dbName,
+ 4: optional string tbName,
+ 5: optional string partName,
6: optional CompactionType type,
- 7: optional string state
+ 7: optional string state,
+ 8: optional i64 limit,
+ 9: optional string order
}
struct ShowCompactResponseElement {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 1d70f7f0f04..73858046f62 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -3885,8 +3885,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws
MetaException {
try {
ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>());
- String query = TxnQueries.SHOW_COMPACTION_QUERY + getShowCompactFilterClause(rqst) +
- TxnQueries.SHOW_COMPACTION_ORDERBY_CLAUSE;
+ String query = TxnQueries.SHOW_COMPACTION_QUERY +
+ getShowCompactFilterClause(rqst) +
+ getShowCompactSortingOrderClause(rqst);
List<String> params = getShowCompactParamList(rqst);
try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
@@ -3894,6 +3895,10 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
if (rqst.isSetId()) {
stmt.setLong(1, rqst.getId());
}
+ int rowLimit = (int) rqst.getLimit();
+ if (rowLimit > 0) {
+ stmt.setMaxRows(rowLimit);
+ }
LOG.debug("Going to execute query <" + query + ">");
try (ResultSet rs = stmt.executeQuery()) {
while (rs.next()) {
@@ -3955,14 +3960,19 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
}
}
+ private String getShowCompactSortingOrderClause(ShowCompactRequest request) {
+ String sortingOrder = request.getOrder();
+ return isNotBlank(sortingOrder) ? " ORDER BY " + sortingOrder : TxnQueries.SHOW_COMPACTION_ORDERBY_CLAUSE;
+ }
+
private List<String> getShowCompactParamList(ShowCompactRequest request) throws MetaException {
if (request.getId() > 0) {
return Collections.emptyList();
}
String poolName = request.getPoolName();
- String dbName = request.getDbname();
- String tableName = request.getTablename();
- String partName = request.getPartitionname();
+ String dbName = request.getDbName();
+ String tableName = request.getTbName();
+ String partName = request.getPartName();
CompactionType type = request.getType();
String state = request.getState();
@@ -3994,13 +4004,13 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
if (request.getId() > 0) {
params.add("\"CC_ID\"=?");
} else {
- if (isNotBlank(request.getDbname())) {
+ if (isNotBlank(request.getDbName())) {
params.add("\"CC_DATABASE\"=?");
}
- if (isNotBlank(request.getTablename())) {
+ if (isNotBlank(request.getTbName())) {
params.add("\"CC_TABLE\"=?");
}
- if (isNotBlank(request.getPartitionname())) {
+ if (isNotBlank(request.getPartName())) {
params.add("\"CC_PARTITION\"=?");
}
if (isNotBlank(request.getState())) {