[
https://issues.apache.org/jira/browse/HIVE-26580?focusedWorklogId=826402&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-826402
]
ASF GitHub Bot logged work on HIVE-26580:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 16/Nov/22 05:33
Start Date: 16/Nov/22 05:33
Worklog Time Spent: 10m
Work Description: rkirtir commented on code in PR #3708:
URL: https://github.com/apache/hive/pull/3708#discussion_r1023522209
##########
ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java:
##########
@@ -80,15 +83,64 @@ public void analyzeInternal(ASTNode root) throws
SemanticException {
case HiveParser.TOK_COMPACT_ID:
compactionId = Long.parseLong(child.getChild(0).getText());
break;
+ case HiveParser.TOK_LIMIT:
+ limit = Short.valueOf((child.getChild(0)).getText());
+ break;
+ case HiveParser.TOK_ORDERBY:
+ orderBy = processSortOrderSpec(child);
+ break;
default:
dbName = stripQuotes(child.getText());
}
}
ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile(),
compactionId, dbName, tblName, poolName, compactionType,
- compactionStatus, partitionSpec);
+ compactionStatus, partitionSpec, limit, orderBy);
Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(),
getOutputs(), desc));
rootTasks.add(task);
task.setFetchSource(true);
setFetchTask(createFetchTask(ShowCompactionsDesc.SCHEMA));
}
+
+
+ private String processSortOrderSpec(ASTNode sortNode) {
+ List<PTFInvocationSpec.OrderExpression> orderExp =
processOrderSpec(sortNode).getExpressions();
+ Map<String, String> orderByAttributes = orderExp.stream().collect(
+ Collectors.toMap(x -> {
+ return x.getExpression().getChild(0)==null ?
+
CompactionColumn.valueOf(x.getExpression().getText().replaceAll("\'","").toUpperCase()).toString():
+
CompactionColumn.valueOf(x.getExpression().getChild(0).getText()).toString();
+ }, x -> x.getOrder().toString()));
+ return orderByAttributes.entrySet().stream().map(e -> e.getKey() + "\t" +
e.getValue()).collect(Collectors.joining(","));
+ }
+ public enum CompactionColumn {
+ COMPACTIONID("CC_ID"),
+ PARTITION("CC_PARTITION"),
+ DATABASE("CC_DATABASE"),
+ TABLE("CC_TABLE"),
+ TYPE("CC_TYPE"),
+ STATE("CC_STATE"),
+ WORKER_HOST("CC_WORKER_ID"),
+ ENQUEUE_TIME("CC_ENQUEUE_TIME"),
+ POOLNAME("CC_POOL_NAME"),
+ WORKER("CC_WORKER_VERSION"),
+ START_TIME("CC_START"),
+ TXNID ("CC_TXN_ID"),
+ HADOOP_JOB_ID("CC_HADOOP_JOB_ID"),
+ NEXT_TXN_ID("CC_NEXT_TXN_ID"),
+ HIGHEST_WRITE_ID("CC_HIGHEST_WRITE_ID");
+
+ private final String colVal;
+ CompactionColumn(String colVal) {
+ this.colVal = colVal;
+ }
+ public String getColValue() {
Review Comment:
fixed
##########
ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java:
##########
@@ -80,15 +83,64 @@ public void analyzeInternal(ASTNode root) throws
SemanticException {
case HiveParser.TOK_COMPACT_ID:
compactionId = Long.parseLong(child.getChild(0).getText());
break;
+ case HiveParser.TOK_LIMIT:
+ limit = Short.valueOf((child.getChild(0)).getText());
+ break;
+ case HiveParser.TOK_ORDERBY:
+ orderBy = processSortOrderSpec(child);
+ break;
default:
dbName = stripQuotes(child.getText());
}
}
ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile(),
compactionId, dbName, tblName, poolName, compactionType,
- compactionStatus, partitionSpec);
+ compactionStatus, partitionSpec, limit, orderBy);
Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(),
getOutputs(), desc));
rootTasks.add(task);
task.setFetchSource(true);
setFetchTask(createFetchTask(ShowCompactionsDesc.SCHEMA));
}
+
+
+ private String processSortOrderSpec(ASTNode sortNode) {
+ List<PTFInvocationSpec.OrderExpression> orderExp =
processOrderSpec(sortNode).getExpressions();
+ Map<String, String> orderByAttributes = orderExp.stream().collect(
+ Collectors.toMap(x -> {
+ return x.getExpression().getChild(0)==null ?
+
CompactionColumn.valueOf(x.getExpression().getText().replaceAll("\'","").toUpperCase()).toString():
+
CompactionColumn.valueOf(x.getExpression().getChild(0).getText()).toString();
+ }, x -> x.getOrder().toString()));
+ return orderByAttributes.entrySet().stream().map(e -> e.getKey() + "\t" +
e.getValue()).collect(Collectors.joining(","));
+ }
+ public enum CompactionColumn {
Review Comment:
fixed
Issue Time Tracking
-------------------
Worklog Id: (was: 826402)
Time Spent: 10h (was: 9h 50m)
> SHOW COMPACTIONS should support ordering and limiting functionality in
> filtering options
> ----------------------------------------------------------------------------------------
>
> Key: HIVE-26580
> URL: https://issues.apache.org/jira/browse/HIVE-26580
> Project: Hive
> Issue Type: Improvement
> Affects Versions: 3.0.0
> Reporter: KIRTI RUGE
> Assignee: KIRTI RUGE
> Priority: Major
> Labels: pull-request-available
> Time Spent: 10h
> Remaining Estimate: 0h
>
> SHOW COMPACTIONS should provide ordering by a defined table column. It should
> also support limiting the number of fetched records
--
This message was sent by Atlassian Jira
(v8.20.10#820010)