wsjz commented on code in PR #27835:
URL: https://github.com/apache/doris/pull/27835#discussion_r1418844271
##########
fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java:
##########
@@ -1691,28 +1692,53 @@ private void handleShowMaxComputeTablePartitions(ShowPartitionsStmt showStmt) {
         resultSet = new ShowResultSet(showStmt.getMetaData(), rows);
     }
-    private void handleShowHMSTablePartitions(ShowPartitionsStmt showStmt) {
+    private void handleShowHMSTablePartitions(ShowPartitionsStmt showStmt) throws AnalysisException {
         HMSExternalCatalog catalog = (HMSExternalCatalog) (showStmt.getCatalog());
         List<List<String>> rows = new ArrayList<>();
         String dbName = ClusterNamespace.getNameFromFullName(showStmt.getTableName().getDb());
         List<String> partitionNames;
         LimitElement limit = showStmt.getLimitElement();
-        if (limit != null && limit.hasLimit()) {
-            // only limit is valid on Hive
+        Map<String, Expr> filterMap = showStmt.getFilterMap();
+        List<OrderByPair> orderByPairs = showStmt.getOrderByPairs();
+
+        if (limit != null && limit.hasLimit() && limit.getOffset() == 0
+                && (orderByPairs == null || !orderByPairs.get(0).isDesc())) {
+            // hmsClient returns unordered partition list, hence if offset > 0 cannot pass limit
             partitionNames = catalog.getClient()
-                    .listPartitionNames(dbName, showStmt.getTableName().getTbl(), limit.getLimit());
+                    .listPartitionNames(dbName, showStmt.getTableName().getTbl(), limit.getLimit());
         } else {
             partitionNames = catalog.getClient().listPartitionNames(dbName, showStmt.getTableName().getTbl());
         }
+
+        /* Filter add rows */
         for (String partition : partitionNames) {
            List<String> list = new ArrayList<>();
+
+            if (filterMap != null && !filterMap.isEmpty()) {
+                if (!PartitionsProcDir.filter(ShowPartitionsStmt.FILTER_PARTITION_NAME, partition, filterMap)) {
+                    continue;
+                }
+            }
            list.add(partition);
            rows.add(list);
        }
         // sort by partition name
-        rows.sort(Comparator.comparing(x -> x.get(0)));
+        if (orderByPairs != null && orderByPairs.get(0).isDesc()) {
+            rows.sort(Comparator.comparing(x -> x.get(0), Comparator.reverseOrder()));
+        } else {
+            rows.sort(Comparator.comparing(x -> x.get(0)));
+        }
+
+        if (limit != null && limit.hasLimit()) {
+            int beginIndex = (int) limit.getOffset();
+            int endIndex = (int) (beginIndex + limit.getLimit());
+            if (endIndex > rows.size()) {
+                endIndex = rows.size();
+            }
+            rows = rows.subList(beginIndex, endIndex);
Review Comment:
The limit has already been applied above, so applying it again here yields fewer rows than expected. For example, with offset 5 and limit 10, your logic extracts only 10 rows from Hive, and the subsequent subList returns just the 5 rows starting at index 5. But we need 10 rows.
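
To make the arithmetic in the comment above concrete, here is a minimal, self-contained sketch of the double-limit effect using plain java.util lists. The values (offset 5, limit 10) and the partition names are hypothetical, and this is not Doris code:

```java
import java.util.ArrayList;
import java.util.List;

// Illustration of applying LIMIT twice, as described in the review comment.
// Hypothetical values: offset = 5, limit = 10.
public class DoubleLimitSketch {
    public static void main(String[] args) {
        long offset = 5;
        long limit = 10;

        // Suppose the metastore call already applied the limit and returned only 10 names.
        List<String> fromHive = new ArrayList<>();
        for (int i = 0; i < limit; i++) {
            fromHive.add("p" + i);
        }

        // Applying subList(offset, offset + limit) again, capped at the list size,
        // leaves only 5 rows instead of the 10 the user asked for.
        int begin = (int) offset;
        int end = Math.min((int) (begin + limit), fromHive.size());
        List<String> result = fromHive.subList(begin, end);

        System.out.println(result.size()); // prints 5, not 10
    }
}
```

Under this reading, the in-memory subList can only return the full limit if the upstream fetch supplies at least offset + limit rows, or if the limit is not pushed down at all when an offset is present.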