jon-wei commented on a change in pull request #7133: 6088 - Time Ordering On Scans
URL: https://github.com/apache/incubator-druid/pull/7133#discussion_r267602505
 
 

 ##########
 File path: processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java
 ##########
 @@ -68,34 +81,117 @@ public ScanQueryRunnerFactory(
   )
   {
     // in single thread and in jetty thread instead of processing thread
-    return new QueryRunner<ScanResultValue>()
-    {
-      @Override
-      public Sequence<ScanResultValue> run(
-          final QueryPlus<ScanResultValue> queryPlus, final Map<String, Object> responseContext
-      )
-      {
-        // Note: this variable is effective only when queryContext has a timeout.
-        // See the comment of CTX_TIMEOUT_AT.
-        final long timeoutAt = System.currentTimeMillis() + QueryContexts.getTimeout(queryPlus.getQuery());
-        responseContext.put(CTX_TIMEOUT_AT, timeoutAt);
+    return (queryPlus, responseContext) -> {
+      ScanQuery query = (ScanQuery) queryPlus.getQuery();
+      int numSegments = 0;
+      final Iterator<QueryRunner<ScanResultValue>> segmentIt = queryRunners.iterator();
+      for (; segmentIt.hasNext(); numSegments++) {
+        segmentIt.next();
+      }
+      // Note: this variable is effective only when queryContext has a timeout.
+      // See the comment of CTX_TIMEOUT_AT.
+      final long timeoutAt = System.currentTimeMillis() + QueryContexts.getTimeout(queryPlus.getQuery());
+      responseContext.put(CTX_TIMEOUT_AT, timeoutAt);
+      if (query.getOrder().equals(ScanQuery.Order.NONE)) {
+        // Use normal strategy
         return Sequences.concat(
             Sequences.map(
                 Sequences.simple(queryRunners),
-                new Function<QueryRunner<ScanResultValue>, Sequence<ScanResultValue>>()
-                {
-                  @Override
-                  public Sequence<ScanResultValue> apply(final QueryRunner<ScanResultValue> input)
-                  {
-                    return input.run(queryPlus, responseContext);
-                  }
-                }
+                input -> input.run(queryPlus, responseContext)
             )
         );
+      } else if (query.getLimit() <= scanQueryConfig.getMaxRowsQueuedForTimeOrdering()) {
+        // Use priority queue strategy
+        return sortAndLimitScanResultValues(
+            Sequences.concat(Sequences.map(
+                Sequences.simple(queryRunners),
+                input -> input.run(queryPlus, responseContext)
+            )),
+            query
+        );
+      } else if (numSegments <= scanQueryConfig.getMaxSegmentsTimeOrderedInMemory()) {
+        // Use n-way merge strategy
+        final Sequence<ScanResultValue> unbatched =
+            Sequences.map(
+                Sequences.simple(queryRunners),
+                (input) -> Sequences.concat(
+                    Sequences.map(
+                        input.run(queryPlus, responseContext),
+                        srv -> Sequences.simple(srv.toSingleEventScanResultValues())
+                    )
+                )
+            ).flatMerge(
+                seq -> seq,
+                Ordering.from(new ScanResultValueTimestampComparator(
+                    query
+                )).reverse() // This needs to be reversed because
+            ).limit(
+                Math.toIntExact(query.getLimit())
+            );
+
+        return unbatched;
       }
+      throw new UOE(
+          "Time ordering for queries of %,d segments per historical and a row limit of %,d is not supported."
+          + "  Try reducing the scope of the query to scan fewer segments than the configurable segment limit of"
+          + " %,d segments or lower the row limit below %,d.",
+          numSegments,
+          query.getLimit(),
+          scanQueryConfig.getMaxSegmentsTimeOrderedInMemory(),
+          scanQueryConfig.getMaxRowsQueuedForTimeOrdering()
+      );
     };
   }
 
+  @VisibleForTesting
+  Sequence<ScanResultValue> sortAndLimitScanResultValues(
+      Sequence<ScanResultValue> inputSequence,
+      ScanQuery scanQuery
+  )
+  {
+    Comparator<ScanResultValue> priorityQComparator = new ScanResultValueTimestampComparator(scanQuery);
+
+    // Converting the limit from long to int could theoretically throw an ArithmeticException but this branch
+    // only runs if limit < MAX_LIMIT_FOR_IN_MEMORY_TIME_ORDERING (which should be < Integer.MAX_VALUE)
+    int limit = Math.toIntExact(scanQuery.getLimit());
+
+    PriorityQueue<ScanResultValue> q = new PriorityQueue<>(limit, priorityQComparator);
+
+    Yielder<ScanResultValue> yielder = inputSequence.toYielder(
+        null,
+        new YieldingAccumulator<ScanResultValue, ScanResultValue>()
+        {
+          @Override
+          public ScanResultValue accumulate(ScanResultValue accumulated, ScanResultValue in)
+          {
+            yield();
+            return in;
+          }
+        }
+    );
+    while (!yielder.isDone()) {
+      ScanResultValue next = yielder.get();
+      List<ScanResultValue> singleEventScanResultValues = next.toSingleEventScanResultValues();
+      for (ScanResultValue srv : singleEventScanResultValues) {
 
 Review comment:
   I would make this conditional (what if there are more than Integer.MAX_VALUE rows?)
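
   A minimal sketch of that suggestion (illustrative only, not code from the PR): guard the
   long-to-int conversion explicitly, so an oversized limit fails with a descriptive error
   instead of an ArithmeticException. The variable longLimit and the message text are
   hypothetical; UOE and scanQuery.getLimit() are as used in the diff above:

       // Hypothetical guard; the message text is illustrative.
       long longLimit = scanQuery.getLimit();
       if (longLimit > Integer.MAX_VALUE) {
         // java.util.PriorityQueue takes an int capacity, so a larger limit
         // cannot use the priority queue strategy at all.
         throw new UOE("Row limit of %,d is too large for in-memory time ordering", longLimit);
       }
       int limit = (int) longLimit;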

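   For context on why the limit must fit in an int at all: the priority queue strategy keeps
   only the best limit rows seen so far, and java.util.PriorityQueue sizes its backing array
   with an int capacity. A self-contained sketch of that bounded top-N pattern in plain Java,
   simplified to raw long timestamps in ascending order rather than ScanResultValue:

       import java.util.ArrayList;
       import java.util.Comparator;
       import java.util.List;
       import java.util.PriorityQueue;

       class TopNByTime
       {
         // Returns the `limit` earliest timestamps (limit must be >= 1).
         // The heap is a max-heap: its head is the worst row retained so far and
         // is evicted whenever an earlier row arrives, so memory stays bounded
         // by `limit` no matter how many rows are scanned.
         static List<Long> earliest(Iterable<Long> timestamps, int limit)
         {
           PriorityQueue<Long> q = new PriorityQueue<>(limit, Comparator.reverseOrder());
           for (long ts : timestamps) {
             if (q.size() < limit) {
               q.offer(ts);
             } else if (ts < q.peek()) {
               q.poll();
               q.offer(ts);
             }
           }
           List<Long> result = new ArrayList<>(q);
           result.sort(Comparator.naturalOrder()); // ascending time for the response
           return result;
         }
       }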
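   The n-way merge branch in the diff bounds memory from the other direction: each per-segment
   sequence is already time-ordered, so the merge only ever holds one candidate row per segment.
   A generic sketch of that pattern, with plain Java iterators standing in for the PR's
   Sequence/flatMerge machinery (names are illustrative):

       import java.util.AbstractMap;
       import java.util.ArrayList;
       import java.util.Comparator;
       import java.util.Iterator;
       import java.util.List;
       import java.util.Map;
       import java.util.PriorityQueue;

       class KWayMerge
       {
         // Merges already-sorted iterators, stopping after `limit` elements.
         // The heap holds one (element, source) pair per non-empty input, so its
         // size tracks the number of segments, not the number of rows.
         static <T> List<T> merge(List<Iterator<T>> sortedInputs, Comparator<T> cmp, int limit)
         {
           PriorityQueue<Map.Entry<T, Iterator<T>>> heap = new PriorityQueue<>(
               Math.max(1, sortedInputs.size()),
               (a, b) -> cmp.compare(a.getKey(), b.getKey())
           );
           for (Iterator<T> it : sortedInputs) {
             if (it.hasNext()) {
               heap.offer(new AbstractMap.SimpleEntry<>(it.next(), it));
             }
           }
           List<T> out = new ArrayList<>();
           while (!heap.isEmpty() && out.size() < limit) {
             Map.Entry<T, Iterator<T>> head = heap.poll();
             out.add(head.getKey());
             if (head.getValue().hasNext()) {
               heap.offer(new AbstractMap.SimpleEntry<>(head.getValue().next(), head.getValue()));
             }
           }
           return out;
         }
       }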