Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2148#discussion_r183077945
--- Diff:
store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
---
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store.worker;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datamap.DataMapChooser;
+import org.apache.carbondata.core.datamap.DataMapLevel;
+import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
+import org.apache.carbondata.core.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.datastore.row.CarbonRow;
+import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.readcommitter.LatestFilesReadCommittedScope;
+import org.apache.carbondata.core.scan.expression.Expression;
+import org.apache.carbondata.core.scan.model.QueryModel;
+import org.apache.carbondata.core.scan.model.QueryModelBuilder;
+import org.apache.carbondata.hadoop.CarbonInputSplit;
+import org.apache.carbondata.hadoop.CarbonMultiBlockSplit;
+import org.apache.carbondata.hadoop.CarbonRecordReader;
+import org.apache.carbondata.hadoop.readsupport.impl.CarbonRowReadSupport;
+
+import org.apache.spark.search.SearchRequest;
+import org.apache.spark.search.SearchResult;
+import org.apache.spark.search.ShutdownRequest;
+import org.apache.spark.search.ShutdownResponse;
+
+/**
+ * Thread runnable for handling SearchRequest from master.
+ */
+@InterfaceAudience.Internal
+public class SearchRequestHandler {
+
+ private static final LogService LOG =
+ LogServiceFactory.getLogService(SearchRequestHandler.class.getName());
+
+ public SearchResult handleSearch(SearchRequest request) {
+ try {
+ List<CarbonRow> rows = handleRequest(request);
+ return createSuccessResponse(request, rows);
+ } catch (IOException | InterruptedException e) {
+ LOG.error(e);
+ return createFailureResponse(request, e);
+ }
+ }
+
+ public ShutdownResponse handleShutdown(ShutdownRequest request) {
+ return new ShutdownResponse(Status.SUCCESS.ordinal(), "");
+ }
+
+ /**
+ * Builds {@link QueryModel} and read data from files
+ */
+ private List<CarbonRow> handleRequest(SearchRequest request)
+ throws IOException, InterruptedException {
+ TableInfo tableInfo = request.tableInfo();
+ CarbonTable table = CarbonTable.buildFromTableInfo(tableInfo);
+ QueryModel queryModel = createQueryModel(table, request);
+ CarbonMultiBlockSplit mbSplit = request.split().value();
+ long limit = request.limit();
+ long rowCount = 0;
+
+ // If there is FGDataMap, prune the split by applying FGDataMap
+ queryModel = tryPruneByFGDataMap(table, queryModel, mbSplit);
+
+ // In search mode, reader will read multiple blocks by using a thread pool
+ CarbonRecordReader<CarbonRow> reader =
+ new CarbonRecordReader<>(queryModel, new CarbonRowReadSupport());
+ reader.initialize(mbSplit, null);
+
+ // read all rows by the reader
+ List<CarbonRow> rows = new LinkedList<>();
+ try {
+ while (reader.nextKeyValue() && rowCount < limit) {
--- End diff ---
I think using Long.MAX_VALUE is better than using -1, so the code path is unified: the single loop condition `rowCount < limit` handles both the limited and the no-limit cases.
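
A minimal, self-contained sketch of that idea (hypothetical `readUpTo` helper, not the PR's actual code): with `Long.MAX_VALUE` as the "no limit" sentinel, one loop condition covers both cases, while a -1 sentinel would force an extra branch:

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class LimitSentinelSketch {

  // Read at most `limit` rows; Long.MAX_VALUE means "no limit",
  // so the loop needs no special case for the unlimited path.
  static <T> long readUpTo(Iterator<T> reader, long limit, List<T> out) {
    long rowCount = 0;
    while (reader.hasNext() && rowCount < limit) {
      out.add(reader.next());
      rowCount++;
    }
    return rowCount;
  }

  public static void main(String[] args) {
    List<Integer> rows = List.of(1, 2, 3, 4, 5);

    List<Integer> limited = new ArrayList<>();
    readUpTo(rows.iterator(), 2, limited);                 // explicit limit
    System.out.println(limited);                           // [1, 2]

    List<Integer> unlimited = new ArrayList<>();
    readUpTo(rows.iterator(), Long.MAX_VALUE, unlimited);  // "no limit"
    System.out.println(unlimited);                         // [1, 2, 3, 4, 5]
  }
}
```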
---