Github user ajithme commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2589#discussion_r207433215
--- Diff:
store/sdk/src/main/java/org/apache/carbondata/sdk/store/ScannerImpl.java ---
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.sdk.store;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastore.row.CarbonRow;
+import org.apache.carbondata.core.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.scan.expression.Expression;
+import org.apache.carbondata.hadoop.CarbonInputSplit;
+import org.apache.carbondata.hadoop.CarbonMultiBlockSplit;
+import org.apache.carbondata.hadoop.api.CarbonInputFormat;
+import org.apache.carbondata.sdk.store.conf.StoreConf;
+import org.apache.carbondata.sdk.store.descriptor.ScanDescriptor;
+import org.apache.carbondata.sdk.store.descriptor.TableIdentifier;
+import org.apache.carbondata.sdk.store.exception.CarbonException;
+import org.apache.carbondata.sdk.store.service.DataService;
+import org.apache.carbondata.sdk.store.service.PruneService;
+import org.apache.carbondata.sdk.store.service.ServiceFactory;
+import org.apache.carbondata.sdk.store.service.model.PruneRequest;
+import org.apache.carbondata.sdk.store.service.model.PruneResponse;
+import org.apache.carbondata.sdk.store.service.model.ScanRequest;
+import org.apache.carbondata.sdk.store.service.model.ScanResponse;
+
+import org.apache.hadoop.conf.Configuration;
+
+class ScannerImpl implements Scanner {
+ private static final LogService LOGGER =
+
LogServiceFactory.getLogService(ScannerImpl.class.getCanonicalName());
+
+ private PruneService pruneService;
+ private TableInfo tableInfo;
+
+ ScannerImpl(StoreConf conf, TableInfo tableInfo) throws IOException {
+ this.pruneService = ServiceFactory.createPruneService(
+ conf.masterHost(), conf.registryServicePort());
+ this.tableInfo = tableInfo;
+ }
+
+ /**
+ * Trigger a RPC to Carbon Master to do pruning
+ * @param table table identifier
+ * @param filterExpression expression of filter predicate given by user
+ * @return list of ScanUnit
+ * @throws CarbonException if any error occurs
+ */
+ @Override
+ public List<ScanUnit> prune(TableIdentifier table, Expression
filterExpression)
+ throws CarbonException {
+ try {
+ Configuration configuration = new Configuration();
+ CarbonInputFormat.setTableName(configuration, table.getTableName());
--- End diff --
You can use CarbonInputFormat.setTableInfo(configuration, tableInfo) here; otherwise
org.apache.carbondata.hadoop.api.CarbonInputFormat#getAbsoluteTableIdentifier
will return an identifier with an empty table path.
---