kangkaisen commented on a change in pull request #4430:
URL: https://github.com/apache/incubator-doris/pull/4430#discussion_r476402777



##########
File path: fe/fe-core/src/main/java/org/apache/doris/planner/OdbcScanNode.java
##########
@@ -0,0 +1,200 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.planner;
+
+import com.google.common.base.CharMatcher;
+import org.apache.doris.analysis.Analyzer;
+import org.apache.doris.analysis.Expr;
+import org.apache.doris.analysis.ExprSubstitutionMap;
+import org.apache.doris.analysis.SlotDescriptor;
+import org.apache.doris.analysis.SlotRef;
+import org.apache.doris.analysis.TupleDescriptor;
+import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.OdbcTable;
+import org.apache.doris.common.UserException;
+import org.apache.doris.thrift.TExplainLevel;
+import org.apache.doris.thrift.TOdbcScanNode;
+import org.apache.doris.thrift.TOdbcTableType;
+import org.apache.doris.thrift.TPlanNode;
+import org.apache.doris.thrift.TPlanNodeType;
+import org.apache.doris.thrift.TScanRangeLocations;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.Lists;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Full scan of an ODBC table.
+ */
+public class OdbcScanNode extends ScanNode {
+    private static final Logger LOG = LogManager.getLogger(OdbcScanNode.class);
+
+    private static String mysqlProperName(String name) {
+        return "`" + name + "`";
+    }
+
+    private static String databaseProperName(TOdbcTableType tableType, String 
name) {
+        switch (tableType) {
+            case MYSQL:
+                return mysqlProperName(name);
+        }
+
+        return name;
+    }
+
+    // now we do not support push down filter with char not in ASCII
+    private static boolean isASCIIString(String filter) {
+        return CharMatcher.ascii().matchesAllOf(filter);
+    }
+
    // Dialect-quoted column names that form the SELECT list of the remote query.
    private final List<String> columns = new ArrayList<String>();
    // Conjuncts rendered as SQL text, pushed down to the remote database.
    private final List<String> filters = new ArrayList<String>();
    // Dialect-quoted remote table name.
    private String tblName;
    // ODBC driver string taken from the catalog table definition.
    private String driver;
    // Remote database flavor (e.g. MYSQL); controls identifier quoting.
    private TOdbcTableType odbcType;
+
+    /**
+     * Constructs node to scan given data files of table 'tbl'.
+     */
+    public OdbcScanNode(PlanNodeId id, TupleDescriptor desc, OdbcTable tbl) {
+        super(id, desc, "SCAN ODBC");
+        driver = tbl.getOdbcDriver();
+        odbcType = tbl.getOdbcTableType();
+        tblName = databaseProperName(odbcType, tbl.getOdbcTableName());
+    }
+
+    @Override
+    protected String debugString() {
+        MoreObjects.ToStringHelper helper = MoreObjects.toStringHelper(this);
+        return helper.addValue(super.debugString()).toString();
+    }
+
    /**
     * Finalizes planning for this node: derives the SELECT column list and
     * the push-down filter list from the analyzed conjuncts, then refreshes
     * the node's statistics.
     */
    @Override
    public void finalize(Analyzer analyzer) throws UserException {
        // Convert predicates to Odbc columns and filters.
        createOdbcColumns(analyzer);
        createOdbcFilters(analyzer);
        computeStats(analyzer);
    }
+
+    @Override
+    protected String getNodeExplainString(String prefix, TExplainLevel 
detailLevel) {
+        StringBuilder output = new StringBuilder();
+        output.append(prefix).append("TABLE: ").append(tblName).append("\n");
+        output.append(prefix).append("Query: 
").append(getOdbcQueryStr()).append("\n");
+        return output.toString();
+    }
+
+    private String getOdbcQueryStr() {
+        StringBuilder sql = new StringBuilder("SELECT ");
+        sql.append(Joiner.on(", ").join(columns));
+        sql.append(" FROM ").append(tblName);
+
+        if (!filters.isEmpty()) {
+            sql.append(" WHERE (");
+            sql.append(Joiner.on(") AND (").join(filters));
+            sql.append(")");
+        }
+        return sql.toString();
+    }
+
+    private void createOdbcColumns(Analyzer analyzer) {
+        for (SlotDescriptor slot : desc.getSlots()) {
+            if (!slot.isMaterialized()) {
+                continue;
+            }
+            Column col = slot.getColumn();
+            columns.add(databaseProperName(odbcType, col.getName()));
+        }
+        // this happens when count(*)
+        if (0 == columns.size()) {
+            columns.add("*");
+        }
+    }
+
    // We convert predicates of the form <slotref> op <constant> to Odbc filters
    private void createOdbcFilters(Analyzer analyzer) {
        if (conjuncts.isEmpty()) {
            return;

        }
        // Collect every SlotRef used by the conjuncts so their column
        // references can be rewritten into the remote dialect's quoting.
        List<SlotRef> slotRefs = Lists.newArrayList();
        Expr.collectList(conjuncts, SlotRef.class, slotRefs);
        ExprSubstitutionMap sMap = new ExprSubstitutionMap();
        for (SlotRef slotRef : slotRefs) {
            // Clone so the original conjunct is left untouched; drop the table
            // qualifier and quote the bare column name for the target database.
            SlotRef tmpRef = (SlotRef) slotRef.clone();
            tmpRef.setTblName(null);
            tmpRef.setLabel(databaseProperName(odbcType, tmpRef.getColumnName()));
            sMap.put(slotRef, tmpRef);
        }
        ArrayList<Expr> odbcConjuncts = Expr.cloneList(conjuncts, sMap);
        for (Expr p : odbcConjuncts) {
            String filter = p.toMySql();
            // Only pure-ASCII filter text is pushed down (see isASCIIString);
            // non-ASCII predicates stay in conjuncts and are evaluated locally.
            if (isASCIIString(filter)) {
                filters.add(filter);
                // NOTE(review): p is a clone, so this remove relies on
                // Expr.equals() matching the original conjunct — confirm the
                // pushed-down predicate is actually dropped from local eval.
                conjuncts.remove(p);
            }
        }
    }
+
+    @Override
+    protected void toThrift(TPlanNode msg) {
+        msg.node_type = TPlanNodeType.ODBC_SCAN_NODE;
+
+        TOdbcScanNode odbcScanNode = new TOdbcScanNode();
+        odbcScanNode.tuple_id = desc.getId().asInt();
+        odbcScanNode.table_name = tblName;
+        odbcScanNode.driver = driver;
+        odbcScanNode.type = odbcType;
+        odbcScanNode.columns = columns;
+        odbcScanNode.filters = filters;
+
+        msg.odbc_scan_node = odbcScanNode;
+    }
+
    /**
     * We query Odbc Meta to get request's data location
     * extra result info will pass to backend ScanNode
     */
    @Override
    public List<TScanRangeLocations> getScanRangeLocations(long maxScanRangeLength) {
        // NOTE(review): returning null appears to mean the remote table has no
        // local data placement, so the scheduler assigns backends itself —
        // confirm against how the coordinator handles a null range list.
        return null;
    }
+
    @Override
    public int getNumInstances() {
        // A single fragment instance runs the whole remote query.
        return 1;
    }
+
+    @Override
+    public void computeStats(Analyzer analyzer) {
+        super.computeStats(analyzer);
+        // even if current node scan has no data, at least one backend will be assigned when the fragment actually executes
+        numNodes = numNodes <= 0 ? 1 : numNodes;
+        // this is just to avoid mysql scan node's cardinality being -1. So 
that we can calculate the join cost

Review comment:
       ```suggestion
           // this is just to avoid odbc scan node's cardinality being -1. So that we can calculate the join cost
       ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to