yuqi1129 commented on a change in pull request #902:
URL: https://github.com/apache/incubator-iotdb/pull/902#discussion_r488604981



##########
File path: calcite/src/test/java/org/apache/iotdb/calcite/IoTDBAdapterTest.java
##########
@@ -0,0 +1,339 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.calcite;
+
+import com.google.common.collect.ImmutableMap;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import org.apache.calcite.test.CalciteAssert;
+import org.apache.calcite.util.Sources;
+import org.apache.iotdb.db.utils.EnvironmentUtils;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class IoTDBAdapterTest {
+
+  public static final ImmutableMap<String, String> MODEL =
+      ImmutableMap.of("model",
+          Sources.of(IoTDBAdapterTest.class.getResource("/model.json"))
+              .file().getAbsolutePath());
+  private static String[] sqls = new String[]{
+
+      "SET STORAGE GROUP TO root.vehicle",
+      "SET STORAGE GROUP TO root.other",
+
+      "CREATE timeSERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, 
ENCODING=PLAIN",
+      "CREATE timeSERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, 
ENCODING=PLAIN",
+
+      "CREATE timeSERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE timeSERIES root.vehicle.d1.s3 WITH DATATYPE=TEXT, 
ENCODING=PLAIN",
+      "CREATE timeSERIES root.vehicle.d1.s4 WITH DATATYPE=BOOLEAN, 
ENCODING=PLAIN",
+
+      "CREATE timeSERIES root.other.d1.s0 WITH DATATYPE=FLOAT, ENCODING=RLE",
+
+      "insert into root.vehicle.d0(timestamp,s0) values(1,101)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s0) values(100,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(101,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(102,80)",
+      "insert into root.vehicle.d0(timestamp,s0) values(103,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(104,90)",
+      "insert into root.vehicle.d0(timestamp,s0) values(105,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(106,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(50,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(1000,22222)",
+
+      "insert into root.vehicle.d0(timestamp,s1) values(1,1101)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s1) values(100,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(101,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(102,180)",
+      "insert into root.vehicle.d0(timestamp,s1) values(103,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(104,190)",
+      "insert into root.vehicle.d0(timestamp,s1) values(105,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,40000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(50,50000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(1000,55555)",
+
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,55555)",
+      "insert into root.vehicle.d0(timestamp,s2) values(2,2.22)",
+      "insert into root.vehicle.d0(timestamp,s2) values(3,3.33)",
+      "insert into root.vehicle.d0(timestamp,s2) values(4,4.44)",
+      "insert into root.vehicle.d0(timestamp,s2) values(102,10.00)",
+      "insert into root.vehicle.d0(timestamp,s2) values(105,11.11)",
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,1000.11)",
+
+      "insert into root.vehicle.d0(timestamp,s3) values(60,'aaaaa')",
+      "insert into root.vehicle.d0(timestamp,s3) values(70,'bbbbb')",
+      "insert into root.vehicle.d0(timestamp,s3) values(80,'ccccc')",
+      "insert into root.vehicle.d0(timestamp,s3) values(101,'ddddd')",
+      "insert into root.vehicle.d0(timestamp,s3) values(102,'fffff')",
+
+      "insert into root.vehicle.d0(timestamp,s4) values(23, false)",
+      "insert into root.vehicle.d0(timestamp,s4) values(100, true)",
+
+      "insert into root.vehicle.d1(timestamp,s0) values(1,999)",
+      "insert into root.vehicle.d1(timestamp,s0) values(1000,10)",
+
+      "insert into root.vehicle.d1(timestamp,s1) values(2,9999)",
+      "insert into root.vehicle.d1(timestamp,s1) values(1000,5)",
+
+      "insert into root.vehicle.d1(timestamp,s2) values(2,12345.6)",
+      "insert into root.vehicle.d1(timestamp,s2) values(2222,2.22)",
+
+      "insert into root.vehicle.d1(timestamp,s3) values(10,'ten')",
+      "insert into root.vehicle.d1(timestamp,s3) values(1000,'thousand')",
+
+      "insert into root.vehicle.d1(timestamp,s4) values(100, false)",
+      "insert into root.vehicle.d1(timestamp,s4) values(10000, true)",
+
+      "insert into root.vehicle.d0(timestamp,s1) 
values(2000-01-01T08:00:00+08:00, 100)",
+      "insert into root.vehicle.d0(timestamp,s3) 
values(2000-01-01T08:00:00+08:00, 'good')",
+
+      "insert into root.other.d1(timestamp,s0) values(2, 3.14)",};
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.envSetUp();
+
+    insertData();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    EnvironmentUtils.cleanEnv();
+  }
+
+  private static void insertData() throws ClassNotFoundException {
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", 
"root");
+        Statement statement = connection.createStatement()) {
+
+      for (String sql : sqls) {
+        statement.execute(sql);
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  @Test
+  public void testSelect() {
+    CalciteAssert.that()
+        .with(MODEL)
+        .with("UnQuotedCasing", IoTDBConstant.UNQUOTED_CASING)
+        .query("select * from \"root.vehicle\"")

Review comment:
       What's the SQL dialect here — Oracle, MySQL, or something else?

##########
File path: calcite/IoTDB-Calcite-Adapter.md
##########
@@ -0,0 +1,232 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+# IoTDB - Calcite Adapter 功能文档
+
+## 关系表结构
+
+IoTDB - Calcite Adapter 中使用的关系表结构为:
+
+| time | device | sensor1 | sensor2 | sensor3 | ... |
+| ---- | ------ | ------- | ------- | ------- | --- |
+|      |        |         |         |         |     |
+
+其中,IoTDB 中每个存储组作为一张表,表中的列包括 time , device 
列以及该存储组中所有设备中传感器的最大并集,其中不同设备的同名传感器应该具有相同的数据类型。
+
+例如对于 IoTDB 中存储组 `root.sg`,其中设备及其对应的传感器为:
+- d1 -> s1, s2
+- d2 -> s2, s3
+- d3 -> s1, s4
+
+则在 IoTDB - Calcite Adapter 中的表名为 `root.sg`,其表结构为
+
+| time | device | s1  | s2  | s3  | s4  |
+| ---- | ------ | --- | --- | --- | --- |
+|      |        |     |     |     |     |
+
+## 工作原理
+
+接下来简单介绍 IoTDB - Calcite Adapter 的工作原理。
+
+输入的 SQL 语句在经过 Calcite 
的解析验证后,对 `IoTDBRules` 
中定义的优化(下推)规则进行匹配,对于能够下推的节点做相应转化后,得到能够在 IoTDB 端执行的 SQL 语句,然后在 IoTDB 
端执行查询语句获取源数据;对于不能下推的节点则调用 Calcite 默认的物理计划进行执行,最后通过 `IoTDBEnumerator` 遍历结果集获取结果。
+
+
+## 查询介绍
+
+当前在 `IoTDBRules` 中定义的下推规则有:`IoTDBProjectRule`, `IoTDBFilterRule`, 
`IoTDBLimitRule`。
+
+### IoTDBProjectRule
+
+IoTDBProjectRule 实现了将查询语句中出现的投影列下推到 IoTDB 端进行执行。
+
+例如:(以下 sql 均为测试中的语句)
+
+1. 对于通配符
+
+```sql
+select * from "root.vehicle"
+```
+
+对于通配符 `*`,将在转化中保持原样,而不转化为列名,得到 IoTDB 中的查询语句为:
+
+```sql
+select * from root.vehicle.* align by device
+```
+
+2. 对于非通配符的传感器列
+
+```sql
+select s0 from "root.vehicle"
+```
+
+将转化为:
+
+```sql
+select s0 from root.vehicle.* align by device
+```
+
+3. 对于非通配符的非传感器列
+
+```sql
+select "time", device, s2 from "root.vehicle"
+```
+
+该语句中的 time 及 device 列是 IoTDB 的查询语句中不需要包括的,因此转化将去掉这两列,得到 IoTDB 中的查询语句为:
+
+```sql
+select s2 from root.vehicle.* align by device
+```
+
+特别地,如果查询语句中仅包含 time 及 device 列,则投影部分将转化为通配符 `*`。
+
+4. 重命名 Alias
+
+当前 IoTDB - Calcite Adapter 仅支持在 SELECT 语句中对投影列进行重命名,不支持在后续语句中使用重命名后的名称。
+
+```sql
+select "time" AS t, device AS d, s2 from "root.vehicle"
+```
+
+将得到结果中 time 列的名字为 t,device 列的名字为 d。
+
+### IoTDBFilterRule
+
+IoTDBFilterRule 实现了将查询语句中的 WHERE 子句下推到 IoTDB 端进行执行。
+
+1. WHERE 子句中不限制 device 列
+
+```sql
+select * from "root.vehicle" where "time" < 10 AND s0 >= 150
+```
+
+对于 time 列将不作改变,由于未限制具体的设备,因此传感器列不会与具体的设备名进行拼接,得到 IoTDB 中的查询语句为:
+
+```sql
+select * from root.vehicle.* where time < 10 AND s0 >= 150
+```
+
+2. WHERE 子句中限制 device 列
+
+- 仅限制单个设备
+
+```sql
+select * from "root.vehicle" where device = 'root.vehicle.d0' AND "time" > 10 
AND s0 <= 100
+```
+
+如果 WHERE 中只限制了单个设备且其它限制条件均是对该设备的限制,则在 IoTDB 中将转化为对该设备的查询,上述查询将转化为:
+
+```sql
+select * from root.vehicle.d0 where time > 10 AND s0 <= 100
+```
+
+- 限制多个设备
+
+```sql
+select * from "root.vehicle" where (device = 'root.vehicle.d0' AND "time" <= 
1) OR (device = 'root.vehicle.d1' AND s0 < 100)
+```
+
+如果 WHERE 中限制了多个设备,将转化为多条查询语句,根据对每个设备的限制条件分别进行查询。
+
+如上述查询语句将转化为两条 SQL 在 IoTDB 中执行:
+
+```sql
+select * from root.vehicle.d0 where time <= 1
+select * from root.vehicle.d1 where s0 < 100
+```
+
+- 既有限制设备的条件,又有全局条件
+
+```sql
+select * from "root.vehicle" where (device = 'root.vehicle.d0' AND "time" <= 
1) OR s0 = 999
+```
+
+在上述 SQL 语句中,除了有对设备 `root.vehicle.d0` 的单独限制外,还有一个限制条件 `s0 = 
999`,该限制条件被认为是一个全局条件,任何设备只要满足该条件都被认为是正确结果。
+
+因此上述查询将转化为对存储组中所有设备的查询,对于有单独限制条件的设备将单独处理,其它剩余设备将使用全局条件统一查询。
+
+```sql
+select * from root.vehicle.d0 where time <= 1 OR s0 = 999
+select * from root.vehicle.d1 where s0 = 999
+```
+
+注:由于测试中恰好只有两个设备,如果再有一个设备 d2,则将在 FROM 子句加上 `root.vehicle.d2` 而非为设备 d2 单独再次查询。
+
+### IoTDBLimitRule
+
+IoTDBLimitRule 实现了将查询语句中的 LIMIT 及 OFFSET 子句下推到 IoTDB 端进行执行。

Review comment:
       In fact, IoTDBLimitRule can't push limit and offset down to IoTDBTable; 
the limit and offset logic is done in code generated by Calcite — see 
`IoTDBToEnumerableConverter#implement`, which calls `IoTDBTable#query` by 
reflection.
   
   You can debug the `query` method to see this for yourself.

##########
File path: calcite/src/main/java/org/apache/iotdb/calcite/IoTDBTable.java
##########
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.calcite;
+
+import com.google.common.collect.ImmutableList;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.calcite.adapter.java.AbstractQueryableTable;
+import org.apache.calcite.linq4j.AbstractEnumerable;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Enumerator;
+import org.apache.calcite.linq4j.QueryProvider;
+import org.apache.calcite.linq4j.Queryable;
+import org.apache.calcite.linq4j.function.Function1;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeImpl;
+import org.apache.calcite.rel.type.RelDataTypeSystem;
+import org.apache.calcite.rel.type.RelProtoDataType;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.TranslatableTable;
+import org.apache.calcite.schema.impl.AbstractTableQueryable;
+import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.Util;
+import org.apache.iotdb.db.exception.query.QueryProcessException;
+
+public class IoTDBTable extends AbstractQueryableTable
+    implements TranslatableTable {
+
+  RelProtoDataType protoRowType;
+  private final IoTDBSchema schema;
+  private final String storageGroup;
+
+  public IoTDBTable(IoTDBSchema schema, String storageGroup) {
+    super(Object[].class);
+    this.schema = schema;
+    this.storageGroup = storageGroup;
+  }
+
+  public String toString() {
+    return "IoTDBTable {" + storageGroup + "}";
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
+    try {
+      if (protoRowType == null) {
+        protoRowType = schema.getRelDataType(storageGroup);
+      }
+    } catch (SQLException | QueryProcessException e) {
+      // print exception error here
+      e.printStackTrace();
+    }
+    return protoRowType.apply(typeFactory);
+  }
+
+  @Override
+  public <T> Queryable<T> asQueryable(QueryProvider queryProvider, SchemaPlus 
schema,
+      String tableName) {
+    return new IoTDBQueryable<>(queryProvider, schema, this, storageGroup);
+  }
+
+  @Override
+  public RelNode toRel(RelOptTable.ToRelContext context, RelOptTable 
relOptTable) {
+    final RelOptCluster cluster = context.getCluster();
+    return new IoTDBTableScan(cluster, cluster.traitSetOf(IoTDBRel.CONVENTION),
+        relOptTable, this, null);
+  }
+
+  public Enumerable<Object> query(final Connection connection) {
+    return query(connection, ImmutableList.of(), ImmutableList.of(), 
ImmutableList.of(),
+        ImmutableList.of(), 0, 0);

Review comment:
       Cassandra does not support pushing limit and offset down to the table 
scan, so just set 0 here and implement that logic in the upper RelNode.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to