tristaZero commented on a change in pull request #10889: URL: https://github.com/apache/shardingsphere/pull/10889#discussion_r657037370
########## File path: shardingsphere-infra/shardingsphere-infra-optimize/src/test/java/org/apache/shardingsphere/infra/optimize/core/convert/SelectStatementSqlNodeConverterTest.java ########## @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.shardingsphere.infra.optimize.core.convert; + +import org.apache.calcite.sql.SqlJoin; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlSelect; +import org.apache.shardingsphere.infra.database.type.DatabaseTypeRegistry; +import org.apache.shardingsphere.infra.database.type.dialect.MySQLDatabaseType; +import org.apache.shardingsphere.infra.parser.ShardingSphereSQLParserEngine; +import org.apache.shardingsphere.sql.parser.sql.common.statement.SQLStatement; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.Optional; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; + +/** + * testcase of converting shardingshphere ast to calcite ast. 
+ * + * <p>after converting phrase finished, the next phrase is comparing the converted result with the + * result of calcite parser. + * </p> + */ +public final class SelectStatementSqlNodeConverterTest { + + private ShardingSphereSQLParserEngine sqlStatementParserEngine; + + @Before + public void init() { + sqlStatementParserEngine = new ShardingSphereSQLParserEngine(DatabaseTypeRegistry.getTrunkDatabaseTypeName( + new MySQLDatabaseType())); + } + + @Test + public void testConvertSimpleSelect() { + String sql = "select order_id, user_id from t_order"; + SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + Assert.assertThat(optional.get(), instanceOf(SqlSelect.class)); + SqlSelect sqlSelect = (SqlSelect) optional.get(); + assertEquals(2, sqlSelect.getSelectList().size()); + assertNull(sqlSelect.getWhere()); + /* + TODO compare ast from calcite parser and ast converted from ss ast if possible + SqlParser parser = SqlParser.create(sql); + SqlNode calciteSqlNode = parser.parseQuery(); + Assert.assertNotNull(calciteSqlNode); + */ + } + + @Test + public void testConvertSimpleSelectFilter() { + String sql = "select order_id, user_id from t_order where order_id = 10"; + SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + Assert.assertThat(optional.get(), instanceOf(SqlSelect.class)); + SqlSelect sqlSelect = (SqlSelect) optional.get(); + assertEquals(2, sqlSelect.getSelectList().size()); + assertNotNull(sqlSelect.getWhere()); + } + + @Test + public void testConvertSimpleSelectFilterGroupBy() { + String sql = "select order_id, user_id from t_order where order_id = 10 group by order_id"; + SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = 
SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + Assert.assertThat(optional.get(), instanceOf(SqlSelect.class)); + SqlSelect sqlSelect = (SqlSelect) optional.get(); + assertEquals(2, sqlSelect.getSelectList().size()); + assertNotNull(sqlSelect.getWhere()); + assertEquals(1, sqlSelect.getGroup().size()); + } + + @Test + public void testConvertSimpleSelectFilterOrderBy() { + String sql = "select order_id, user_id from t_order where user_id = 10 order by order_id desc"; + SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + assertThat(optional.get(), instanceOf(SqlSelect.class)); + SqlSelect sqlSelect = (SqlSelect) optional.get(); + assertEquals(2, sqlSelect.getSelectList().size()); + assertNotNull(sqlSelect.getWhere()); + assertEquals(1, sqlSelect.getOrderList().size()); + } + + @Test + public void testConvertInnerJoin() { + String sql = "select 10 + 30, o1.order_id + 10, o1.order_id, o1.user_id, o2.status from t_order o1 join t_order_item o2 on " + + "o1.order_id = o2.order_id where o1.status='FINISHED' and o2.order_item_id > 1024 and 1=1 order by " + + "o1.order_id desc"; + SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + assertThat(optional.get(), instanceOf(SqlSelect.class)); + SqlSelect sqlSelect = (SqlSelect) optional.get(); + assertThat(sqlSelect.getFrom(), instanceOf(SqlJoin.class)); + assertEquals(1, sqlSelect.getOrderList().size()); + } + + @Test + public void testConvertLeftOuterJoin() { + String sql = "select 10 + 30, o1.order_id + 10, o1.order_id, o1.user_id, o2.status from t_order o1 left outer join t_order_item o2 on " + + "o1.order_id = o2.order_id where o1.status='FINISHED' and o2.order_item_id > 1024 and 1=1 order by " + + "o1.order_id desc"; + 
SQLStatement sqlStatement = sqlStatementParserEngine.parse(sql, false); + Optional<SqlNode> optional = SqlNodeConvertEngine.convert(sqlStatement); + assertTrue(optional.isPresent()); + // TODO outer join is not supported by parser of ShardingSphere + } + Review comment: Could you remove this redundant line? ########## File path: shardingsphere-infra/shardingsphere-infra-optimize/src/main/java/org/apache/shardingsphere/infra/optimize/core/convert/converter/impl/SelectStatementSqlNodeConverter.java ########## @@ -22,20 +22,66 @@ import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.shardingsphere.infra.optimize.core.convert.converter.SqlNodeConverter; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.pagination.PaginationValueSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.pagination.limit.LimitSegment; import org.apache.shardingsphere.sql.parser.sql.common.statement.dml.SelectStatement; +import org.apache.shardingsphere.sql.parser.sql.dialect.handler.dml.SelectStatementHandler; +import java.util.AbstractMap; +import java.util.Map; import java.util.Optional; /** * Select statement sql node converter. */ -public final class SelectStatementSqlNodeConverter implements SqlNodeConverter<SelectStatement> { +public final class SelectStatementSqlNodeConverter implements SqlNodeConverter<SelectStatement, SqlNode> { @Override public Optional<SqlNode> convert(final SelectStatement selectStatement) { - Optional<SqlNode> distinct = new DistinctSqlNodeConverter().convert(selectStatement.getProjections()); - // TODO : prepare other sqlNodes referring to `distinct`. 
- return Optional.of(new SqlSelect(SqlParserPos.ZERO, (SqlNodeList) distinct.orElse(null), null, null, null, null, null, - null, null, null, null, null)); + Optional<SqlNodeList> distinct = new DistinctSqlNodeConverter().convert(selectStatement.getProjections()); + Optional<SqlNodeList> projections = new ProjectionsSqlNodeConverter().convert(selectStatement.getProjections()); + Optional<SqlNode> from = new TableSqlNodeConverter().convert(selectStatement.getFrom()); + Optional<SqlNode> where = new WhereSqlNodeConverter().convert(selectStatement.getWhere().orElse(null)); + Optional<SqlNodeList> groupBy = new GroupBySqlNodeConverter().convert(selectStatement.getGroupBy().orElse(null)); + Optional<SqlNode> having = new HavingSqlNodeConverter().convert(selectStatement.getHaving().orElse(null)); + Optional<SqlNodeList> orderBy = new OrderBySqlNodeConverter().convert(selectStatement.getOrderBy().orElse(null)); + Optional<LimitSegment> limit = SelectStatementHandler.getLimitSegment(selectStatement); + Map.Entry<SqlNode, SqlNode> offsetRowCount = convertPagination(limit.orElse(null)); + return Optional.of(new SqlSelect(SqlParserPos.ZERO, + distinct.orElse(null), + projections.orElse(null), + from.orElse(null), + where.orElse(null), + groupBy.orElse(null), + having.orElse(null), + null, + orderBy.orElse(null), + offsetRowCount.getKey(), + offsetRowCount.getValue(), + null)); } + + /** + * convert pagination. + * @param limitSegment pagination clause + * @return offset and fetch <code>SqlNode</code>. 
+ */ + public static Map.Entry<SqlNode, SqlNode> convertPagination(final LimitSegment limitSegment) { + if (limitSegment == null) { + return new AbstractMap.SimpleEntry<>(null, null); + } + + Optional<SqlNode> offsetSqlNode = Optional.empty(); + Optional<SqlNode> fetchSqlNode = Optional.empty(); + Optional<PaginationValueSegment> offset = limitSegment.getOffset(); + Optional<PaginationValueSegment> fetch = limitSegment.getRowCount(); + if (offset.isPresent()) { + offsetSqlNode = new PaginationValueSqlConverter().convert(offset.get()); + } + if (fetch.isPresent()) { + fetchSqlNode = new PaginationValueSqlConverter().convert(fetch.get()); + } + return new AbstractMap.SimpleEntry<>(offsetSqlNode.orElse(null), fetchSqlNode.orElse(null)); + } + Review comment: Hi, could you remove this blank line? ########## File path: shardingsphere-infra/shardingsphere-infra-optimize/src/main/java/org/apache/shardingsphere/infra/optimize/core/convert/converter/SqlNodeConverter.java ########## @@ -17,20 +17,55 @@ package org.apache.shardingsphere.infra.optimize.core.convert.converter; +import com.google.common.collect.Lists; import org.apache.calcite.sql.SqlNode; +import org.apache.shardingsphere.infra.optimize.core.convert.converter.impl.ColumnOrderByItemSqlNodeConverter; import org.apache.shardingsphere.sql.parser.api.visitor.ASTNode; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.order.item.ColumnOrderByItemSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.order.item.ExpressionOrderByItemSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.order.item.IndexOrderByItemSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.order.item.OrderByItemSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.order.item.TextOrderByItemSegment; +import java.util.Collection; +import java.util.List; import java.util.Optional; /** * SqlNode converter. 
*/ -public interface SqlNodeConverter<T extends ASTNode> { +public interface SqlNodeConverter<T extends ASTNode, R extends SqlNode> { /** * Convert. * @param astNode ast node * @return sqlNode optional */ - Optional<SqlNode> convert(T astNode); + Optional<R> convert(T astNode); + + /** + * Convert order by items. + * @param orderByItems order by item list. + * @return a collection of order by item <code>SqlNode</code> + */ + default List<SqlNode> convertOrderByItems(final Collection<OrderByItemSegment> orderByItems) { Review comment: It looks like `convertOrderByItems` is similar to `PaginationValueSqlConverter`. Do you think it would be better not to provide `convertOrderByItems` as a default implementation? I mean, `default` is too general for most of the classes. But `orderByItem` does not apply to many simple queries, like `select * from tb`; ########## File path: shardingsphere-infra/shardingsphere-infra-optimize/src/main/java/org/apache/shardingsphere/infra/optimize/core/convert/converter/impl/SelectStatementSqlNodeConverter.java ########## @@ -22,20 +22,69 @@ import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.shardingsphere.infra.optimize.core.convert.converter.SqlNodeConverter; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.pagination.PaginationValueSegment; +import org.apache.shardingsphere.sql.parser.sql.common.segment.dml.pagination.limit.LimitSegment; import org.apache.shardingsphere.sql.parser.sql.common.statement.dml.SelectStatement; +import org.apache.shardingsphere.sql.parser.sql.dialect.handler.dml.SelectStatementHandler; +import java.util.AbstractMap; +import java.util.Map; import java.util.Optional; /** * Select statement sql node converter. 
*/ -public final class SelectStatementSqlNodeConverter implements SqlNodeConverter<SelectStatement> { +public final class SelectStatementSqlNodeConverter implements SqlNodeConverter<SelectStatement, SqlNode> { @Override public Optional<SqlNode> convert(final SelectStatement selectStatement) { - Optional<SqlNode> distinct = new DistinctSqlNodeConverter().convert(selectStatement.getProjections()); - // TODO : prepare other sqlNodes referring to `distinct`. - return Optional.of(new SqlSelect(SqlParserPos.ZERO, (SqlNodeList) distinct.orElse(null), null, null, null, null, null, - null, null, null, null, null)); + Optional<SqlNodeList> distinct = new DistinctSqlNodeConverter().convert(selectStatement.getProjections()); + Optional<SqlNodeList> projections = new ProjectionsSqlNodeConverter().convert(selectStatement.getProjections()); + Optional<SqlNode> from = new TableSqlNodeConverter().convert(selectStatement.getFrom()); + Optional<SqlNode> where = new WhereSqlNodeConverter().convert(selectStatement.getWhere().orElse(null)); + Optional<SqlNodeList> groupBy = new GroupBySqlNodeConverter().convert(selectStatement.getGroupBy().orElse(null)); + Optional<SqlNode> having = new HavingSqlNodeConverter().convert(selectStatement.getHaving().orElse(null)); + Optional<SqlNodeList> orderBy = new OrderBySqlNodeConverter().convert(selectStatement.getOrderBy().orElse(null)); + Optional<LimitSegment> limit = SelectStatementHandler.getLimitSegment(selectStatement); + Map.Entry<SqlNode, SqlNode> offsetRowCount = convertPagination(limit.orElse(null)); + return Optional.of(new SqlSelect(SqlParserPos.ZERO, + distinct.orElse(null), + projections.orElse(null), + from.orElse(null), + where.orElse(null), + groupBy.orElse(null), + having.orElse(null), + null, + orderBy.orElse(null), + offsetRowCount.getKey(), + offsetRowCount.getValue(), + null)); } + + /** + * convert pagination. + * @param limitSegment pagination clause + * @return offset and fetch <code>SqlNode</code>. 
+ */ + public static Map.Entry<SqlNode, SqlNode> convertPagination(final LimitSegment limitSegment) { Review comment: Hi, do you think #10949 is a feasible way to fix this issue? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [email protected]
