LakshSingla commented on code in PR #12386:
URL: https://github.com/apache/druid/pull/12386#discussion_r842959365
##########
sql/src/main/codegen/config.fmpp:
##########
@@ -51,12 +51,15 @@ data: {
# List of additional classes and packages to import.
# Example. "org.apache.calcite.sql.*", "java.util.List".
imports: [
+ "java.util.List"
Review Comment:
Can you please check whether this is already included in Parser.jj. If it is,
then we might not want to add it here.
##########
sql/src/main/codegen/includes/common.ftl:
##########
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+SqlNodeList ClusterItems() :
+{
+ List<SqlNode> list;
+ final Span s;
+ SqlNode e;
+}
+{
+ e = OrderItem() {
+ s = span();
+ list = startList(e);
+ }
+ (
+ LOOKAHEAD(2) <COMMA> e = OrderItem() { list.add(e); }
+ )*
+ {
+ return new SqlNodeList(list, s.addAll(list).pos());
+ }
+}
+
+org.apache.druid.java.util.common.Pair<Granularity, String>
PartitionGranularity() :
+{
+ SqlNode e = null;
+ Granularity granularity = null;
+ String unparseString = null;
+}
+{
+ (
+ <HOUR>
+ {
+ granularity = Granularities.HOUR;
+ unparseString = "HOUR";
+ }
+ |
+ <DAY>
+ {
+ granularity = Granularities.DAY;
+ unparseString = "DAY";
+ }
+ |
+ <MONTH>
+ {
+ granularity = Granularities.MONTH;
+ unparseString = "MONTH";
+ }
+ |
+ <YEAR>
+ {
+ granularity = Granularities.YEAR;
+ unparseString = "YEAR";
+ }
+ |
+ <ALL>
+ {
+ granularity = Granularities.ALL;
+ unparseString = "ALL";
+ }
+ [
+ <TIME>
+ {
+ unparseString += " TIME";
+ }
+ ]
+ |
+ e = Expression(ExprContext.ACCEPT_SUB_QUERY)
+ {
+ granularity =
DruidSqlParserUtils.convertSqlNodeToGranularityThrowingParseExceptions(e);
+ unparseString = e.toString();
+ }
+ )
+ {
+ return new org.apache.druid.java.util.common.Pair(granularity,
unparseString);
+ }
+}
+
+List<String> PartitionSpecs() :
+{
+ List<String> list;
+ String intervalString;
+}
+{
+ (
+ intervalString = PartitionSpec()
+ {
+ return startList(intervalString);
+ }
+ |
+ <LPAREN>
+ intervalString = PartitionSpec()
+ {
+ list = startList(intervalString);
+ }
+ (
+ <COMMA>
+ intervalString = PartitionSpec()
+ {
+ list.add(intervalString);
+ }
+ )*
+ <RPAREN>
+ {
+ return list;
+ }
+ )
+}
+
+String PartitionSpec() :
+{
+ SqlNode sqlNode;
+}
+{
+ (
+ <ALL> <TIME>
+ {
+ return "all";
+ }
+ |
+ <PARTITION> sqlNode = StringLiteral()
+ {
+ return SqlParserUtil.parseString(SqlLiteral.stringValue(sqlNode));
+ }
+ )
+}
Review Comment:
nit: newline. `.ftl` files are not checked for their format.
##########
sql/src/main/codegen/includes/replace.ftl:
##########
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// Using fully qualified name for Pair class, since Calcite also has a same
class name being used in the Parser.jj
+SqlNode DruidSqlReplace() :
+{
+ final List<SqlLiteral> keywords = new ArrayList<SqlLiteral>();
+ final SqlNodeList keywordList;
+ SqlNode table;
+ SqlNodeList extendList = null;
+ SqlNode source;
+ SqlNodeList columnList = null;
+ final Span s;
+ SqlInsert sqlInsert;
+ org.apache.druid.java.util.common.Pair<Granularity, String> partitionedBy
= new org.apache.druid.java.util.common.Pair(null, null);
+ List<String> partitionSpecList;
+}
+{
+ <REPLACE> { s = span(); }
+ SqlInsertKeywords(keywords)
+ {
+ keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
+ }
+ <INTO> table = CompoundIdentifier()
+ <FOR> partitionSpecList = PartitionSpecs()
+ [
+ LOOKAHEAD(5)
+ [ <EXTEND> ]
+ extendList = ExtendList() {
+ table = extend(table, extendList);
+ }
+ ]
+ [
+ LOOKAHEAD(2)
+ { final Pair<SqlNodeList, SqlNodeList> p; }
Review Comment:
Any particular reason why this declaration is here instead of the top-level
declarations?
##########
sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java:
##########
@@ -290,12 +305,13 @@ private void resetPlanner()
private PlannerResult planWithDruidConvention(
final RelRoot root,
@Nullable final SqlExplain explain,
- @Nullable final SqlInsert insert
+ @Nullable final SqlInsert insert,
Review Comment:
I echo Rohan's comment here. If we can help it, we should try to
collapse insert and replace into a single variable. The only time a distinction
would be necessary is when we are trying to populate the context
parameters with the time chunks/clustered by/partitioned by etc. Otherwise, the
underlying engine doesn't need to know about the presence of two separate nodes.
##########
sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlReplace.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.parser;
+
+import com.google.common.base.Preconditions;
+import org.apache.calcite.sql.SqlInsert;
+import org.apache.calcite.sql.SqlNodeList;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlWriter;
+import org.apache.druid.java.util.common.granularity.Granularity;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.List;
+
+/**
+ * Extends the 'replace' call to hold custom parameters specific to Druid i.e.
PARTITIONED BY and the PARTITION SPECS
+ * This class extends the {@link SqlInsert} so that this SqlNode can be used in
+ * {@link org.apache.calcite.sql2rel.SqlToRelConverter} for getting converted
into RelNode, and further processing
+ */
+public class DruidSqlReplace extends SqlInsert
+{
+ public static final String SQL_REPLACE_TIME_CHUNKS = "sqlReplaceTimeChunks";
+
+ // This allows reusing super.unparse
+ public static final SqlOperator OPERATOR = SqlInsert.OPERATOR;
+
+ private final Granularity partitionedBy;
+ private final String partitionedByStringForUnparse;
+
+ private final List<String> replaceTimeChunks;
+
+ /**
+ * While partitionedBy and partitionedByStringForUnparse can be null as
arguments to the constructor, this is
+ * disallowed (semantically) and the constructor performs checks to ensure
that. This helps in producing friendly
+ * errors when the PARTITIONED BY custom clause is not present, and keeps
its error separate from JavaCC/Calcite's
+ * custom errors which can be cryptic when someone accidentally forgets to
explicitly specify the PARTITIONED BY clause
+ */
+ public DruidSqlReplace(
+ @Nonnull SqlInsert insertNode,
+ @Nullable Granularity partitionedBy,
+ @Nullable String partitionedByStringForUnparse,
+ @Nonnull List<String> replaceTimeChunks
+ ) throws ParseException
+ {
+ super(
+ insertNode.getParserPosition(),
+ (SqlNodeList) insertNode.getOperandList().get(0), // No better getter
to extract this
+ insertNode.getTargetTable(),
+ insertNode.getSource(),
+ insertNode.getTargetColumnList()
+ );
+ if (partitionedBy == null) {
+ throw new ParseException("REPLACE statements must specify PARTITIONED BY
clause explictly");
+ }
+ this.partitionedBy = partitionedBy;
+
+ Preconditions.checkNotNull(partitionedByStringForUnparse);
+ this.partitionedByStringForUnparse = partitionedByStringForUnparse;
+
+ this.replaceTimeChunks = replaceTimeChunks;
+ }
+
+ public List<String> getReplaceTimeChunks()
+ {
+ return replaceTimeChunks;
+ }
+
+ public Granularity getPartitionedBy()
+ {
+ return partitionedBy;
+ }
+
+ @Nonnull
+ @Override
+ public SqlOperator getOperator()
+ {
+ return OPERATOR;
+ }
+
+ @Override
+ public void unparse(SqlWriter writer, int leftPrec, int rightPrec)
+ {
+ writer.startList(SqlWriter.FrameTypeEnum.SELECT);
+ writer.sep("REPLACE INTO");
+ final int opLeft = getOperator().getLeftPrec();
+ final int opRight = getOperator().getRightPrec();
+ getTargetTable().unparse(writer, opLeft, opRight);
+
+ writer.keyword("FOR");
+ final SqlWriter.Frame frame = writer.startList("(", ")");
+ List<String> replaceTimeChunks = getReplaceTimeChunks();
+ for (String replaceTimeChunk : replaceTimeChunks) {
+ writer.sep(","); // sep() takes care of not printing the first separator
+ writer.literal(unparseTimeChunk(replaceTimeChunk));
+ }
+ writer.endList(frame);
+
+ if (getTargetColumnList() != null) {
+ getTargetColumnList().unparse(writer, opLeft, opRight);
+ }
+ writer.newlineAndIndent();
+ getSource().unparse(writer, 0, 0);
+ writer.keyword("PARTITIONED BY");
+ writer.keyword(partitionedByStringForUnparse);
+ }
+
+ private String unparseTimeChunk(String timeChunkString)
+ {
+ if ("all".equals(timeChunkString)) {
+ return "ALL TIME";
Review Comment:
This seems weird to me. `unparse` is basically a string representation of
whatever the user entered, with proper indentation, parenthesization, and
capitalization (as per my understanding). We shouldn't try to convert it to an
equivalent form if we can help it.
##########
sql/src/main/codegen/includes/replace.ftl:
##########
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// Using fully qualified name for Pair class, since Calcite also has a same
class name being used in the Parser.jj
Review Comment:
nit: Can be moved to where `Pair` is actually used and not as a top-level
comment for the rule.
##########
sql/src/main/codegen/includes/replace.ftl:
##########
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// Using fully qualified name for Pair class, since Calcite also has a same
class name being used in the Parser.jj
+SqlNode DruidSqlReplace() :
+{
+ final List<SqlLiteral> keywords = new ArrayList<SqlLiteral>();
+ final SqlNodeList keywordList;
+ SqlNode table;
+ SqlNodeList extendList = null;
+ SqlNode source;
+ SqlNodeList columnList = null;
+ final Span s;
+ SqlInsert sqlInsert;
+ org.apache.druid.java.util.common.Pair<Granularity, String> partitionedBy
= new org.apache.druid.java.util.common.Pair(null, null);
+ List<String> partitionSpecList;
+}
+{
+ <REPLACE> { s = span(); }
+ SqlInsertKeywords(keywords)
+ {
+ keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
+ }
+ <INTO> table = CompoundIdentifier()
+ <FOR> partitionSpecList = PartitionSpecs()
+ [
+ LOOKAHEAD(5)
+ [ <EXTEND> ]
+ extendList = ExtendList() {
+ table = extend(table, extendList);
+ }
+ ]
+ [
+ LOOKAHEAD(2)
Review Comment:
Same as above — can you please explain the reason for this lookahead?
(It doesn't need to be a comment in the code; you can add the reason as a reply
as well.) This would help me follow the logic.
##########
sql/src/main/codegen/includes/replace.ftl:
##########
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// Using fully qualified name for Pair class, since Calcite also has a same
class name being used in the Parser.jj
+SqlNode DruidSqlReplace() :
+{
+ final List<SqlLiteral> keywords = new ArrayList<SqlLiteral>();
+ final SqlNodeList keywordList;
+ SqlNode table;
+ SqlNodeList extendList = null;
+ SqlNode source;
+ SqlNodeList columnList = null;
+ final Span s;
+ SqlInsert sqlInsert;
+ org.apache.druid.java.util.common.Pair<Granularity, String> partitionedBy
= new org.apache.druid.java.util.common.Pair(null, null);
+ List<String> partitionSpecList;
+}
+{
+ <REPLACE> { s = span(); }
+ SqlInsertKeywords(keywords)
+ {
+ keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
+ }
+ <INTO> table = CompoundIdentifier()
+ <FOR> partitionSpecList = PartitionSpecs()
+ [
+ LOOKAHEAD(5)
Review Comment:
Can you please add the reason for this lookahead? I am unable to follow this
optional block. Alternatively, if this is to be removed as per Rohan's comment,
you can ignore my comment.
##########
sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java:
##########
@@ -760,82 +779,116 @@ public T next()
@Nullable
private final DruidSqlInsert insert;
+ @Nullable
+ private final DruidSqlReplace replace;
+
private final SqlNode query;
@Nullable
private final Granularity ingestionGranularity;
+ @Nullable
+ private final List<String> replaceTimeChunks;
+
private ParsedNodes(
@Nullable SqlExplain explain,
@Nullable DruidSqlInsert insert,
+ @Nullable DruidSqlReplace replace,
SqlNode query,
- @Nullable Granularity ingestionGranularity
+ @Nullable Granularity ingestionGranularity,
+ @Nullable List<String> partitionSpec
)
{
this.explain = explain;
this.insert = insert;
+ this.replace = replace;
this.query = query;
this.ingestionGranularity = ingestionGranularity;
+ this.replaceTimeChunks = partitionSpec;
}
static ParsedNodes create(final SqlNode node) throws ValidationException
{
SqlExplain explain = null;
DruidSqlInsert druidSqlInsert = null;
+ DruidSqlReplace druidSqlReplace = null;
SqlNode query = node;
Granularity ingestionGranularity = null;
+ List<String> replaceTimeChunks = null;
if (query.getKind() == SqlKind.EXPLAIN) {
explain = (SqlExplain) query;
query = explain.getExplicandum();
}
if (query.getKind() == SqlKind.INSERT) {
- druidSqlInsert = (DruidSqlInsert) query;
- query = druidSqlInsert.getSource();
-
- // Check if ORDER BY clause is not provided to the underlying query
- if (query instanceof SqlOrderBy) {
- SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
- SqlNodeList orderByList = sqlOrderBy.orderList;
- if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY)))
{
- throw new ValidationException("Cannot have ORDER BY on an INSERT
query, use CLUSTERED BY instead.");
- }
- }
+ if (query instanceof DruidSqlInsert) {
+ druidSqlInsert = (DruidSqlInsert) query;
+ query = druidSqlInsert.getSource();
- ingestionGranularity = druidSqlInsert.getPartitionedBy();
+ // Check if ORDER BY clause is not provided to the underlying query
+ if (query instanceof SqlOrderBy) {
+ SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
+ SqlNodeList orderByList = sqlOrderBy.orderList;
+ if (!(orderByList == null ||
orderByList.equals(SqlNodeList.EMPTY))) {
+ throw new ValidationException("Cannot have ORDER BY on an INSERT
query, use CLUSTERED BY instead.");
+ }
+ }
- if (druidSqlInsert.getClusteredBy() != null) {
- // If we have a CLUSTERED BY clause, extract the information in that
CLUSTERED BY and create a new SqlOrderBy
- // node
- SqlNode offset = null;
- SqlNode fetch = null;
+ ingestionGranularity = druidSqlInsert.getPartitionedBy();
+
+ if (druidSqlInsert.getClusteredBy() != null) {
+ // If we have a CLUSTERED BY clause, extract the information in
that CLUSTERED BY and create a new
+ // SqlOrderBy node
+ SqlNode offset = null;
+ SqlNode fetch = null;
+
+ if (query instanceof SqlOrderBy) {
+ SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
+ // This represents the underlying query free of OFFSET, FETCH
and ORDER BY clauses
+ // For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo
OFFSET 10 FETCH 30 ORDER BY dim1 GROUP
+ // BY dim1 this would represent the "SELECT dim1, sum(dim2) from
foo GROUP BY dim1
+ query = sqlOrderBy.query;
+ offset = sqlOrderBy.offset;
+ fetch = sqlOrderBy.fetch;
+ }
+ // Creates a new SqlOrderBy query, which may have our CLUSTERED BY
overwritten
+ query = new SqlOrderBy(
+ query.getParserPosition(),
+ query,
+ druidSqlInsert.getClusteredBy(),
+ offset,
+ fetch
+ );
+ }
+ } else if (query instanceof DruidSqlReplace) {
+ druidSqlReplace = (DruidSqlReplace) query;
+ query = druidSqlReplace.getSource();
+ // Check if ORDER BY clause is not provided to the underlying query
if (query instanceof SqlOrderBy) {
SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
- // This represents the underlying query free of OFFSET, FETCH and
ORDER BY clauses
- // For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo
OFFSET 10 FETCH 30 ORDER BY dim1 GROUP BY dim1
- // this would represent the "SELECT dim1, sum(dim2) from foo GROUP
BY dim1
- query = sqlOrderBy.query;
- offset = sqlOrderBy.offset;
- fetch = sqlOrderBy.fetch;
+ SqlNodeList orderByList = sqlOrderBy.orderList;
+ if (!(orderByList == null ||
orderByList.equals(SqlNodeList.EMPTY))) {
+ throw new ValidationException("Cannot have ORDER BY on a REPLACE
query.");
+ }
}
- // Creates a new SqlOrderBy query, which may have our CLUSTERED BY
overwritten
- query = new SqlOrderBy(
- query.getParserPosition(),
- query,
- druidSqlInsert.getClusteredBy(),
- offset,
- fetch
- );
+
+ List<String> replaceTimeChunksList =
druidSqlReplace.getReplaceTimeChunks();
+ if (replaceTimeChunksList == null ||
replaceTimeChunksList.isEmpty()) {
+ throw new ValidationException("Missing partition specs for
replace. Use FOR statement to specify them.");
Review Comment:
nit: ~~statement~~ clause
##########
sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlReplace.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.parser;
+
+import com.google.common.base.Preconditions;
+import org.apache.calcite.sql.SqlInsert;
+import org.apache.calcite.sql.SqlNodeList;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlWriter;
+import org.apache.druid.java.util.common.granularity.Granularity;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.List;
+
+/**
+ * Extends the 'replace' call to hold custom parameters specific to Druid i.e.
PARTITIONED BY and the PARTITION SPECS
+ * This class extends the {@link SqlInsert} so that this SqlNode can be used in
+ * {@link org.apache.calcite.sql2rel.SqlToRelConverter} for getting converted
into RelNode, and further processing
+ */
+public class DruidSqlReplace extends SqlInsert
+{
+ public static final String SQL_REPLACE_TIME_CHUNKS = "sqlReplaceTimeChunks";
+
+ // This allows reusing super.unparse
Review Comment:
Are we using `super.unparse` in this method? If not, then maybe we can use a
different operator and remove this comment as well.
##########
sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlReplace.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.parser;
+
+import com.google.common.base.Preconditions;
+import org.apache.calcite.sql.SqlInsert;
+import org.apache.calcite.sql.SqlNodeList;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlWriter;
+import org.apache.druid.java.util.common.granularity.Granularity;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.List;
+
+/**
+ * Extends the 'replace' call to hold custom parameters specific to Druid i.e.
PARTITIONED BY and the PARTITION SPECS
+ * This class extends the {@link SqlInsert} so that this SqlNode can be used in
+ * {@link org.apache.calcite.sql2rel.SqlToRelConverter} for getting converted
into RelNode, and further processing
+ */
+public class DruidSqlReplace extends SqlInsert
+{
+ public static final String SQL_REPLACE_TIME_CHUNKS = "sqlReplaceTimeChunks";
+
+ // This allows reusing super.unparse
+ public static final SqlOperator OPERATOR = SqlInsert.OPERATOR;
+
+ private final Granularity partitionedBy;
+ private final String partitionedByStringForUnparse;
+
+ private final List<String> replaceTimeChunks;
+
+ /**
+ * While partitionedBy and partitionedByStringForUnparse can be null as
arguments to the constructor, this is
+ * disallowed (semantically) and the constructor performs checks to ensure
that. This helps in producing friendly
+ * errors when the PARTITIONED BY custom clause is not present, and keeps
its error separate from JavaCC/Calcite's
+ * custom errors which can be cryptic when someone accidentally forgets to
explicitly specify the PARTITIONED BY clause
+ */
+ public DruidSqlReplace(
+ @Nonnull SqlInsert insertNode,
+ @Nullable Granularity partitionedBy,
+ @Nullable String partitionedByStringForUnparse,
+ @Nonnull List<String> replaceTimeChunks
+ ) throws ParseException
+ {
+ super(
+ insertNode.getParserPosition(),
+ (SqlNodeList) insertNode.getOperandList().get(0), // No better getter
to extract this
+ insertNode.getTargetTable(),
+ insertNode.getSource(),
+ insertNode.getTargetColumnList()
+ );
+ if (partitionedBy == null) {
+ throw new ParseException("REPLACE statements must specify PARTITIONED BY
clause explictly");
+ }
+ this.partitionedBy = partitionedBy;
+
+ Preconditions.checkNotNull(partitionedByStringForUnparse);
+ this.partitionedByStringForUnparse = partitionedByStringForUnparse;
+
+ this.replaceTimeChunks = replaceTimeChunks;
+ }
+
+ public List<String> getReplaceTimeChunks()
+ {
+ return replaceTimeChunks;
+ }
+
+ public Granularity getPartitionedBy()
+ {
+ return partitionedBy;
+ }
+
+ @Nonnull
+ @Override
+ public SqlOperator getOperator()
+ {
+ return OPERATOR;
+ }
+
+ @Override
+ public void unparse(SqlWriter writer, int leftPrec, int rightPrec)
+ {
+ writer.startList(SqlWriter.FrameTypeEnum.SELECT);
+ writer.sep("REPLACE INTO");
+ final int opLeft = getOperator().getLeftPrec();
+ final int opRight = getOperator().getRightPrec();
+ getTargetTable().unparse(writer, opLeft, opRight);
+
+ writer.keyword("FOR");
+ final SqlWriter.Frame frame = writer.startList("(", ")");
+ List<String> replaceTimeChunks = getReplaceTimeChunks();
+ for (String replaceTimeChunk : replaceTimeChunks) {
+ writer.sep(","); // sep() takes care of not printing the first separator
+ writer.literal(unparseTimeChunk(replaceTimeChunk));
+ }
+ writer.endList(frame);
+
+ if (getTargetColumnList() != null) {
+ getTargetColumnList().unparse(writer, opLeft, opRight);
+ }
+ writer.newlineAndIndent();
+ getSource().unparse(writer, 0, 0);
+ writer.keyword("PARTITIONED BY");
+ writer.keyword(partitionedByStringForUnparse);
+ }
+
+ private String unparseTimeChunk(String timeChunkString)
+ {
+ if ("all".equals(timeChunkString)) {
Review Comment:
nit: `equals` or `equalsIgnoreCase` ?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]