zzwqqq created CALCITE-7415:
-------------------------------
Summary: CalciteCatalogReader.lookupOperatorOverloads keeps
original function identifier casing instead of resolved schema-path casing
Key: CALCITE-7415
URL: https://issues.apache.org/jira/browse/CALCITE-7415
Project: Calcite
Issue Type: Bug
Components: core
Reporter: zzwqqq
CalciteCatalogReader#lookupOperatorOverloads resolves the function from the schema,
but the operator's name identifier may still keep the original input casing
(catalog/schema/function).
As a result, SqlUserDefinedFunction / RelToSql may unparse qualified UDF names
with incorrect casing, which can break SQL execution on case-sensitive dialects
or in case-sensitive environments.
Steps to reproduce:
{code:java}
import org.apache.calcite.config.Lex;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.rel2sql.RelToSqlConverter;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.AbstractTable;
import org.apache.calcite.schema.impl.ScalarFunctionImpl;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.dialect.OracleSqlDialect;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;
/**
 * Reproduction for CALCITE-7415: after case-insensitive resolution, the
 * qualified UDF name unparsed by RelToSql keeps the query's original casing
 * instead of the schema-registered casing.
 */
public class FunctionIdentifierCaseDemo {
  /** Holder for the static UDF; the mixed-case method name "MyFunc" is deliberate. */
  public static class UdfLib {
    // Identity function — the body is irrelevant; only the registered name matters.
    public static int MyFunc(int x) {
      return x;
    }
  }

  public static void main(String[] args) throws Exception {
    // Register catalog, schema, table, and function under mixed-case names.
    SchemaPlus root = Frameworks.createRootSchema(true);
    SchemaPlus catalog = root.add("MyCatalog", new AbstractSchema());
    SchemaPlus schema = catalog.add("MySchema", new AbstractSchema());
    schema.add("Emp", new AbstractTable() {
      // Single INTEGER column is enough to feed the UDF call.
      @Override public RelDataType getRowType(RelDataTypeFactory typeFactory) {
        return typeFactory.builder()
            .add("EMPNO", SqlTypeName.INTEGER)
            .build();
      }
    });
    schema.add("MyFunc", ScalarFunctionImpl.create(UdfLib.class, "MyFunc"));

    // Case-insensitive lookup (Lex.ORACLE + withCaseSensitive(false)) so that the
    // ALL-CAPS identifiers in the query still resolve to the mixed-case objects.
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .defaultSchema(root)
        .parserConfig(SqlParser.config()
            .withLex(Lex.ORACLE)
            .withCaseSensitive(false))
        .build();

    // Every identifier is spelled in upper case on purpose; after resolution the
    // generated SQL should carry the schema-registered (mixed-case) spelling.
    String query = "SELECT MYCATALOG.MYSCHEMA.MYFUNC(empno) "
        + "FROM MYCATALOG.MYSCHEMA.EMP";
    try (Planner planner = Frameworks.getPlanner(config)) {
      SqlNode parsed = planner.parse(query);
      SqlNode validated = planner.validate(parsed);
      RelNode rel = planner.rel(validated).project();
      // Unparse back to Oracle SQL: the table name comes out with the resolved
      // casing, but the qualified UDF name keeps the input casing — the bug.
      SqlNode outNode = new RelToSqlConverter(OracleSqlDialect.DEFAULT)
          .visitRoot(rel)
          .asStatement();
      String out = outNode.toSqlString(OracleSqlDialect.DEFAULT).getSql();
      System.out.println("Input SQL:");
      System.out.println(query);
      System.out.println("\nRelToSql (Oracle):");
      System.out.println(out);
    }
  }
} {code}
Expected generated SQL:
{code:java}
RelToSql (Oracle):
SELECT "MyCatalog"."MySchema"."MyFunc"("EMPNO")
FROM "MyCatalog"."MySchema"."Emp" {code}
but the actual output is:
{code:java}
RelToSql (Oracle):
SELECT "MYCATALOG"."MYSCHEMA"."MYFUNC"("EMPNO")
FROM "MyCatalog"."MySchema"."Emp" {code}
--
This message was sent by Atlassian Jira
(v8.20.10#820010)