Jackie-Jiang commented on code in PR #16626:
URL: https://github.com/apache/pinot/pull/16626#discussion_r2304964327


##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryResource.java:
##########
@@ -204,6 +249,49 @@ public QueryErrorCode getErrorCode() {
     }
   }
 
+  public static class MultiStageQueryValidationRequest {
+    private final String _sql;
+    private final List<TableConfig> _tableConfigs;
+    private final List<Schema> _schemas;
+    private final List<LogicalTableConfig> _logicalTableConfigs;
+    private final Boolean _ignoreCase;
+
+    @JsonCreator
+    public MultiStageQueryValidationRequest(@JsonProperty("sql") String sql,
+        @JsonProperty("tableConfigs") @Nullable List<TableConfig> tableConfigs,
+        @JsonProperty("schemas") @Nullable List<Schema> schemas,
+        @JsonProperty("logicalTableConfigs") @Nullable 
List<LogicalTableConfig> logicalTableConfigs,
+        @JsonProperty("ignoreCase") @Nullable Boolean ignoreCase) {

Review Comment:
   Make this a primitive `boolean`; do the same for the member variable and the 
return type of the getter



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryResource.java:
##########
@@ -204,6 +249,49 @@ public QueryErrorCode getErrorCode() {
     }
   }
 
+  public static class MultiStageQueryValidationRequest {
+    private final String _sql;
+    private final List<TableConfig> _tableConfigs;
+    private final List<Schema> _schemas;
+    private final List<LogicalTableConfig> _logicalTableConfigs;
+    private final Boolean _ignoreCase;
+
+    @JsonCreator
+    public MultiStageQueryValidationRequest(@JsonProperty("sql") String sql,
+        @JsonProperty("tableConfigs") @Nullable List<TableConfig> tableConfigs,
+        @JsonProperty("schemas") @Nullable List<Schema> schemas,
+        @JsonProperty("logicalTableConfigs") @Nullable 
List<LogicalTableConfig> logicalTableConfigs,
+        @JsonProperty("ignoreCase") @Nullable Boolean ignoreCase) {
+      _sql = sql;
+      _tableConfigs = tableConfigs != null ? tableConfigs : new ArrayList<>();
+      _schemas = schemas != null ? schemas : new ArrayList<>();
+      _logicalTableConfigs = logicalTableConfigs != null ? logicalTableConfigs 
: new ArrayList<>();

Review Comment:
   Since the getter is `@Nullable`, there is no need to assign an empty list here



##########
pinot-common/src/main/java/org/apache/pinot/common/config/provider/ZkTableCache.java:
##########
@@ -646,44 +640,6 @@ public synchronized void handleDataDeleted(String path) {
     }
   }
 
-  private static Map<Expression, Expression> 
createExpressionOverrideMap(String physicalOrLogicalTableName,
-    QueryConfig queryConfig) {
-    Map<Expression, Expression> expressionOverrideMap = new TreeMap<>();
-    if (queryConfig != null && 
MapUtils.isNotEmpty(queryConfig.getExpressionOverrideMap())) {
-      for (Map.Entry<String, String> entry : 
queryConfig.getExpressionOverrideMap().entrySet()) {
-        try {
-          Expression srcExp = 
CalciteSqlParser.compileToExpression(entry.getKey());
-          Expression destExp = 
CalciteSqlParser.compileToExpression(entry.getValue());
-          expressionOverrideMap.put(srcExp, destExp);
-        } catch (Exception e) {
-          LOGGER.warn("Caught exception while compiling expression override: 
{} -> {} for table: {}, skipping it",
-            entry.getKey(), entry.getValue(), physicalOrLogicalTableName);
-        }
-      }
-      int mapSize = expressionOverrideMap.size();
-      if (mapSize == 1) {
-        Map.Entry<Expression, Expression> entry = 
expressionOverrideMap.entrySet().iterator().next();
-        return Collections.singletonMap(entry.getKey(), entry.getValue());
-      } else if (mapSize > 1) {
-        return expressionOverrideMap;
-      }
-    }
-    return null;
-  }
-
-  private static class TableConfigInfo {
-    final TableConfig _tableConfig;
-    final Map<Expression, Expression> _expressionOverrideMap;
-    // All the timestamp with granularity column names
-    final Set<String> _timestampIndexColumns;
-
-    private TableConfigInfo(TableConfig tableConfig) {
-      _tableConfig = tableConfig;
-      _expressionOverrideMap = 
createExpressionOverrideMap(tableConfig.getTableName(), 
tableConfig.getQueryConfig());
-      _timestampIndexColumns = 
TimestampIndexUtils.extractColumnsWithGranularity(tableConfig);
-    }
-  }
-
   private static class SchemaInfo {

Review Comment:
   Do you need to also move `SchemaInfo` into `TableCache`?



##########
pinot-common/src/main/java/org/apache/pinot/common/config/provider/StaticTableCache.java:
##########
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.config.provider;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import javax.annotation.Nullable;
+import org.apache.pinot.common.request.Expression;
+import org.apache.pinot.spi.config.provider.LogicalTableConfigChangeListener;
+import org.apache.pinot.spi.config.provider.SchemaChangeListener;
+import org.apache.pinot.spi.config.provider.TableConfigChangeListener;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.LogicalTableConfig;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.utils.CommonConstants.Segment.BuiltInVirtualColumn;
+import org.apache.pinot.spi.utils.builder.TableNameBuilder;
+import org.jvnet.hk2.annotations.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A static implementation that works with pre-loaded table configs and 
schemas.
+ * This is useful for validation scenarios where you want to test query 
compilation against a specific
+ * set of table configs and schemas without needing a live cluster.
+ */
+public class StaticTableCache implements TableCache {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(StaticTableCache.class);
+
+  private final boolean _ignoreCase;
+  private final Map<String, TableConfig> _tableConfigMap = new HashMap<>();
+  private final Map<String, Schema> _schemaMap = new HashMap<>();
+  private final Map<String, LogicalTableConfig> _logicalTableConfigMap = new 
HashMap<>();
+  private final Map<String, String> _tableNameMap = new HashMap<>();
+  private final Map<String, TableConfigInfo> _tableConfigInfoMap = new 
ConcurrentHashMap<>();
+  private final Map<String, LogicalTableConfigInfo> _logicalTableConfigInfoMap 
= new ConcurrentHashMap<>();
+  private final Map<String, String> _logicalTableNameMap = new HashMap<>();
+  private final Map<String, Map<String, String>> _columnNameMaps = new 
HashMap<>();
+
+  public StaticTableCache(List<TableConfig> tableConfigs, List<Schema> schemas,
+      @Optional List<LogicalTableConfig> logicalTableConfigs, boolean 
ignoreCase) {

Review Comment:
   We don't usually use `@Optional`. Use `@Nullable` instead



##########
pinot-common/src/main/java/org/apache/pinot/common/config/provider/StaticTableCache.java:
##########
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.config.provider;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import javax.annotation.Nullable;
+import org.apache.pinot.common.request.Expression;
+import org.apache.pinot.spi.config.provider.LogicalTableConfigChangeListener;
+import org.apache.pinot.spi.config.provider.SchemaChangeListener;
+import org.apache.pinot.spi.config.provider.TableConfigChangeListener;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.LogicalTableConfig;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.utils.CommonConstants.Segment.BuiltInVirtualColumn;
+import org.apache.pinot.spi.utils.builder.TableNameBuilder;
+import org.jvnet.hk2.annotations.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A static implementation that works with pre-loaded table configs and 
schemas.
+ * This is useful for validation scenarios where you want to test query 
compilation against a specific
+ * set of table configs and schemas without needing a live cluster.
+ */
+public class StaticTableCache implements TableCache {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(StaticTableCache.class);
+
+  private final boolean _ignoreCase;
+  private final Map<String, TableConfig> _tableConfigMap = new HashMap<>();
+  private final Map<String, Schema> _schemaMap = new HashMap<>();
+  private final Map<String, LogicalTableConfig> _logicalTableConfigMap = new 
HashMap<>();
+  private final Map<String, String> _tableNameMap = new HashMap<>();
+  private final Map<String, TableConfigInfo> _tableConfigInfoMap = new 
ConcurrentHashMap<>();
+  private final Map<String, LogicalTableConfigInfo> _logicalTableConfigInfoMap 
= new ConcurrentHashMap<>();
+  private final Map<String, String> _logicalTableNameMap = new HashMap<>();
+  private final Map<String, Map<String, String>> _columnNameMaps = new 
HashMap<>();
+
+  public StaticTableCache(List<TableConfig> tableConfigs, List<Schema> schemas,
+      @Optional List<LogicalTableConfig> logicalTableConfigs, boolean 
ignoreCase) {
+    _ignoreCase = ignoreCase;
+
+    for (TableConfig tableConfig : tableConfigs) {
+      String tableNameWithType = tableConfig.getTableName();
+      String rawTableName = 
TableNameBuilder.extractRawTableName(tableNameWithType);
+
+      _tableConfigMap.put(tableNameWithType, tableConfig);
+      _tableConfigInfoMap.put(tableNameWithType, new 
TableConfigInfo(tableConfig));
+      if (_ignoreCase) {
+        _tableNameMap.put(tableNameWithType.toLowerCase(), tableNameWithType);
+        _tableNameMap.put(rawTableName.toLowerCase(), rawTableName);
+      } else {
+        _tableNameMap.put(tableNameWithType, tableNameWithType);
+        _tableNameMap.put(rawTableName, rawTableName);
+      }
+    }
+
+    for (Schema schema : schemas) {
+      String schemaName = schema.getSchemaName();
+      _schemaMap.put(schemaName, schema);
+      Map<String, String> columnNameMap = new 
TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+      for (FieldSpec fieldSpec : schema.getAllFieldSpecs()) {
+        String columnName = fieldSpec.getName();
+        if (_ignoreCase) {
+          columnNameMap.put(columnName.toLowerCase(), columnName);
+        } else {
+          columnNameMap.put(columnName, columnName);
+        }
+      }
+      addBuiltInVirtualColumns(columnNameMap);
+      _columnNameMaps.put(schemaName, 
Collections.unmodifiableMap(columnNameMap));
+    }
+
+    if (logicalTableConfigs != null) {
+      for (LogicalTableConfig logicalTableConfig : logicalTableConfigs) {
+        String logicalTableName = logicalTableConfig.getTableName();
+        _logicalTableConfigMap.put(logicalTableName, logicalTableConfig);
+        _logicalTableConfigInfoMap.put(logicalTableName, new 
LogicalTableConfigInfo(logicalTableConfig));
+        if (_ignoreCase) {
+          _logicalTableNameMap.put(logicalTableName.toLowerCase(), 
logicalTableName);
+        } else {
+          _logicalTableNameMap.put(logicalTableName, logicalTableName);
+        }
+      }
+    }
+
+    LOGGER.info(
+        "Initialized QueryValidator with {} table configs, {} schemas, {} 
logical table configs (ignoreCase: {})",
+        _tableConfigMap.size(), _schemaMap.size(), 
_logicalTableNameMap.size(), ignoreCase);
+  }
+
+  @Override
+  public boolean isIgnoreCase() {
+    return _ignoreCase;
+  }
+
+  @Nullable
+  @Override
+  public String getActualTableName(String tableName) {
+    if (_ignoreCase) {
+      return _tableNameMap.get(tableName.toLowerCase());
+    } else {
+      return _tableNameMap.get(tableName);
+    }
+  }
+
+  @Override
+  @Nullable

Review Comment:
   (nit) `@Nullable` above `@Override`



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryResource.java:
##########
@@ -153,27 +159,66 @@ public StreamingOutput 
handleTimeSeriesQueryRange(@QueryParam("language") String
 
   @POST
   @Path("validateMultiStageQuery")
-  public MultiStageQueryValidationResponse validateMultiStageQuery(String 
requestJsonStr,
+  public MultiStageQueryValidationResponse 
validateMultiStageQuery(MultiStageQueryValidationRequest request,
       @Context HttpHeaders httpHeaders) {
-    JsonNode requestJson;
-    try {
-      requestJson = JsonUtils.stringToJsonNode(requestJsonStr);
-    } catch (Exception e) {
-      LOGGER.warn("Caught exception while parsing request {}", e.getMessage());
-      return new MultiStageQueryValidationResponse(false, "Failed to parse 
request JSON: " + e.getMessage(), null);
-    }
-    if (!requestJson.has("sql")) {
-      return new MultiStageQueryValidationResponse(false, "JSON Payload is 
missing the query string field 'sql'", null);
+
+    if (request.getSql() == null || request.getSql().trim().isEmpty()) {
+      return new MultiStageQueryValidationResponse(false, "Request is missing 
the query string field 'sql'", null);
     }
-    String sqlQuery = requestJson.get("sql").asText();
+
+    String sqlQuery = request.getSql();
     Map<String, String> queryOptionsMap = 
RequestUtils.parseQuery(sqlQuery).getOptions();
     String database = 
DatabaseUtils.extractDatabaseFromQueryRequest(queryOptionsMap, httpHeaders);
-    try (QueryEnvironment.CompiledQuery compiledQuery = new 
QueryEnvironment(database,
-        _pinotHelixResourceManager.getTableCache(), null).compile(sqlQuery)) {
-      return new MultiStageQueryValidationResponse(true, null, null);
+
+    try {
+      TableCache tableCache;
+      if (request.getTableConfigs() != null && 
!request.getTableConfigs().isEmpty() && request.getSchemas() != null
+          && !request.getSchemas().isEmpty()) {
+        List<TableConfig> tableConfigs = new ArrayList<>();
+        if (request.getTableConfigs() != null) {

Review Comment:
   Redundant check — the enclosing `if` condition already guarantees that 
`request.getTableConfigs()` is non-null at this point



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryResource.java:
##########
@@ -153,27 +159,66 @@ public StreamingOutput 
handleTimeSeriesQueryRange(@QueryParam("language") String
 
   @POST
   @Path("validateMultiStageQuery")
-  public MultiStageQueryValidationResponse validateMultiStageQuery(String 
requestJsonStr,
+  public MultiStageQueryValidationResponse 
validateMultiStageQuery(MultiStageQueryValidationRequest request,
       @Context HttpHeaders httpHeaders) {
-    JsonNode requestJson;
-    try {
-      requestJson = JsonUtils.stringToJsonNode(requestJsonStr);
-    } catch (Exception e) {
-      LOGGER.warn("Caught exception while parsing request {}", e.getMessage());
-      return new MultiStageQueryValidationResponse(false, "Failed to parse 
request JSON: " + e.getMessage(), null);
-    }
-    if (!requestJson.has("sql")) {
-      return new MultiStageQueryValidationResponse(false, "JSON Payload is 
missing the query string field 'sql'", null);
+
+    if (request.getSql() == null || request.getSql().trim().isEmpty()) {
+      return new MultiStageQueryValidationResponse(false, "Request is missing 
the query string field 'sql'", null);
     }
-    String sqlQuery = requestJson.get("sql").asText();
+
+    String sqlQuery = request.getSql();
     Map<String, String> queryOptionsMap = 
RequestUtils.parseQuery(sqlQuery).getOptions();
     String database = 
DatabaseUtils.extractDatabaseFromQueryRequest(queryOptionsMap, httpHeaders);
-    try (QueryEnvironment.CompiledQuery compiledQuery = new 
QueryEnvironment(database,
-        _pinotHelixResourceManager.getTableCache(), null).compile(sqlQuery)) {
-      return new MultiStageQueryValidationResponse(true, null, null);
+
+    try {
+      TableCache tableCache;
+      if (request.getTableConfigs() != null && 
!request.getTableConfigs().isEmpty() && request.getSchemas() != null

Review Comment:
   Use `CollectionUtils.isNotEmpty()` to simplify the combined null-and-empty 
check



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryResource.java:
##########
@@ -153,27 +159,66 @@ public StreamingOutput 
handleTimeSeriesQueryRange(@QueryParam("language") String
 
   @POST
   @Path("validateMultiStageQuery")
-  public MultiStageQueryValidationResponse validateMultiStageQuery(String 
requestJsonStr,
+  public MultiStageQueryValidationResponse 
validateMultiStageQuery(MultiStageQueryValidationRequest request,
       @Context HttpHeaders httpHeaders) {
-    JsonNode requestJson;
-    try {
-      requestJson = JsonUtils.stringToJsonNode(requestJsonStr);
-    } catch (Exception e) {
-      LOGGER.warn("Caught exception while parsing request {}", e.getMessage());
-      return new MultiStageQueryValidationResponse(false, "Failed to parse 
request JSON: " + e.getMessage(), null);
-    }
-    if (!requestJson.has("sql")) {
-      return new MultiStageQueryValidationResponse(false, "JSON Payload is 
missing the query string field 'sql'", null);
+
+    if (request.getSql() == null || request.getSql().trim().isEmpty()) {
+      return new MultiStageQueryValidationResponse(false, "Request is missing 
the query string field 'sql'", null);
     }
-    String sqlQuery = requestJson.get("sql").asText();
+
+    String sqlQuery = request.getSql();
     Map<String, String> queryOptionsMap = 
RequestUtils.parseQuery(sqlQuery).getOptions();
     String database = 
DatabaseUtils.extractDatabaseFromQueryRequest(queryOptionsMap, httpHeaders);
-    try (QueryEnvironment.CompiledQuery compiledQuery = new 
QueryEnvironment(database,
-        _pinotHelixResourceManager.getTableCache(), null).compile(sqlQuery)) {
-      return new MultiStageQueryValidationResponse(true, null, null);
+
+    try {
+      TableCache tableCache;
+      if (request.getTableConfigs() != null && 
!request.getTableConfigs().isEmpty() && request.getSchemas() != null
+          && !request.getSchemas().isEmpty()) {
+        List<TableConfig> tableConfigs = new ArrayList<>();

Review Comment:
   Any specific reason why you want to make a copy? I think you can directly 
pass in the ones from the request



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to