kfaraz commented on code in PR #18786:
URL: https://github.com/apache/druid/pull/18786#discussion_r2572839217


##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(
+        b -> b.submitSqlTask(
+            new ClientSqlQuery(
+                "REPLACE INTO t\n"
+                + "OVERWRITE ALL\n"
+                + "SELECT CURRENT_TIMESTAMP AS __time, 1 AS d\n"
+                + "PARTITIONED BY ALL",
+                null,
+                false,
+                false,
+                false,
+                Map.of(),
+                List.of()
+            )
+        )
+    );
+    cluster.callApi().waitForTaskToSucceed(ingestionStatus.getTaskId(), overlord);
+    try {
+      Thread.sleep(1000L);
+    }
+    catch (InterruptedException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  @Test
+  public void testSqlParseException()
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(
+                b -> b.submitSqlQuery(
+                    new ClientSqlQuery(
+                        "count(*) FROM t",
+                        null,
+                        false,
+                        false,
+                        false,
+                        Map.of(),
+                        List.of()
+                    )
+                )
+            )
+        ),
+        ExceptionMatcher.of(HttpResponseException.class)
+                        .expectMessageContains("400 Bad Request")
+    );
+  }
+
+  @Test
+  public void testSqlValidationException()
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(

Review Comment:
   Consider using the shorthand `cluster.runSql()` in these cases.
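   For instance, something along these lines (just a sketch; the exact `runSql` signature may differ):
   ```java
   // Shorthand: no need to build a ClientSqlQuery by hand for these failure cases
   Assertions.assertThrows(
       Exception.class,
       () -> cluster.runSql("count(*) FROM t")
   );
   ```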



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(

Review Comment:
   Let's use concrete types instead of `var`. I think Druid has mostly stuck to the convention of concrete types so far in the code.
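   For example (sketch only; I'm assuming the resolved type here is `SqlTaskStatus`, please double-check):
   ```java
   // Concrete type instead of var; replaceQuery is the same ClientSqlQuery built above
   final SqlTaskStatus ingestionStatus = cluster.callApi().onAnyBroker(
       b -> b.submitSqlTask(replaceQuery)
   );
   ```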



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(
+        b -> b.submitSqlTask(
+            new ClientSqlQuery(
+                "REPLACE INTO t\n"

Review Comment:
   Let's use a better datasource name. You can try to use `EmbeddedClusterApis.createTestDatasourceName()`.
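   Something like this (sketch; field placement and formatting are up to you):
   ```java
   // Unique datasource name per test run instead of the hardcoded "t"
   private final String dataSource = EmbeddedClusterApis.createTestDatasourceName();

   // ...and reference it in the ingest SQL
   final String replaceSql = StringUtils.format(
       "REPLACE INTO %s\n"
       + "OVERWRITE ALL\n"
       + "SELECT CURRENT_TIMESTAMP AS __time, 1 AS d\n"
       + "PARTITIONED BY ALL",
       dataSource
   );
   ```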



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches

Review Comment:
   Please convert this to a regular comment instead of TODO.
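   For example:
   ```java
   // onAnyRouter(...) and TLS coverage are planned as follow-up patches.
   ```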



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(

Review Comment:
   There is a shorthand available for this as well. You would need to create an `EmbeddedMsqApis` instance and then call `embeddedMsqApis.submitTaskSql`.
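   Roughly (sketch only; I'm guessing the constructor and return type here, please verify against `EmbeddedMsqApis`):
   ```java
   // Shorthand for submitting the MSQ ingest task via SQL
   final EmbeddedMsqApis msqApis = new EmbeddedMsqApis(cluster);
   final SqlTaskStatus ingestionStatus = msqApis.submitTaskSql(replaceSql);
   cluster.callApi().waitForTaskToSucceed(ingestionStatus.getTaskId(), overlord);
   ```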



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(
+        b -> b.submitSqlTask(
+            new ClientSqlQuery(
+                "REPLACE INTO t\n"
+                + "OVERWRITE ALL\n"
+                + "SELECT CURRENT_TIMESTAMP AS __time, 1 AS d\n"
+                + "PARTITIONED BY ALL",
+                null,
+                false,
+                false,
+                false,
+                Map.of(),
+                List.of()
+            )
+        )
+    );
+    cluster.callApi().waitForTaskToSucceed(ingestionStatus.getTaskId(), overlord);
+    try {
+      Thread.sleep(1000L);
+    }
+    catch (InterruptedException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  @Test
+  public void testSqlParseException()
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(
+                b -> b.submitSqlQuery(
+                    new ClientSqlQuery(
+                        "count(*) FROM t",
+                        null,
+                        false,
+                        false,
+                        false,
+                        Map.of(),
+                        List.of()
+                    )
+                )
+            )
+        ),
+        ExceptionMatcher.of(HttpResponseException.class)
+                        .expectMessageContains("400 Bad Request")
+    );
+  }
+
+  @Test
+  public void testSqlValidationException()
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(
+                b -> b.submitSqlQuery(
+                    new ClientSqlQuery(
+                        "SELECT count(*) FROM lol_kek",
+                        null,
+                        false,
+                        false,
+                        false,
+                        Map.of(),
+                        List.of()
+                    )
+                )
+            )
+        ),
+        ExceptionMatcher.of(HttpResponseException.class)
+                        .expectMessageContains("400 Bad Request")
+    );
+  }
+
+  @ParameterizedTest
+  @FieldSource("SHOULD_USE_SQL_ENGINE")
+  public void testQueryTimeout(boolean shouldUseSqlEngine)
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(
+                b -> queryFuture(b, shouldUseSqlEngine, QUERY_TIMEOUT_TEST_CONTEXT_KEY)
+            )
+        ),
+        ExceptionMatcher.of(HttpResponseException.class)
+                        .expectMessageContains("504")
+    );
+  }
+
+  @ParameterizedTest
+  @FieldSource("SHOULD_USE_SQL_ENGINE")
+  public void testQueryCapacityExceeded(boolean shouldUseSqlEngine)
+  {
+    MatcherAssert.assertThat(
+        Assertions.assertThrows(
+            Exception.class,
+            () -> cluster.callApi().onAnyBroker(
+                b -> queryFuture(
+                    b,
+                    shouldUseSqlEngine,
+                    QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY
+                )
+            )
+        ),
+        ExceptionMatcher.of(HttpResponseException.class)
+                        .expectMessageContains("429")
+    );
+  }
+
+  @ParameterizedTest
+  @FieldSource("SHOULD_USE_SQL_ENGINE")
+  public void testQueryUnsupported(boolean shouldUseSqlEngine)
+  {
+    MatcherAssert.assertThat(

Review Comment:
   Rather than using a `shouldUseSqlEngine` flag, it would be better to separately test the native and SQL APIs. It is okay to have some duplication if it makes the tests more self-explanatory.
   
   You might need to add new methods to `EmbeddedClusterApis`:
   - `runSql(...)` which accepts a `Map` queryContext
   - `runNativeQuery(...)` to act as shorthand for `onAnyBroker(b -> b.submitNativeQuery(...))`
   
   For example:
   ```java
   final Map<...> queryContext = buildTestContext(...);
   
   // Verify behaviour of native query
   MatcherAssert.assertThat(
       Assertions.assertThrows(
           Exception.class,
           () -> cluster.callApi().runNativeQuery(buildScanQuery(queryContext))
       ),
       ExceptionMatcher.of(HttpResponseException.class).expectMessageContains("429")
   );
   
   // Verify behaviour of SQL query
   MatcherAssert.assertThat(
       Assertions.assertThrows(
           Exception.class,
           () -> cluster.callApi().runSql(queryContext, "SELECT * FROM %s", dataSource)
       ),
       ExceptionMatcher.of(HttpResponseException.class).expectMessageContains("429")
   );
   ```



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/query/QueryErrorTest.java:
##########
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.query;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.druid.client.broker.BrokerClient;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.query.Druids;
+import org.apache.druid.query.QueryContexts;
+import org.apache.druid.query.http.ClientSqlQuery;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.FieldSource;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_FAILURE_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.QUERY_UNSUPPORTED_TEST_CONTEXT_KEY;
+import static org.apache.druid.testing.embedded.query.ServerManagerForQueryErrorTest.RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY;
+
+/**
+ * This class tests various query failures.
+ * <p>
+ * - SQL planning failures. Both {@link org.apache.calcite.sql.parser.SqlParseException}
+ * and {@link org.apache.calcite.tools.ValidationException} are tested using SQLs that must fail.
+ * - Various query errors from historicals. These tests use {@link ServerManagerForQueryErrorTest} to make
+ * the query to always throw an exception.
+ */
+public class QueryErrorTest extends QueryTestBase
+{
+  //  TODO: introduce onAnyRouter(...) and use it; add TLS tests in the follow-up patches
+  protected static List<Boolean> SHOULD_USE_SQL_ENGINE = List.of(true, false);
+
+  @Override
+  protected EmbeddedDruidCluster createCluster()
+  {
+    overlord.addProperty("druid.manager.segments.pollDuration", "PT0.1s");
+    indexer.setServerMemory(600_000_000)
+           .addProperty("druid.worker.capacity", "4")
+           .addProperty("druid.processing.numThreads", "2")
+           .addProperty("druid.segment.handoff.pollDuration", "PT0.1s");
+
+    return EmbeddedDruidCluster.withEmbeddedDerbyAndZookeeper()
+                               .useLatchableEmitter()
+                               .addServer(overlord)
+                               .addServer(coordinator)
+                               .addServer(broker)
+                               .addServer(router)
+                               .addServer(indexer)
+                               .addServer(historical)
+                               .addExtension(ServerManagerForQueryErrorTestModule.class);
+  }
+
+  @Override
+  protected void beforeAll()
+  {
+    var ingestionStatus = cluster.callApi().onAnyBroker(
+        b -> b.submitSqlTask(
+            new ClientSqlQuery(
+                "REPLACE INTO t\n"
+                + "OVERWRITE ALL\n"
+                + "SELECT CURRENT_TIMESTAMP AS __time, 1 AS d\n"
+                + "PARTITIONED BY ALL",
+                null,
+                false,
+                false,
+                false,
+                Map.of(),
+                List.of()
+            )
+        )
+    );
+    cluster.callApi().waitForTaskToSucceed(ingestionStatus.getTaskId(), overlord);
+    try {
+      Thread.sleep(1000L);

Review Comment:
   Why is this sleep needed?



##########
integration-tests/src/test/java/org/apache/druid/tests/query/ITQueryRetryTestOnMissingSegments.java:
##########
@@ -52,12 +51,11 @@
  * This class tests the query retry on missing segments. A segment can be missing in a historical during a query if
  * the historical drops the segment after the broker issues the query to the historical. To mimic this case, this
  * test spawns a historical modified for testing. This historical announces all segments assigned, but doesn't serve
- * all of them always. Instead, it can report missing segments for some
- * segments. See {@link ServerManagerForQueryErrorTest} for more details.
+ * all of them always. Instead, it can report missing segments for some segments.
  * <p>
  * To run this test properly, the test group must be specified as {@link TestNGGroup#QUERY_RETRY}.
  */
-@Test(groups = TestNGGroup.QUERY_RETRY)
+@Test(groups = TestNGGroup.QUERY_RETRY, enabled = false) // TODO: Temporarily disabled due to migration to embedded suite.
 @Guice(moduleFactory = DruidTestModuleFactory.class)
 public class ITQueryRetryTestOnMissingSegments

Review Comment:
   @Fly-Style, is this test not being migrated in this PR?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

