This is an automated email from the ASF dual-hosted git repository.
kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 3e4c5cc2dd8 Add system properties table (#18692)
3e4c5cc2dd8 is described below
commit 3e4c5cc2dd8bd67e19473170da762eff038a3946
Author: Gabriel Chang <[email protected]>
AuthorDate: Thu Nov 20 14:11:22 2025 +0800
Add system properties table (#18692)
Changes
---------
- Add table `sys.server_properties` to read the runtime properties of each
service
- Add columns server, service_name, node_roles, property, value
- Add embedded tests
- Add docs
---
docs/querying/sql-metadata-tables.md | 19 ++
.../schema/SystemServerPropertiesTableTest.java | 179 ++++++++++++++++++
.../cli/CoordinatorJettyServerInitializer.java | 3 +-
.../druid/sql/calcite/schema/SystemSchema.java | 29 +--
.../schema/SystemServerPropertiesTable.java | 200 +++++++++++++++++++++
.../apache/druid/sql/calcite/CalciteQueryTest.java | 2 +
.../calcite/DruidPlannerResourceAnalyzeTest.java | 2 +
.../schema/DruidCalciteSchemaModuleTest.java | 6 +
.../druid/sql/calcite/schema/SystemSchemaTest.java | 135 +++++++++++++-
.../druid/sql/calcite/util/CalciteTests.java | 3 +-
.../org/apache/druid/sql/guice/SqlModuleTest.java | 9 +-
website/.spelling | 3 +
12 files changed, 573 insertions(+), 17 deletions(-)
diff --git a/docs/querying/sql-metadata-tables.md
b/docs/querying/sql-metadata-tables.md
index 6d59462fd1e..09d0db23d96 100644
--- a/docs/querying/sql-metadata-tables.md
+++ b/docs/querying/sql-metadata-tables.md
@@ -238,6 +238,7 @@ Servers table lists all discovered servers in the cluster.
|start_time|STRING|Timestamp in ISO8601 format when the server was announced
in the cluster|
|version|VARCHAR|Druid version running on the server|
|labels|VARCHAR|Labels for the server configured using the property
[`druid.labels`](../configuration/index.md)|
+
To retrieve information about all servers, use the query:
```sql
@@ -315,3 +316,21 @@ For example, to retrieve supervisor tasks information
filtered by health status,
```sql
SELECT * FROM sys.supervisors WHERE healthy=0;
```
+
+### SERVER_PROPERTIES table
+
+The `server_properties` table exposes the runtime properties configured for
each Druid server. Each row represents a single property key-value pair
associated with a specific server.
+
+|Column|Type|Notes|
+|------|-----|-----|
+|server|VARCHAR|Host and port of the server, in the form `host:port`|
+|service_name|VARCHAR|Service name of the server, as defined by
`druid.service`|
+|node_roles|VARCHAR|Comma-separated list of roles that the server performs.
For example, `[coordinator,overlord]` if the server functions as both a
Coordinator and an Overlord.|
+|property|VARCHAR|Name of the property|
+|value|VARCHAR|Value of the property|
+
+For example, to retrieve properties for a specific server, use the query
+
+```sql
+SELECT * FROM sys.server_properties WHERE server='192.168.1.1:8081'
+```
\ No newline at end of file
diff --git
a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java
b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java
new file mode 100644
index 00000000000..d5f062e0d47
--- /dev/null
+++
b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.collect.ImmutableList;
+import org.apache.druid.discovery.NodeRole;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.rpc.RequestBuilder;
+import org.apache.druid.testing.embedded.EmbeddedBroker;
+import org.apache.druid.testing.embedded.EmbeddedCoordinator;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.apache.druid.testing.embedded.EmbeddedOverlord;
+import org.apache.druid.testing.embedded.junit5.EmbeddedClusterTestBase;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.Map;
+
+public class SystemServerPropertiesTableTest extends EmbeddedClusterTestBase
+{
+ private static final String BROKER_PORT = "9082";
+ private static final String BROKER_SERVICE = "test/broker";
+ private static final String OVERLORD_PORT = "9090";
+ private static final String OVERLORD_SERVICE = "test/overlord";
+ private static final String COORDINATOR_PORT = "9081";
+ private static final String COORDINATOR_SERVICE = "test/coordinator";
+
+ private final EmbeddedBroker broker = new EmbeddedBroker()
+ .addProperty("druid.service", BROKER_SERVICE)
+ .addProperty("druid.plaintextPort", BROKER_PORT)
+ .addProperty("test.onlyBroker", "brokerValue")
+ .addProperty("test.nonUniqueProperty", "brokerNonUniqueValue")
+ .addProperty("password", "brokerPassword");
+
+ private final EmbeddedOverlord overlord = new EmbeddedOverlord()
+ .addProperty("druid.service", OVERLORD_SERVICE)
+ .addProperty("druid.plaintextPort", OVERLORD_PORT)
+ .addProperty("test.onlyOverlord", "overlordValue")
+ .addProperty("test.nonUniqueProperty", "overlordNonUniqueValue");
+
+ private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator()
+ .addProperty("druid.service", COORDINATOR_SERVICE)
+ .addProperty("druid.plaintextPort", COORDINATOR_PORT)
+ .addProperty("test.onlyCoordinator", "coordinatorValue")
+ .addProperty("test.nonUniqueProperty", "coordinatorNonUniqueValue");
+
+ @Override
+ protected EmbeddedDruidCluster createCluster()
+ {
+ return EmbeddedDruidCluster
+ .withZookeeper()
+ .addServer(coordinator)
+ .addServer(overlord)
+ .addServer(broker)
+ .addCommonProperty("commonProperty", "commonValue");
+ }
+
+ @Test
+ public void test_serverPropertiesTable_brokerServer()
+ {
+ final Map<String, String> brokerProps =
cluster.callApi().serviceClient().onAnyBroker(
+ mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
+ new TypeReference<>(){}
+ );
+ verifyPropertiesForServer(brokerProps, BROKER_SERVICE,
StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME);
+ }
+
+ @Test
+ public void test_serverPropertiesTable_overlordServer()
+ {
+ final Map<String, String> overlordProps =
cluster.callApi().serviceClient().onLeaderOverlord(
+ mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
+ new TypeReference<>(){}
+ );
+ verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE,
StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME);
+ }
+
+ @Test
+ public void test_serverPropertiesTable_coordinatorServer()
+ {
+ final Map<String, String> coordinatorProps =
cluster.callApi().serviceClient().onLeaderCoordinator(
+ mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
+ new TypeReference<>(){}
+ );
+ verifyPropertiesForServer(coordinatorProps, COORDINATOR_SERVICE,
StringUtils.format("localhost:%s", COORDINATOR_PORT),
NodeRole.COORDINATOR_JSON_NAME);
+ }
+
+ @Test
+ public void test_serverPropertiesTable_specificProperty()
+ {
+ Assertions.assertEquals(
+ "brokerValue",
+ cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE
server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT)
+ );
+
+ Assertions.assertEquals(
+ "brokerValue",
+ cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE
service_name = '%s' AND property = 'test.onlyBroker'", BROKER_SERVICE)
+ );
+
+ Assertions.assertEquals(
+ StringUtils.format("localhost:%s,%s,[%s],test.onlyBroker,brokerValue",
BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME),
+ cluster.runSql("SELECT * FROM sys.server_properties WHERE server =
'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT)
+ );
+
+ String[] expectedRows = new String[] {
+
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,brokerNonUniqueValue",
BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME),
+
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,overlordNonUniqueValue",
OVERLORD_PORT, OVERLORD_SERVICE, NodeRole.OVERLORD_JSON_NAME),
+
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,coordinatorNonUniqueValue",
COORDINATOR_PORT, COORDINATOR_SERVICE, NodeRole.COORDINATOR_JSON_NAME),
+ };
+ Arrays.sort(expectedRows, String::compareTo);
+ final String result = cluster.runSql("SELECT * FROM sys.server_properties
WHERE property='test.nonUniqueProperty'");
+ String[] actualRows = result.split("\n");
+ Arrays.sort(actualRows, String::compareTo);
+ Assertions.assertArrayEquals(expectedRows, actualRows);
+
+ }
+
+ @Test
+ public void test_serverPropertiesTable_hiddenProperties()
+ {
+ final Map<String, String> brokerProps =
cluster.callApi().serviceClient().onAnyBroker(
+ mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
+ new TypeReference<>(){}
+ );
+ Assertions.assertFalse(brokerProps.containsKey("password"));
+ }
+
+ private void verifyPropertiesForServer(Map<String, String> properties,
String serivceName, String hostAndPort, String nodeRole)
+ {
+ String[] expectedRows = properties.entrySet().stream().map(entry ->
String.join(
+ ",",
+ escapeCsvField(hostAndPort),
+ escapeCsvField(serivceName),
+ escapeCsvField(ImmutableList.of(nodeRole).toString()),
+ escapeCsvField(entry.getKey()),
+ escapeCsvField(entry.getValue())
+ )).toArray(String[]::new);
+ Arrays.sort(expectedRows, String::compareTo);
+ final String result = cluster.runSql("SELECT * FROM sys.server_properties
WHERE server='%s'", hostAndPort);
+ String[] actualRows = result.split("\n");
+ Arrays.sort(actualRows, String::compareTo);
+ Assertions.assertArrayEquals(expectedRows, actualRows);
+ }
+
+ /**
+ * Escapes a field value for CSV format.
+ */
+ private String escapeCsvField(String field)
+ {
+ if (field == null) {
+ return "";
+ }
+ if (field.contains(",") || field.contains("\"") || field.contains("\n") ||
field.contains("\r")) {
+ return "\"" + StringUtils.replace(field, "\"", "\"\"") + "\"";
+ }
+ return field;
+ }
+}
diff --git
a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java
b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java
index ece5fa2be4a..3fd6b41031b 100644
---
a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java
+++
b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java
@@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableList;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Key;
-import com.google.inject.servlet.GuiceFilter;
import org.apache.druid.guice.annotations.Json;
import org.apache.druid.server.http.OverlordProxyServlet;
import org.apache.druid.server.http.RedirectFilter;
@@ -110,7 +109,7 @@ class CoordinatorJettyServerInitializer implements
JettyServerInitializer
// add some paths not to be redirected to leader.
final FilterHolder guiceFilterHolder =
JettyServerInitUtils.getGuiceFilterHolder(injector);
- root.addFilter(GuiceFilter.class, "/status/*", null);
+ root.addFilter(guiceFilterHolder, "/status/*", null);
root.addFilter(guiceFilterHolder, "/druid-internal/*", null);
// redirect anything other than status to the current lead
diff --git
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
index 1343ace28fe..6278506051c 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
@@ -53,12 +53,14 @@ import org.apache.druid.discovery.DataNodeService;
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.discovery.NodeRole;
+import org.apache.druid.guice.annotations.EscalatedClient;
import org.apache.druid.indexer.TaskStatusPlus;
import org.apache.druid.indexing.overlord.supervisor.SupervisorStatus;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.jackson.JacksonUtils;
import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.rpc.indexing.OverlordClient;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;
@@ -234,7 +236,8 @@ public class SystemSchema extends AbstractSchema
final CoordinatorClient coordinatorClient,
final OverlordClient overlordClient,
final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
- final ObjectMapper jsonMapper
+ final ObjectMapper jsonMapper,
+ @EscalatedClient final HttpClient httpClient
)
{
Preconditions.checkNotNull(serverView, "serverView");
@@ -255,7 +258,9 @@ public class SystemSchema extends AbstractSchema
TASKS_TABLE,
new TasksTable(overlordClient, authorizerMapper),
SUPERVISOR_TABLE,
- new SupervisorsTable(overlordClient, authorizerMapper)
+ new SupervisorsTable(overlordClient, authorizerMapper),
+ SystemServerPropertiesTable.TABLE_NAME,
+ new SystemServerPropertiesTable(druidNodeDiscoveryProvider,
authorizerMapper, httpClient, jsonMapper)
);
}
@@ -741,13 +746,6 @@ public class SystemSchema extends AbstractSchema
}
}
- private static Iterator<DiscoveryDruidNode>
getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider)
- {
- return Arrays.stream(NodeRole.values())
- .flatMap(nodeRole ->
druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream())
- .collect(Collectors.toList())
- .iterator();
- }
}
/**
@@ -1105,7 +1103,7 @@ public class SystemSchema extends AbstractSchema
/**
* Checks if an authenticated user has the STATE READ permissions needed to
view server information.
*/
- private static void checkStateReadAccessForServers(
+ public static void checkStateReadAccessForServers(
AuthenticationResult authenticationResult,
AuthorizerMapper authorizerMapper
)
@@ -1121,6 +1119,17 @@ public class SystemSchema extends AbstractSchema
}
}
+ /**
+ * Returns an iterator over all discoverable Druid nodes in the cluster.
+ */
+ public static Iterator<DiscoveryDruidNode>
getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider)
+ {
+ return Arrays.stream(NodeRole.values())
+ .flatMap(nodeRole ->
druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream())
+ .collect(Collectors.toList())
+ .iterator();
+ }
+
/**
* Project a row using "projects" from {@link
SegmentsTable#scan(DataContext, List, int[])}.
* <p>
diff --git
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java
new file mode 100644
index 00000000000..df8b313c8e1
--- /dev/null
+++
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.error.InternalServerError;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * System schema table {@code sys.server_properties} that contains the
properties of all Druid servers.
+ * Each row contains the value of a single property. If a server has multiple
node roles, all the rows for
+ * that server would have multiple values in the column {@code node_roles}
rather than duplicating all the
+ * rows.
+ */
+public class SystemServerPropertiesTable extends AbstractTable implements
ScannableTable
+{
+ public static final String TABLE_NAME = "server_properties";
+
+ static final RowSignature ROW_SIGNATURE = RowSignature
+ .builder()
+ .add("server", ColumnType.STRING)
+ .add("service_name", ColumnType.STRING)
+ .add("node_roles", ColumnType.STRING)
+ .add("property", ColumnType.STRING)
+ .add("value", ColumnType.STRING)
+ .build();
+
+ private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
+ private final AuthorizerMapper authorizerMapper;
+ private final HttpClient httpClient;
+ private final ObjectMapper jsonMapper;
+
+ public SystemServerPropertiesTable(
+ DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
+ AuthorizerMapper authorizerMapper,
+ HttpClient httpClient,
+ ObjectMapper jsonMapper
+ )
+ {
+ this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider;
+ this.authorizerMapper = authorizerMapper;
+ this.httpClient = httpClient;
+ this.jsonMapper = jsonMapper;
+ }
+
+ @Override
+ public RelDataType getRowType(RelDataTypeFactory typeFactory)
+ {
+ return RowSignatures.toRelDataType(ROW_SIGNATURE, typeFactory);
+ }
+
+ @Override
+ public Schema.TableType getJdbcTableType()
+ {
+ return Schema.TableType.SYSTEM_TABLE;
+ }
+
+ @Override
+ public Enumerable<Object[]> scan(DataContext root)
+ {
+ final AuthenticationResult authenticationResult = (AuthenticationResult)
Preconditions.checkNotNull(
+ root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT),
+ "authenticationResult in dataContext"
+ );
+ SystemSchema.checkStateReadAccessForServers(authenticationResult,
authorizerMapper);
+ final Iterator<DiscoveryDruidNode> druidServers =
SystemSchema.getDruidServers(druidNodeDiscoveryProvider);
+
+ final Map<String, ServerProperties> serverToPropertiesMap = new
HashMap<>();
+ druidServers.forEachRemaining(discoveryDruidNode -> {
+ final DruidNode druidNode = discoveryDruidNode.getDruidNode();
+ final Map<String, String> propertiesMap = getProperties(druidNode);
+ if (serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) {
+ ServerProperties serverProperties =
serverToPropertiesMap.get(druidNode.getHostAndPortToUse());
+
serverProperties.addNodeRole(discoveryDruidNode.getNodeRole().getJsonName());
+ } else {
+ serverToPropertiesMap.put(
+ druidNode.getHostAndPortToUse(),
+ new ServerProperties(
+ druidNode.getServiceName(),
+ druidNode.getHostAndPortToUse(),
+ new
ArrayList<>(Arrays.asList(discoveryDruidNode.getNodeRole().getJsonName())),
+ propertiesMap
+ )
+ );
+ }
+ });
+ ArrayList<Object[]> rows = new ArrayList<>();
+ for (ServerProperties serverProperties : serverToPropertiesMap.values()) {
+ rows.addAll(serverProperties.toRows());
+ }
+ return Linq4j.asEnumerable(rows);
+ }
+
+ private Map<String, String> getProperties(DruidNode druidNode)
+ {
+ final String url =
druidNode.getUriToUse().resolve("/status/properties").toString();
+ try {
+ final Request request = new Request(HttpMethod.GET, new URL(url));
+ final StringFullResponseHolder response;
+ response = httpClient
+ .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8))
+ .get();
+
+ if (response.getStatus().getCode() != HttpServletResponse.SC_OK) {
+ throw new RE(
+ "Failed to get properties from node[%s]. Error code[%d],
description[%s].",
+ url,
+ response.getStatus().getCode(),
+ response.getStatus().getReasonPhrase()
+ );
+ }
+ return jsonMapper.readValue(
+ response.getContent(),
+ new TypeReference<>(){}
+ );
+ }
+ catch (Exception e) {
+ throw InternalServerError.exception(e, "HTTP request to[%s] failed",
url);
+ }
+ }
+
+ private static class ServerProperties
+ {
+ final String serviceName;
+ final String server;
+ final List<String> nodeRoles;
+ final Map<String, String> properties;
+
+ public ServerProperties(String serviceName, String server, List<String>
nodeRoles, Map<String, String> properties)
+ {
+ this.serviceName = serviceName;
+ this.server = server;
+ this.nodeRoles = nodeRoles;
+ this.properties = properties;
+ }
+
+ public void addNodeRole(String nodeRole)
+ {
+ nodeRoles.add(nodeRole);
+ }
+
+ public List<Object[]> toRows()
+ {
+ String nodeRolesString = nodeRoles.toString();
+ return properties.entrySet().stream().map(entry -> new Object[]{server,
serviceName, nodeRolesString, entry.getKey(),
entry.getValue()}).collect(Collectors.toList());
+ }
+ }
+}
diff --git
a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
index 269f5863374..af9d827af03 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
@@ -208,6 +208,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.add(new Object[]{"lookup", "lookyloo-chain", "TABLE",
"YES", "YES"})
.add(new Object[]{"lookup", "lookyloo121", "TABLE",
"YES", "YES"})
.add(new Object[]{"sys", "segments", "SYSTEM_TABLE",
"NO", "NO"})
+ .add(new Object[]{"sys", "server_properties",
"SYSTEM_TABLE", "NO", "NO"})
.add(new Object[]{"sys", "server_segments",
"SYSTEM_TABLE", "NO", "NO"})
.add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO",
"NO"})
.add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE",
"NO", "NO"})
@@ -253,6 +254,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.add(new Object[]{"lookup", "lookyloo-chain", "TABLE",
"YES", "YES"})
.add(new Object[]{"lookup", "lookyloo121", "TABLE",
"YES", "YES"})
.add(new Object[]{"sys", "segments", "SYSTEM_TABLE",
"NO", "NO"})
+ .add(new Object[]{"sys", "server_properties",
"SYSTEM_TABLE", "NO", "NO"})
.add(new Object[]{"sys", "server_segments",
"SYSTEM_TABLE", "NO", "NO"})
.add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO",
"NO"})
.add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE",
"NO", "NO"})
diff --git
a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java
b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java
index b0e707d00b8..7027a5de975 100644
---
a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java
+++
b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java
@@ -199,12 +199,14 @@ public class DruidPlannerResourceAnalyzeTest extends
BaseCalciteQueryTest
testSysTable("SELECT * FROM sys.server_segments", null,
PLANNER_CONFIG_DEFAULT);
testSysTable("SELECT * FROM sys.tasks", null, PLANNER_CONFIG_DEFAULT);
testSysTable("SELECT * FROM sys.supervisors", null,
PLANNER_CONFIG_DEFAULT);
+ testSysTable("SELECT * FROM sys.server_properties", null,
PLANNER_CONFIG_DEFAULT);
testSysTable("SELECT * FROM sys.segments", "segments",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
testSysTable("SELECT * FROM sys.servers", "servers",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
testSysTable("SELECT * FROM sys.server_segments", "server_segments",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
testSysTable("SELECT * FROM sys.tasks", "tasks",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
testSysTable("SELECT * FROM sys.supervisors", "supervisors",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
+ testSysTable("SELECT * FROM sys.server_properties", "server_properties",
PLANNER_CONFIG_AUTHORIZE_SYS_TABLES);
}
private void testSysTable(String sql, String name, PlannerConfig
plannerConfig)
diff --git
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java
index eb0ab607e82..55e72a7a682 100644
---
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java
+++
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java
@@ -37,8 +37,10 @@ import
org.apache.druid.client.coordinator.NoopCoordinatorClient;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.guice.LifecycleModule;
+import org.apache.druid.guice.annotations.EscalatedClient;
import org.apache.druid.guice.annotations.Json;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider;
import org.apache.druid.query.lookup.LookupReferencesManager;
import org.apache.druid.rpc.indexing.NoopOverlordClient;
@@ -95,6 +97,8 @@ public class DruidCalciteSchemaModuleTest extends
CalciteTestBase
private SegmentManager segmentManager;
@Mock
private DruidOperatorTable druidOperatorTable;
+ @Mock
+ private HttpClient httpClient;
private DruidCalciteSchemaModule target;
private Injector injector;
@@ -128,6 +132,8 @@ public class DruidCalciteSchemaModuleTest extends
CalciteTestBase
binder.bind(CoordinatorClient.class).to(NoopCoordinatorClient.class);
binder.bind(CentralizedDatasourceSchemaConfig.class)
.toInstance(CentralizedDatasourceSchemaConfig.create());
+ binder.bind(HttpClient.class).toInstance(httpClient);
+
binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient);
},
new LifecycleModule(),
target);
diff --git
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java
index dd5af77e92f..7c35d16e9e5 100644
---
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java
+++
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java
@@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import junitparams.converters.Nullable;
import org.apache.calcite.DataContext;
import org.apache.calcite.adapter.java.JavaTypeFactory;
@@ -64,9 +65,11 @@ import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.io.Closer;
+import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.java.util.http.client.Request;
import org.apache.druid.java.util.http.client.response.HttpResponseHandler;
import
org.apache.druid.java.util.http.client.response.InputStreamFullResponseHolder;
+import
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
import
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
@@ -112,7 +115,10 @@ import org.apache.druid.timeline.SegmentStatusInCluster;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.apache.druid.timeline.partition.ShardSpec;
import org.easymock.EasyMock;
+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.HttpVersion;
import org.joda.time.DateTime;
import org.junit.Assert;
import org.junit.jupiter.api.AfterAll;
@@ -183,6 +189,7 @@ public class SystemSchemaTest extends CalciteTestBase
private MetadataSegmentView metadataView;
private DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
private FilteredServerInventoryView serverInventoryView;
+ private HttpClient httpClient;
@BeforeAll
public static void setUpClass()
@@ -261,6 +268,7 @@ public class SystemSchemaTest extends CalciteTestBase
metadataView = EasyMock.createMock(MetadataSegmentView.class);
druidNodeDiscoveryProvider =
EasyMock.createMock(DruidNodeDiscoveryProvider.class);
serverInventoryView =
EasyMock.createMock(FilteredServerInventoryView.class);
+ httpClient = EasyMock.createMock(HttpClient.class);
schema = new SystemSchema(
druidSchema,
metadataView,
@@ -270,7 +278,8 @@ public class SystemSchemaTest extends CalciteTestBase
coordinatorClient,
overlordClient,
druidNodeDiscoveryProvider,
- MAPPER
+ MAPPER,
+ httpClient
);
}
@@ -532,13 +541,13 @@ public class SystemSchemaTest extends CalciteTestBase
public void testGetTableMap()
{
Assert.assertEquals(
- ImmutableSet.of("segments", "servers", "server_segments", "tasks",
"supervisors"),
+ ImmutableSet.of("segments", "servers", "server_segments", "tasks",
"supervisors", "server_properties"),
schema.getTableNames()
);
final Map<String, Table> tableMap = schema.getTableMap();
Assert.assertEquals(
- ImmutableSet.of("segments", "servers", "server_segments", "tasks",
"supervisors"),
+ ImmutableSet.of("segments", "servers", "server_segments", "tasks",
"supervisors", "server_properties"),
tableMap.keySet()
);
final SystemSchema.SegmentsTable segmentsTable =
(SystemSchema.SegmentsTable) schema.getTableMap().get("segments");
@@ -561,6 +570,12 @@ public class SystemSchemaTest extends CalciteTestBase
Assert.assertEquals(12, serverFields.size());
Assert.assertEquals("server", serverFields.get(0).getName());
     Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName());
+
+    final SystemServerPropertiesTable propertiesTable = (SystemServerPropertiesTable) schema.getTableMap()
+                                                                                            .get("server_properties");
+    final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl());
+    final List<RelDataTypeField> propertiesFields = propertiesRowType.getFieldList();
+    Assert.assertEquals(5, propertiesFields.size());
}
@Test
@@ -1448,6 +1463,111 @@ public class SystemSchemaTest extends CalciteTestBase
// verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE);
}
+ @Test
+ public void testPropertiesTable()
+ {
+    SystemServerPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemServerPropertiesTable.class)
+                                                          .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER)
+                                                          .createMock();
+
+ EasyMock.replay(propertiesTable);
+
+ List<Object[]> expectedRows = new ArrayList<>();
+
+ mockNodeDiscovery(NodeRole.BROKER);
+ mockNodeDiscovery(NodeRole.ROUTER);
+ mockNodeDiscovery(NodeRole.HISTORICAL);
+ mockNodeDiscovery(NodeRole.OVERLORD);
+ mockNodeDiscovery(NodeRole.PEON);
+ mockNodeDiscovery(NodeRole.INDEXER);
+
+ mockNodeDiscovery(NodeRole.COORDINATOR, coordinator, coordinator2);
+    HttpResponse coordinatorHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+    StringFullResponseHolder coordinatorResponseHolder = new StringFullResponseHolder(coordinatorHttpResponse, StandardCharsets.UTF_8);
+    String coordinatorJson = "{\"druid.test-key\": \"test-value\"}";
+ coordinatorResponseHolder.addChunk(coordinatorJson);
+ expectedRows.add(new Object[]{
+ coordinator.getDruidNode().getHostAndPortToUse(),
+ coordinator.getDruidNode().getServiceName(),
+ ImmutableList.of(coordinator.getNodeRole().getJsonName()).toString(),
+ "druid.test-key", "test-value"
+ });
+
+    HttpResponse coordinator2HttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+    StringFullResponseHolder coordinator2ResponseHolder = new StringFullResponseHolder(coordinator2HttpResponse, StandardCharsets.UTF_8);
+    String coordinator2Json = "{\"druid.test-key3\": \"test-value3\"}";
+ coordinator2ResponseHolder.addChunk(coordinator2Json);
+    expectedRows
+        .add(new Object[]{
+            coordinator2.getDruidNode().getHostAndPortToUse(),
+            coordinator2.getDruidNode().getServiceName(),
+            ImmutableList.of(coordinator2.getNodeRole().getJsonName()).toString(),
+            "druid.test-key3", "test-value3"
+        });
+
+ mockNodeDiscovery(NodeRole.MIDDLE_MANAGER, middleManager);
+    HttpResponse middleManagerHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+    StringFullResponseHolder middleManagerResponseHolder = new StringFullResponseHolder(middleManagerHttpResponse, StandardCharsets.UTF_8);
+ String middleManagerJson = "{\n"
+ + "\"druid.test-key\": \"test-value\",\n"
+ + "\"druid.test-key2\": \"test-value2\"\n"
+ + "}";
+ middleManagerResponseHolder.addChunk(middleManagerJson);
+    expectedRows
+        .add(new Object[]{
+            middleManager.getDruidNode().getHostAndPortToUse(),
+            middleManager.getDruidNode().getServiceName(),
+            ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(),
+            "druid.test-key", "test-value"
+        });
+    expectedRows
+        .add(new Object[]{
+            middleManager.getDruidNode().getHostAndPortToUse(),
+            middleManager.getDruidNode().getServiceName(),
+            ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(),
+            "druid.test-key2", "test-value2"
+        });
+
+    Map<String, ListenableFuture<StringFullResponseHolder>> urlToResponse = ImmutableMap.of(
+        getStatusPropertiesUrl(coordinator), Futures.immediateFuture(coordinatorResponseHolder),
+        getStatusPropertiesUrl(coordinator2), Futures.immediateFuture(coordinator2ResponseHolder),
+        getStatusPropertiesUrl(middleManager), Futures.immediateFuture(middleManagerResponseHolder)
+    );
+
+ EasyMock.expect(
+ httpClient.go(
+ EasyMock.isA(Request.class),
+ EasyMock.isA(StringFullResponseHandler.class)
+ )
+ ).andAnswer(() -> {
+ Request req = (Request) EasyMock.getCurrentArguments()[0];
+ String url = req.getUrl().toString();
+
+      ListenableFuture<StringFullResponseHolder> future = urlToResponse.get(url);
+      if (future != null) {
+        return future;
+      }
+      return Futures.immediateFailedFuture(new AssertionError("Unexpected URL: " + url));
+ }).times(3);
+
+ EasyMock.replay(druidNodeDiscoveryProvider, responseHandler, httpClient);
+
+ DataContext dataContext = createDataContext(Users.SUPER);
+ final List<Object[]> rows = propertiesTable.scan(dataContext).toList();
+    expectedRows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
+    rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
+ Assert.assertEquals(expectedRows.size(), rows.size());
+ for (int i = 0; i < expectedRows.size(); i++) {
+ Assert.assertArrayEquals(expectedRows.get(i), rows.get(i));
+ }
+
+ }
+
+  private String getStatusPropertiesUrl(DiscoveryDruidNode discoveryDruidNode)
+  {
+    return discoveryDruidNode.getDruidNode().getUriToUse().resolve("/status/properties").toString();
+  }
+
/**
* Creates a response holder that contains the given json.
*/
@@ -1585,6 +1705,15 @@ public class SystemSchemaTest extends CalciteTestBase
}
}
+  private DruidNodeDiscovery mockNodeDiscovery(NodeRole nodeRole, DiscoveryDruidNode... discoveryDruidNodes)
+  {
+    final DruidNodeDiscovery druidNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
+    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(nodeRole)).andReturn(druidNodeDiscovery).once();
+    EasyMock.expect(druidNodeDiscovery.getAllNodes()).andReturn(ImmutableList.copyOf(discoveryDruidNodes)).once();
+    EasyMock.replay(druidNodeDiscovery);
+    return druidNodeDiscovery;
+  }
+
/**
* Usernames to be used in tests.
*/
diff --git
a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
index eef6d87d3ae..59151d352e2 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
@@ -418,7 +418,8 @@ public class CalciteTests
coordinatorClient,
overlordClient,
provider,
- getJsonMapper()
+ getJsonMapper(),
+ new FakeHttpClient()
);
}
diff --git a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java
b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java
index f0057eb9c5d..38c344d7570 100644
--- a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java
@@ -39,10 +39,12 @@ import org.apache.druid.guice.LazySingleton;
import org.apache.druid.guice.LifecycleModule;
import org.apache.druid.guice.PolyBind;
import org.apache.druid.guice.ServerModule;
+import org.apache.druid.guice.annotations.EscalatedClient;
import org.apache.druid.guice.security.PolicyModule;
import org.apache.druid.initialization.DruidModule;
import org.apache.druid.jackson.JacksonModule;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.GenericQueryMetricsFactory;
@@ -120,6 +122,9 @@ public class SqlModuleTest
@Mock
private QueryRunnerFactoryConglomerate conglomerate;
+ @Mock
+ private HttpClient httpClient;
+
private Injector injector;
@Before
@@ -135,7 +140,8 @@ public class SqlModuleTest
queryToolChestWarehouse,
lookupExtractorFactoryContainerProvider,
joinableFactory,
- segmentCacheManager
+ segmentCacheManager,
+ httpClient
);
}
@@ -215,6 +221,7 @@ public class SqlModuleTest
binder.bind(CentralizedDatasourceSchemaConfig.class)
.toInstance(CentralizedDatasourceSchemaConfig.enabled(false));
binder.bind(DefaultQueryConfig.class).toInstance(DefaultQueryConfig.NIL);
+      binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient);
},
sqlModule,
new TestViewManagerModule()
diff --git a/website/.spelling b/website/.spelling
index 2eaa410bace..92207fe38f5 100644
--- a/website/.spelling
+++ b/website/.spelling
@@ -447,6 +447,7 @@ namespaced
namespaces
natively
netflow
+node_roles
nondescriptive
nonfinalized
non-null
@@ -539,6 +540,7 @@ searchable
secondaryPartitionPruning
seekable
seekable-stream
+service_name
servlet
setProcessingThreadNames
sigterm
@@ -677,6 +679,7 @@ PT5M
SCHEMA_NAME
SCHEMA_OWNER
SERVER_SEGMENTS
+SERVER_PROPERTIES
SMALLINT
SQL_PATH
STRING_AGG
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]