rohangarg commented on code in PR #13165: URL: https://github.com/apache/druid/pull/13165#discussion_r1004819621
########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/storage/sql/SQLCatalogManager.java: ########## @@ -0,0 +1,578 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.storage.sql; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.storage.MetastoreManager; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import 
org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.function.Function; + +@ManageLifecycle +public class SQLCatalogManager implements CatalogManager +{ + public static final String TABLES_TABLE = "tableDefs"; + + private static final String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_PAYLOAD = + "SELECT state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER 
BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final MetastoreManager metastoreManager; + private final SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque<Listener> listeners = new ConcurrentLinkedDeque<>(); + + @Inject + public SQLCatalogManager(MetastoreManager metastoreManager) + { + if (!metastoreManager.isSql()) { + throw new ISE("SQLCatalogManager only works with SQL based metadata store at this time"); + } + this.metastoreManager = metastoreManager; + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = metastoreManager.jsonMapper(); + this.tableName = getTableDefnTable(); + } + + @Override + @LifecycleStart + public void start() + { + createTableDefnTable(); + } + + @Override + public void stop() + { + } + + // Mimics what MetadataStorageTablesConfig should do. + public String getTableDefnTable() + { + final String base = metastoreManager.tablesConfig().getBase(); + if (Strings.isNullOrEmpty(base)) { + return TABLES_TABLE; + } else { + return StringUtils.format("%s_%s", base, TABLES_TABLE); + } + } + + // TODO: Move to SqlMetadataConnector + @Override + public void createTableDefnTable() + { + if (!metastoreManager.createTables()) { + return; + } + connector.createTable( + tableName, + ImmutableList.of( + StringUtils.format( + "CREATE TABLE %s (\n" + + " schemaName VARCHAR(255) NOT NULL,\n" + + " name VARCHAR(255) NOT NULL,\n" + + " creationTime BIGINT NOT NULL,\n" + + " updateTime BIGINT NOT NULL,\n" + + " state CHAR(1) NOT NULL,\n" + + " payload %s,\n" + + " PRIMARY KEY(schemaName, name)\n" + + ")", + tableName, + connector.getPayloadType()))); + } + + @Override + public long create(TableMetadata table) throws DuplicateKeyException + { + try { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override + public Long 
withHandle(Handle handle) throws DuplicateKeyException + { + long updateTime = System.currentTimeMillis(); + Update stmt = handle.createStatement( + StringUtils.format(INSERT_TABLE, tableName) + ) + .bind("schemaName", table.id().schema()) + .bind("name", table.id().name()) + .bind("creationTime", updateTime) + .bind("updateTime", updateTime) + .bind("state", TableMetadata.TableState.ACTIVE.code()) + .bind("payload", table.spec().toBytes(jsonMapper)); + try { + stmt.execute(); + } + catch (UnableToExecuteStatementException e) { + if (DbUtils.isDuplicateRecordException(e)) { + throw new DuplicateKeyException( + "Tried to insert a duplicate table: " + table.sqlName(), + e); + } else { + throw e; + } + } + sendAddition(table, updateTime); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof DuplicateKeyException) { + throw (DuplicateKeyException) e.getCause(); + } + throw e; + } + } + + @Override + public TableMetadata read(TableId id) + { + return dbi.withHandle( + new HandleCallback<TableMetadata>() + { + @Override + public TableMetadata withHandle(Handle handle) + { + Query<Map<String, Object>> query = handle.createQuery( + StringUtils.format(SELECT_TABLE, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + final ResultIterator<TableMetadata> resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + id, + r.getLong(1), + r.getLong(2), + TableMetadata.TableState.fromCode(r.getString(3)), + TableSpec.fromBytes(jsonMapper, r.getBytes(4)) + )) + .iterator(); + if (resultIterator.hasNext()) { + return resultIterator.next(); Review Comment: should this be closed explicitly in a finally block? 
########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java: ########## @@ -0,0 +1,747 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.http; + +import com.google.common.base.Strings; +import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.druid.catalog.model.ColumnSpec; +import org.apache.druid.catalog.model.SchemaRegistry.SchemaSpec; +import org.apache.druid.catalog.model.TableDefnRegistry; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.model.table.AbstractDatasourceDefn; +import org.apache.druid.catalog.storage.Actions; +import org.apache.druid.catalog.storage.CatalogStorage; +import org.apache.druid.catalog.storage.HideColumns; +import org.apache.druid.catalog.storage.MoveColumn; +import org.apache.druid.catalog.storage.MoveColumn.Position; +import org.apache.druid.catalog.storage.sql.CatalogManager.DuplicateKeyException; +import org.apache.druid.catalog.storage.sql.CatalogManager.NotFoundException; +import 
org.apache.druid.catalog.storage.sql.CatalogManager.OutOfDateException; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +/** + * REST endpoint for user and internal catalog actions. Catalog actions + * occur at the global level (all schemas), the schema level, or the + * table level. + * + * @see {@link CatalogListenerResource} for the client-side API. + */ +@Path(CatalogResource.ROOT_PATH) +public class CatalogResource +{ + public static final String ROOT_PATH = "/druid/coordinator/v1/catalog"; + + private final CatalogStorage catalog; + + @Inject + public CatalogResource(final CatalogStorage catalog) + { + this.catalog = catalog; + } + + private enum PostAction + { + NEW, + IFNEW, + REPLACE, + FORCE; + } + + /** + * Create a new table containing the given table specification. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. 
The user must + * have write access to the table. + * @param spec The new table definition. + * @param actionParam What to do if the table already exists. + * {@code ifNew} is the same as the SQL IF NOT EXISTS clause. If {@code new}, + * then an error is raised if the table exists. If {@code ifNew}, then + * the action silently does nothing if the table exists. Primarily for + * use in scripts. The other two options are primarily for use in tests. + * @param req the HTTP request used for authorization. + * @return the version number of the table + */ + @POST + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response postTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("action") String actionParam, + @QueryParam("version") long version, + @Context final HttpServletRequest req + ) + { + final PostAction action; + if (actionParam == null) { + action = PostAction.NEW; + } else { + action = PostAction.valueOf(StringUtils.toUpperCase(actionParam)); + if (action == null) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Not a valid action: [%s]. 
Valid actions are new, ifNew, replace, force", + actionParam + ) + ); + } + } + TableId tableId = TableId.of(dbSchema, name); + Response response = authorizeTable(tableId, spec, req); + if (response != null) { + return response; + } + TableMetadata table = TableMetadata.newTable(tableId, spec); + try { + catalog.validate(table); + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + + switch (action) { + case NEW: + return insertTableSpec(table, false); + case IFNEW: + return insertTableSpec(table, true); + case REPLACE: + return updateTableSpec(table, version); + case FORCE: + return addOrUpdateTableSpec(table); + default: + throw new ISE("Unknown action."); + } + } + + private Response authorizeTable(TableId tableId, TableSpec spec, final HttpServletRequest req) + { + // Druid has a fixed set of schemas. Ensure the one provided is valid. + Pair<Response, SchemaSpec> result = validateSchema(tableId.schema()); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + + // The schema has to be one that allows table definitions. + if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot modify schema %s", tableId.schema()) + ); + } + + // Table name can't be blank or have spaces + if (Strings.isNullOrEmpty(tableId.name())) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + if (!tableId.name().equals(tableId.name().trim())) { + return Actions.badRequest(Actions.INVALID, "Table name cannot start or end with spaces"); + } + + // The user has to have permission to modify the table. + try { + catalog.authorizer().authorizeTable(schema, tableId.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + + // Validate the spec, if provided. + if (spec != null) { + + // The given table spec has to be valid for the given schema. 
+ if (Strings.isNullOrEmpty(spec.type())) { + return Actions.badRequest(Actions.INVALID, "Table type is required"); + } + + if (!schema.accepts(spec.type())) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot create tables of type %s in schema %s", + spec.getClass().getSimpleName(), + tableId.schema() + ) + ); + } + } + + // Everything checks out, let the request proceed. + return null; + } + + private Response insertTableSpec(TableMetadata table, boolean ifNew) + { + try { + long createVersion = catalog.tables().create(table); + return Actions.okWithVersion(createVersion); + } + catch (DuplicateKeyException e) { + if (!ifNew) { + return Actions.badRequest( + Actions.DUPLICATE_ERROR, + StringUtils.format( + "A table of name %s already exists", + table.id().sqlName() + ) + ); + } else { + return Actions.okWithVersion(0); + } + } + catch (Exception e) { + return Actions.exception(e); + } + } + + private Response updateTableSpec(TableMetadata table, long version) + { + try { + long newVersion = catalog.tables().update(table, version); + return Actions.okWithVersion(newVersion); + } + catch (NotFoundException e) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + catch (OutOfDateException e) { + return Response + .status(Response.Status.BAD_REQUEST) + .entity( + Actions.error( + Actions.DUPLICATE_ERROR, + "The table entry not found or is older than the given version: reload and retry")) + .build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + private Response addOrUpdateTableSpec(TableMetadata table) + { + try { + long newVersion = catalog.tables().create(table); + return Actions.okWithVersion(newVersion); + } + catch (DuplicateKeyException e) { + // Fall through + } + catch (Exception e) { + return Actions.exception(e); + } + try { + long newVersion = catalog.tables().update(table, 0); + return Actions.okWithVersion(newVersion); + } + catch (Exception e) { + return Actions.exception(e); + } + } 
+ + /** + * Update a table within the given schema. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param spec The new table definition. + * @param version An optional table version. If provided, the metadata DB + * entry for the table must be at this exact version or the update + * will fail. (Provides "optimistic locking.") If omitted (that is, + * if zero), then no update conflict change is done. + * @param req the HTTP request used for authorization. + * @return the new version number of the table + */ + @PUT + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response updateTableDefn( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("version") long version, + @Context final HttpServletRequest req + ) + { + + TableDefnRegistry tableRegistry = catalog.tableRegistry(); + return incrementalUpdate( + TableId.of(dbSchema, name), + spec, + req, + (existing) -> tableRegistry.resolve(existing).merge(spec).spec() + ); + } + + private Response incrementalUpdate( + TableId tableId, + TableSpec newSpec, + @Context final HttpServletRequest req, + Function<TableSpec, TableSpec> action + ) + { + Response response = authorizeTable(tableId, newSpec, req); + if (response != null) { + return response; + } + try { + long newVersion = catalog.tables().updatePayload(tableId, action); + return Actions.okWithVersion(newVersion); + } + catch (NotFoundException e) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Move a single column to the start end of the column list, or before or after + * another column. Both columns must exist. 
Returns the version of the table + * after the update. + * <p> + * The operation is done atomically so no optimistic locking is required. + * + * @param dbSchema + * @param name + * @param command + * @param req + * @return + */ + @POST + @Path("/tables/{dbSchema}/{name}/moveColumn") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response moveColumn( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final MoveColumn command, + @Context final HttpServletRequest req + ) + { + if (command == null) { + return Actions.badRequest(Actions.INVALID, "A MoveColumn object is required"); + } + if (Strings.isNullOrEmpty(command.column)) { + return Actions.badRequest(Actions.INVALID, "A column name is required"); + } + if (command.where == null) { + return Actions.badRequest(Actions.INVALID, "A target location is required"); + } + if ((command.where == Position.BEFORE || command.where == Position.AFTER) && Strings.isNullOrEmpty(command.anchor)) { + return Actions.badRequest(Actions.INVALID, "A anchor column is required for BEFORE or AFTER"); + } + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> spec.withColumns(command.perform(spec.columns())) + ); + } + + /** + * Hide or unhide columns. If both appear, hide takes precedence. Returns the + * new table version. 
+ */ + @POST + @Path("/tables/{dbSchema}/{name}/hideColumns") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response hideColumns( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final HideColumns command, + @Context final HttpServletRequest req + ) + { + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> { + if (!AbstractDatasourceDefn.isDatasource(spec.type())) { + throw new ISE("hideColumns is supported only for data source specs"); + } + @SuppressWarnings("unchecked") + List<String> hiddenProps = (List<String>) spec.properties().get(AbstractDatasourceDefn.HIDDEN_COLUMNS_PROPERTY); + return spec.withProperty( + AbstractDatasourceDefn.HIDDEN_COLUMNS_PROPERTY, + command.perform(hiddenProps) + ); + } + ); + } + + /** + * Drop column metadata. Only removes metadata entries, has no effect on the + * physical segments. Returns the new table version. + */ + @POST + @Path("/tables/{dbSchema}/{name}/dropColumns") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response dropColumns( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final List<String> columns, + @Context final HttpServletRequest req + ) + { + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> spec.withColumns(dropColumns(spec.columns(), columns)) + ); + } + + private static <T extends ColumnSpec> List<T> dropColumns( + final List<T> columns, + final List<String> toDrop) + { + if (toDrop == null || toDrop.isEmpty()) { + return columns; + } + Set<String> drop = new HashSet<String>(toDrop); + List<T> revised = new ArrayList<>(); + for (T col : columns) { + if (!drop.contains(col.name())) { + revised.add(col); + } + } + return revised; + } + + /** + * Retrieves the definition of the given table. + * <p> + * Returns a 404 (NOT FOUND) error if the table definition does not exist. 
+ * Note that this check is only for the <i>definition</i>; the table (or + * datasource) itself may exist. Similarly, this call may return a definition + * even if there is no datasource of the same name (typically occurs when + * the definition is created before the datasource itself.) + * + * @param dbSchema The Druid schema. The user must have read access. + * @param name The name of the table within the schema. The user must have + * read access. + * @param req the HTTP request used for authorization. + * @return the definition for the table, if any. + */ + @GET + @Path("/tables/{dbSchema}/{name}") + @Produces(MediaType.APPLICATION_JSON) + public Response getTable( Review Comment: the name should be consistent with `listTableDetails` since both are similar ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java: ########## @@ -0,0 +1,747 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog.http; + +import com.google.common.base.Strings; +import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.druid.catalog.model.ColumnSpec; +import org.apache.druid.catalog.model.SchemaRegistry.SchemaSpec; +import org.apache.druid.catalog.model.TableDefnRegistry; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.model.table.AbstractDatasourceDefn; +import org.apache.druid.catalog.storage.Actions; +import org.apache.druid.catalog.storage.CatalogStorage; +import org.apache.druid.catalog.storage.HideColumns; +import org.apache.druid.catalog.storage.MoveColumn; +import org.apache.druid.catalog.storage.MoveColumn.Position; +import org.apache.druid.catalog.storage.sql.CatalogManager.DuplicateKeyException; +import org.apache.druid.catalog.storage.sql.CatalogManager.NotFoundException; +import org.apache.druid.catalog.storage.sql.CatalogManager.OutOfDateException; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.util.ArrayList; +import java.util.Collections; +import 
java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +/** + * REST endpoint for user and internal catalog actions. Catalog actions + * occur at the global level (all schemas), the schema level, or the + * table level. + * + * @see {@link CatalogListenerResource} for the client-side API. + */ +@Path(CatalogResource.ROOT_PATH) +public class CatalogResource +{ + public static final String ROOT_PATH = "/druid/coordinator/v1/catalog"; + + private final CatalogStorage catalog; + + @Inject + public CatalogResource(final CatalogStorage catalog) + { + this.catalog = catalog; + } + + private enum PostAction + { + NEW, + IFNEW, + REPLACE, + FORCE; + } + + /** + * Create a new table containing the given table specification. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param spec The new table definition. + * @param actionParam What to do if the table already exists. + * {@code ifNew} is the same as the SQL IF NOT EXISTS clause. If {@code new}, + * then an error is raised if the table exists. If {@code ifNew}, then + * the action silently does nothing if the table exists. Primarily for + * use in scripts. The other two options are primarily for use in tests. + * @param req the HTTP request used for authorization. 
+ * @return the version number of the table + */ + @POST + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response postTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("action") String actionParam, + @QueryParam("version") long version, + @Context final HttpServletRequest req + ) + { + final PostAction action; + if (actionParam == null) { + action = PostAction.NEW; + } else { + action = PostAction.valueOf(StringUtils.toUpperCase(actionParam)); + if (action == null) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Not a valid action: [%s]. Valid actions are new, ifNew, replace, force", + actionParam + ) + ); + } + } + TableId tableId = TableId.of(dbSchema, name); + Response response = authorizeTable(tableId, spec, req); + if (response != null) { + return response; + } + TableMetadata table = TableMetadata.newTable(tableId, spec); + try { + catalog.validate(table); + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + + switch (action) { + case NEW: + return insertTableSpec(table, false); + case IFNEW: + return insertTableSpec(table, true); + case REPLACE: + return updateTableSpec(table, version); + case FORCE: + return addOrUpdateTableSpec(table); + default: + throw new ISE("Unknown action."); + } + } + + private Response authorizeTable(TableId tableId, TableSpec spec, final HttpServletRequest req) + { + // Druid has a fixed set of schemas. Ensure the one provided is valid. + Pair<Response, SchemaSpec> result = validateSchema(tableId.schema()); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + + // The schema has to be one that allows table definitions. 
+ if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot modify schema %s", tableId.schema()) + ); + } + + // Table name can't be blank or have spaces + if (Strings.isNullOrEmpty(tableId.name())) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + if (!tableId.name().equals(tableId.name().trim())) { + return Actions.badRequest(Actions.INVALID, "Table name cannot start or end with spaces"); + } + + // The user has to have permission to modify the table. + try { + catalog.authorizer().authorizeTable(schema, tableId.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + + // Validate the spec, if provided. + if (spec != null) { + + // The given table spec has to be valid for the given schema. + if (Strings.isNullOrEmpty(spec.type())) { + return Actions.badRequest(Actions.INVALID, "Table type is required"); + } + + if (!schema.accepts(spec.type())) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot create tables of type %s in schema %s", + spec.getClass().getSimpleName(), + tableId.schema() + ) + ); + } + } + + // Everything checks out, let the request proceed. 
+ return null; + } + + private Response insertTableSpec(TableMetadata table, boolean ifNew) + { + try { + long createVersion = catalog.tables().create(table); + return Actions.okWithVersion(createVersion); + } + catch (DuplicateKeyException e) { + if (!ifNew) { + return Actions.badRequest( + Actions.DUPLICATE_ERROR, + StringUtils.format( + "A table of name %s already exists", + table.id().sqlName() + ) + ); + } else { + return Actions.okWithVersion(0); Review Comment: +1, that should also help in conflict resolution if there is any ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/storage/sql/SQLCatalogManager.java: ########## @@ -0,0 +1,578 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog.storage.sql; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.storage.MetastoreManager; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.function.Function; + +@ManageLifecycle +public class SQLCatalogManager implements CatalogManager +{ + public static final String TABLES_TABLE = "tableDefs"; + + private static final String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + 
" updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_PAYLOAD = + "SELECT state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final MetastoreManager metastoreManager; + private final SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque<Listener> listeners = new ConcurrentLinkedDeque<>(); + + @Inject + public SQLCatalogManager(MetastoreManager metastoreManager) + { + if (!metastoreManager.isSql()) { + throw new ISE("SQLCatalogManager only works with SQL based metadata store at this time"); + } + this.metastoreManager = metastoreManager; + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = metastoreManager.jsonMapper(); + this.tableName = getTableDefnTable(); + } + + @Override + @LifecycleStart + public void start() + { + createTableDefnTable(); + } + + @Override + public void stop() + { + } + + // Mimics what MetadataStorageTablesConfig should 
do. + public String getTableDefnTable() + { + final String base = metastoreManager.tablesConfig().getBase(); + if (Strings.isNullOrEmpty(base)) { + return TABLES_TABLE; + } else { + return StringUtils.format("%s_%s", base, TABLES_TABLE); + } + } + + // TODO: Move to SqlMetadataConnector + @Override + public void createTableDefnTable() + { + if (!metastoreManager.createTables()) { Review Comment: should we check for the table's existence and throw if it doesn't exist? ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/storage/MetastoreManager.java: ########## @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.storage; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +/** + * Represents the metastore manager database and its implementation. + * Abstracts away the various kick-knacks used to define the metastore. + * The metastore operations are defined via table-specific classes. 
+ */ +public interface MetastoreManager +{ + MetadataStorageConnector connector(); + MetadataStorageConnectorConfig config(); + MetadataStorageTablesConfig tablesConfig(); + + /** + * Whether to create tables if they do not exist. + */ + boolean createTables(); Review Comment: could be a bit interrogative - this seems like an action which would create tables and return if they were created or not ########## server/src/main/java/org/apache/druid/catalog/model/TableMetadata.java: ########## @@ -0,0 +1,241 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; + +import java.util.Objects; + +/** + * REST API level description of a table. Tables have multiple types + * as described by subclasses. Stores the operational aspects of a + * table, such as its name, creation time, state and spec. + * + * @see {@link ResolvedTable} for the semantic representation. 
+ */ +@PublicApi +public class TableMetadata +{ + public enum TableState + { + ACTIVE("A"), + DELETING("D"); + + private final String code; + + TableState(String code) + { + this.code = code; + } + + public String code() + { + return code; + } + + public static TableState fromCode(String code) + { + for (TableState state : values()) { + if (state.code.equals(code)) { + return state; + } + } + throw new ISE("Unknown TableState code: " + code); + } + } + + private final TableId id; + private final long creationTime; + private final long updateTime; + private final TableState state; + private final TableSpec spec; Review Comment: thanks for the explanation, makes sense! ########## server/src/main/java/org/apache/druid/catalog/model/ColumnSpec.java: ########## @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog.model; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.druid.guice.annotations.UnstableApi; +import org.apache.druid.java.util.common.IAE; + +import javax.annotation.Nullable; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * Specification of table columns. Columns have multiple types + * represented via the type field. + */ +@UnstableApi +public class ColumnSpec +{ + private final String type; + private final String name; + private final String sqlType; Review Comment: Thanks a lot for the explanation! I think the name makes sense to me too, I was thinking more about making `sqlType` from `String` to `ColumnType` itself - since `ColumnType` better represents the druid types and also the calcite type translation is also possible from `ColumnType`. ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java: ########## @@ -0,0 +1,747 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.http; + +import com.google.common.base.Strings; +import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.druid.catalog.model.ColumnSpec; +import org.apache.druid.catalog.model.SchemaRegistry.SchemaSpec; +import org.apache.druid.catalog.model.TableDefnRegistry; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.model.table.AbstractDatasourceDefn; +import org.apache.druid.catalog.storage.Actions; +import org.apache.druid.catalog.storage.CatalogStorage; +import org.apache.druid.catalog.storage.HideColumns; +import org.apache.druid.catalog.storage.MoveColumn; +import org.apache.druid.catalog.storage.MoveColumn.Position; +import org.apache.druid.catalog.storage.sql.CatalogManager.DuplicateKeyException; +import org.apache.druid.catalog.storage.sql.CatalogManager.NotFoundException; +import org.apache.druid.catalog.storage.sql.CatalogManager.OutOfDateException; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import 
javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +/** + * REST endpoint for user and internal catalog actions. Catalog actions + * occur at the global level (all schemas), the schema level, or the + * table level. + * + * @see {@link CatalogListenerResource} for the client-side API. + */ +@Path(CatalogResource.ROOT_PATH) +public class CatalogResource +{ + public static final String ROOT_PATH = "/druid/coordinator/v1/catalog"; + + private final CatalogStorage catalog; + + @Inject + public CatalogResource(final CatalogStorage catalog) + { + this.catalog = catalog; + } + + private enum PostAction + { + NEW, + IFNEW, + REPLACE, + FORCE; + } + + /** + * Create a new table containing the given table specification. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param spec The new table definition. + * @param actionParam What to do if the table already exists. + * {@code ifNew} is the same as the SQL IF NOT EXISTS clause. If {@code new}, + * then an error is raised if the table exists. If {@code ifNew}, then + * the action silently does nothing if the table exists. Primarily for + * use in scripts. The other two options are primarily for use in tests. + * @param req the HTTP request used for authorization. 
+ * @return the version number of the table + */ + @POST + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response postTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("action") String actionParam, + @QueryParam("version") long version, + @Context final HttpServletRequest req + ) + { + final PostAction action; + if (actionParam == null) { + action = PostAction.NEW; + } else { + action = PostAction.valueOf(StringUtils.toUpperCase(actionParam)); + if (action == null) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Not a valid action: [%s]. Valid actions are new, ifNew, replace, force", + actionParam + ) + ); + } + } + TableId tableId = TableId.of(dbSchema, name); + Response response = authorizeTable(tableId, spec, req); + if (response != null) { + return response; + } + TableMetadata table = TableMetadata.newTable(tableId, spec); + try { + catalog.validate(table); + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + + switch (action) { + case NEW: + return insertTableSpec(table, false); + case IFNEW: + return insertTableSpec(table, true); + case REPLACE: + return updateTableSpec(table, version); + case FORCE: + return addOrUpdateTableSpec(table); + default: + throw new ISE("Unknown action."); + } + } + + private Response authorizeTable(TableId tableId, TableSpec spec, final HttpServletRequest req) + { + // Druid has a fixed set of schemas. Ensure the one provided is valid. + Pair<Response, SchemaSpec> result = validateSchema(tableId.schema()); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + + // The schema has to be one that allows table definitions. 
+ if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot modify schema %s", tableId.schema()) + ); + } + + // Table name can't be blank or have spaces + if (Strings.isNullOrEmpty(tableId.name())) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + if (!tableId.name().equals(tableId.name().trim())) { + return Actions.badRequest(Actions.INVALID, "Table name cannot start or end with spaces"); + } + + // The user has to have permission to modify the table. + try { + catalog.authorizer().authorizeTable(schema, tableId.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + + // Validate the spec, if provided. + if (spec != null) { + + // The given table spec has to be valid for the given schema. + if (Strings.isNullOrEmpty(spec.type())) { + return Actions.badRequest(Actions.INVALID, "Table type is required"); + } + + if (!schema.accepts(spec.type())) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot create tables of type %s in schema %s", + spec.getClass().getSimpleName(), + tableId.schema() + ) + ); + } + } + + // Everything checks out, let the request proceed. 
+ return null; + } + + private Response insertTableSpec(TableMetadata table, boolean ifNew) + { + try { + long createVersion = catalog.tables().create(table); + return Actions.okWithVersion(createVersion); + } + catch (DuplicateKeyException e) { + if (!ifNew) { + return Actions.badRequest( + Actions.DUPLICATE_ERROR, + StringUtils.format( + "A table of name %s already exists", + table.id().sqlName() + ) + ); + } else { + return Actions.okWithVersion(0); + } + } + catch (Exception e) { + return Actions.exception(e); + } + } + + private Response updateTableSpec(TableMetadata table, long version) + { + try { + long newVersion = catalog.tables().update(table, version); + return Actions.okWithVersion(newVersion); + } + catch (NotFoundException e) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + catch (OutOfDateException e) { + return Response + .status(Response.Status.BAD_REQUEST) + .entity( + Actions.error( + Actions.DUPLICATE_ERROR, + "The table entry not found or is older than the given version: reload and retry")) + .build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + private Response addOrUpdateTableSpec(TableMetadata table) + { + try { + long newVersion = catalog.tables().create(table); + return Actions.okWithVersion(newVersion); + } + catch (DuplicateKeyException e) { + // Fall through + } + catch (Exception e) { + return Actions.exception(e); + } + try { + long newVersion = catalog.tables().update(table, 0); + return Actions.okWithVersion(newVersion); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Update a table within the given schema. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param spec The new table definition. + * @param version An optional table version. 
If provided, the metadata DB + * entry for the table must be at this exact version or the update + * will fail. (Provides "optimistic locking.") If omitted (that is, + * if zero), then no update conflict change is done. + * @param req the HTTP request used for authorization. + * @return the new version number of the table + */ + @PUT + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response updateTableDefn( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("version") long version, + @Context final HttpServletRequest req + ) + { + + TableDefnRegistry tableRegistry = catalog.tableRegistry(); + return incrementalUpdate( + TableId.of(dbSchema, name), + spec, + req, + (existing) -> tableRegistry.resolve(existing).merge(spec).spec() + ); + } + + private Response incrementalUpdate( + TableId tableId, + TableSpec newSpec, + @Context final HttpServletRequest req, + Function<TableSpec, TableSpec> action + ) + { + Response response = authorizeTable(tableId, newSpec, req); + if (response != null) { + return response; + } + try { + long newVersion = catalog.tables().updatePayload(tableId, action); + return Actions.okWithVersion(newVersion); + } + catch (NotFoundException e) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Move a single column to the start end of the column list, or before or after + * another column. Both columns must exist. Returns the version of the table + * after the update. + * <p> + * The operation is done atomically so no optimistic locking is required. 
+ * + * @param dbSchema + * @param name + * @param command + * @param req + * @return + */ + @POST + @Path("/tables/{dbSchema}/{name}/moveColumn") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response moveColumn( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final MoveColumn command, + @Context final HttpServletRequest req + ) + { + if (command == null) { + return Actions.badRequest(Actions.INVALID, "A MoveColumn object is required"); + } + if (Strings.isNullOrEmpty(command.column)) { + return Actions.badRequest(Actions.INVALID, "A column name is required"); + } + if (command.where == null) { + return Actions.badRequest(Actions.INVALID, "A target location is required"); + } + if ((command.where == Position.BEFORE || command.where == Position.AFTER) && Strings.isNullOrEmpty(command.anchor)) { + return Actions.badRequest(Actions.INVALID, "A anchor column is required for BEFORE or AFTER"); + } + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> spec.withColumns(command.perform(spec.columns())) + ); + } + + /** + * Hide or unhide columns. If both appear, hide takes precedence. Returns the + * new table version. 
+ */ + @POST + @Path("/tables/{dbSchema}/{name}/hideColumns") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response hideColumns( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final HideColumns command, + @Context final HttpServletRequest req + ) + { + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> { + if (!AbstractDatasourceDefn.isDatasource(spec.type())) { + throw new ISE("hideColumns is supported only for data source specs"); + } + @SuppressWarnings("unchecked") + List<String> hiddenProps = (List<String>) spec.properties().get(AbstractDatasourceDefn.HIDDEN_COLUMNS_PROPERTY); + return spec.withProperty( + AbstractDatasourceDefn.HIDDEN_COLUMNS_PROPERTY, + command.perform(hiddenProps) + ); + } + ); + } + + /** + * Drop column metadata. Only removes metadata entries, has no effect on the + * physical segments. Returns the new table version. + */ + @POST + @Path("/tables/{dbSchema}/{name}/dropColumns") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response dropColumns( + @PathParam("dbSchema") final String dbSchema, + @PathParam("name") final String name, + final List<String> columns, + @Context final HttpServletRequest req + ) + { + return incrementalUpdate( + TableId.of(dbSchema, name), + null, + req, + (spec) -> spec.withColumns(dropColumns(spec.columns(), columns)) + ); + } + + private static <T extends ColumnSpec> List<T> dropColumns( + final List<T> columns, + final List<String> toDrop) + { + if (toDrop == null || toDrop.isEmpty()) { + return columns; + } + Set<String> drop = new HashSet<String>(toDrop); + List<T> revised = new ArrayList<>(); Review Comment: could be done as `allColumns = new HashSet<>(columns); allColumns.removeAll(toDrop);` ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/storage/sql/SQLCatalogManager.java: ########## @@ -0,0 +1,578 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.storage.sql; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import org.apache.druid.catalog.model.TableId; +import org.apache.druid.catalog.model.TableMetadata; +import org.apache.druid.catalog.model.TableSpec; +import org.apache.druid.catalog.storage.MetastoreManager; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; 
+import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.function.Function; + +@ManageLifecycle +public class SQLCatalogManager implements CatalogManager +{ + public static final String TABLES_TABLE = "tableDefs"; + + private static final String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_PAYLOAD = + "SELECT state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final MetastoreManager metastoreManager; + private final 
SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque<Listener> listeners = new ConcurrentLinkedDeque<>(); + + @Inject + public SQLCatalogManager(MetastoreManager metastoreManager) + { + if (!metastoreManager.isSql()) { + throw new ISE("SQLCatalogManager only works with SQL based metadata store at this time"); + } + this.metastoreManager = metastoreManager; + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = metastoreManager.jsonMapper(); + this.tableName = getTableDefnTable(); + } + + @Override + @LifecycleStart + public void start() + { + createTableDefnTable(); + } + + @Override + public void stop() + { + } + + // Mimics what MetadataStorageTablesConfig should do. + public String getTableDefnTable() + { + final String base = metastoreManager.tablesConfig().getBase(); + if (Strings.isNullOrEmpty(base)) { + return TABLES_TABLE; + } else { + return StringUtils.format("%s_%s", base, TABLES_TABLE); + } + } + + // TODO: Move to SqlMetadataConnector + @Override + public void createTableDefnTable() + { + if (!metastoreManager.createTables()) { + return; + } + connector.createTable( + tableName, + ImmutableList.of( + StringUtils.format( + "CREATE TABLE %s (\n" + + " schemaName VARCHAR(255) NOT NULL,\n" + + " name VARCHAR(255) NOT NULL,\n" + + " creationTime BIGINT NOT NULL,\n" + + " updateTime BIGINT NOT NULL,\n" + + " state CHAR(1) NOT NULL,\n" + + " payload %s,\n" + + " PRIMARY KEY(schemaName, name)\n" + + ")", + tableName, + connector.getPayloadType()))); + } + + @Override + public long create(TableMetadata table) throws DuplicateKeyException + { + try { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override + public Long withHandle(Handle handle) throws DuplicateKeyException + { + long updateTime = System.currentTimeMillis(); + Update stmt = handle.createStatement( + StringUtils.format(INSERT_TABLE, 
tableName) + ) + .bind("schemaName", table.id().schema()) + .bind("name", table.id().name()) + .bind("creationTime", updateTime) + .bind("updateTime", updateTime) + .bind("state", TableMetadata.TableState.ACTIVE.code()) + .bind("payload", table.spec().toBytes(jsonMapper)); + try { + stmt.execute(); + } + catch (UnableToExecuteStatementException e) { + if (DbUtils.isDuplicateRecordException(e)) { + throw new DuplicateKeyException( + "Tried to insert a duplicate table: " + table.sqlName(), + e); + } else { + throw e; + } + } + sendAddition(table, updateTime); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof DuplicateKeyException) { + throw (DuplicateKeyException) e.getCause(); + } + throw e; + } + } + + @Override + public TableMetadata read(TableId id) + { + return dbi.withHandle( + new HandleCallback<TableMetadata>() + { + @Override + public TableMetadata withHandle(Handle handle) + { + Query<Map<String, Object>> query = handle.createQuery( + StringUtils.format(SELECT_TABLE, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + final ResultIterator<TableMetadata> resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + id, + r.getLong(1), + r.getLong(2), + TableMetadata.TableState.fromCode(r.getString(3)), + TableSpec.fromBytes(jsonMapper, r.getBytes(4)) + )) + .iterator(); + if (resultIterator.hasNext()) { + return resultIterator.next(); + } + return null; + } + } + ); + } + + @Override + public long update(TableMetadata table, long oldVersion) throws OutOfDateException, NotFoundException + { + if (oldVersion == 0) { + return updateUnsafe(table.id(), table.spec()); + } else { + return updateSafe(table.id(), table.spec(), oldVersion); + } + } + + private long updateSafe(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException + { + try { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override 
+ public Long withHandle(Handle handle) throws OutOfDateException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_SAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .bind("oldVersion", oldVersion) + .execute(); + if (updateCount == 0) { + throw new OutOfDateException( + StringUtils.format( + "Table %s: not found or update version does not match DB version", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof OutOfDateException) { + throw (OutOfDateException) e.getCause(); + } + throw e; + } + } + + private long updateUnsafe(TableId id, TableSpec defn) throws NotFoundException + { + try { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override + public Long withHandle(Handle handle) throws NotFoundException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .execute(); + if (updateCount == 0) { + throw new NotFoundException( + StringUtils.format("Table %s: not found", id.sqlName()) + ); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof NotFoundException) { + throw (NotFoundException) e.getCause(); + } + throw e; + } + } + + @Override + public long updatePayload(TableId id, Function<TableSpec, TableSpec> transform) throws NotFoundException + { + try { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override + public Long withHandle(Handle handle) throws NotFoundException + { + handle.begin(); + try { + Query<Map<String, Object>> query = 
handle.createQuery( + StringUtils.format(SELECT_PAYLOAD, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + + final ResultIterator<TableMetadata> resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + id, + 0, + 0, + TableMetadata.TableState.fromCode(r.getString(1)), + TableSpec.fromBytes(jsonMapper, r.getBytes(2)) + )) + .iterator(); + TableMetadata table; + if (resultIterator.hasNext()) { + table = resultIterator.next(); + } else { + handle.rollback(); + throw new NotFoundException( + StringUtils.format("Table %s: not found", id.sqlName()) + ); + } + if (table.state() != TableMetadata.TableState.ACTIVE) { + throw new ISE("Table is in state [%s] and cannot be updated", table.state()); + } + TableSpec revised = transform.apply(table.spec()); + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", revised.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .execute(); + if (updateCount == 0) { + // Should never occur because we're holding a lock. 
+ throw new ISE("Table %s: not found", id.sqlName()); + } + handle.commit(); + sendUpdate(id); + return updateTime; + } + catch (RuntimeException e) { + handle.rollback(); + throw e; + } + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof NotFoundException) { + throw (NotFoundException) e.getCause(); + } + throw e; + } + } + + @Override + public long markDeleting(TableId id) + { + return dbi.withHandle( + new HandleCallback<Long>() + { + @Override + public Long withHandle(Handle handle) + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_STATE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("updateTime", updateTime) + .bind("state", TableMetadata.TableState.DELETING.code()) + .execute(); + sendDeletion(id); + return updateCount == 1 ? updateTime : 0; + } + } + ); + } + + @Override + public boolean delete(TableId id) + { + return dbi.withHandle( + new HandleCallback<Boolean>() + { + @Override + public Boolean withHandle(Handle handle) + { + int updateCount = handle.createStatement( + StringUtils.format(DELETE_TABLE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .execute(); + sendDeletion(id); + return updateCount > 0; + } + } + ); + } + + @Override + public List<TableId> list() + { + return dbi.withHandle( + new HandleCallback<List<TableId>>() + { + @Override + public List<TableId> withHandle(Handle handle) + { + Query<Map<String, Object>> query = handle.createQuery( + StringUtils.format(SELECT_ALL_TABLES, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator<TableId> resultIterator = + query.map((index, r, ctx) -> + new TableId(r.getString(1), r.getString(2))) + .iterator(); Review Comment: same `resultIterator#close` doubt for all the readers ########## server/src/main/java/org/apache/druid/catalog/model/SchemaRegistryImpl.java: ########## @@ -0,0 +1,140 @@ +/* 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.model; + +import org.apache.druid.catalog.model.table.DatasourceDefn; +import org.apache.druid.catalog.model.table.ExternalTableDefn; +import org.apache.druid.server.security.ResourceType; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +/** + * Hard-coded schema registry that knows about the well-known, and + * a few obscure, Druid schemas. Does not allow for user-defined + * schemas, which the rest of Druid would not be able to support. 
+ */ +public class SchemaRegistryImpl implements SchemaRegistry +{ + // Mimics the definition in ExternalOperatorConvertion + // TODO: Change this when ExternalOperatorConvertion changes + private String EXTERNAL_RESOURCE = "EXTERNAL"; + + public static class SchemaDefnImpl implements SchemaSpec + { + private final String name; + private final String resource; + private final Set<String> accepts; + + public SchemaDefnImpl( + String name, + String resource, + Set<String> accepts + ) + { + this.name = name; + this.resource = resource; + this.accepts = accepts; + } + + @Override + public String name() + { + return name; + } + + @Override + public String securityResource() + { + return resource; + } + + @Override + public boolean writable() + { + return accepts != null && !accepts.isEmpty(); + } + + @Override + public boolean accepts(String tableType) + { + if (accepts == null) { + return false; + } + return accepts.contains(tableType); + } + } + + private final Map<String, SchemaSpec> builtIns; + + public SchemaRegistryImpl() + { + builtIns = new HashMap<>(); + register(new SchemaDefnImpl( + TableId.DRUID_SCHEMA, Review Comment: makes sense! ########## server/src/main/java/org/apache/druid/catalog/model/table/ClusterKeySpec.java: ########## @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog.model.table; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.annotation.Nullable; + +import java.util.Objects; + +public class ClusterKeySpec +{ + private final String expr; + private final boolean desc; Review Comment: From what I understand, clustering is a concept which currently means sorting, but might be extended in the future to hashing, space-filling curves, or trees as well. So, I thought that instead of hardcoding an attribute in the spec, we could make it extensible. What do you think? For column-based indexes, I completely agree with you that those can be per-column properties ########## extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/storage/MetastoreManager.java: ########## @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.druid.catalog.storage; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +/** + * Represents the metastore manager database and its implementation. + * Abstracts away the various kick-knacks used to define the metastore. + * The metastore operations are defined via table-specific classes. + */ +public interface MetastoreManager +{ + MetadataStorageConnector connector(); + MetadataStorageConnectorConfig config(); + MetadataStorageTablesConfig tablesConfig(); + + /** + * Whether to create tables if they do not exist. + */ + boolean createTables(); + + /** + * Object mapper to use for serializing and deserializing + * JSON objects stored in the metastore DB. + */ + ObjectMapper jsonMapper(); + + /** + * Is the implementation SQL-based? + */ + boolean isSql(); + + /** + * If SQL based, return the SQL version of the metastore + * connector. Throws an exception if not SQL-based. + */ + SQLMetadataConnector sqlConnector(); Review Comment: In the same sentiment, I think the `getDBI` method on the `SQLMetadataConnector` interface feels a bit like a leak. But due to the lack of choices, I'm ok with the general current implementation. Small styling suggestion could be to use `MetadataStorage` instead of `Metastore` to adhere to the current naming convention. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
