coufon commented on code in PR #7412:
URL: https://github.com/apache/iceberg/pull/7412#discussion_r1260364236
##########
gcp/src/main/java/org/apache/iceberg/gcp/biglake/BigLakeCatalog.java:
##########
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.gcp.biglake;
+
+import com.google.cloud.bigquery.biglake.v1.Catalog;
+import com.google.cloud.bigquery.biglake.v1.CatalogName;
+import com.google.cloud.bigquery.biglake.v1.Database;
+import com.google.cloud.bigquery.biglake.v1.DatabaseName;
+import com.google.cloud.bigquery.biglake.v1.HiveDatabaseOptions;
+import com.google.cloud.bigquery.biglake.v1.Table;
+import com.google.cloud.bigquery.biglake.v1.TableName;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iceberg.BaseMetastoreCatalog;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.TableOperations;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.SupportsNamespaces;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.iceberg.exceptions.ServiceFailureException;
+import org.apache.iceberg.hadoop.Configurable;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.ResolvingFileIO;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.base.Strings;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Streams;
+import org.apache.iceberg.util.LocationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Iceberg BigLake Metastore (BLMS) Catalog implementation. */
+public final class BigLakeCatalog extends BaseMetastoreCatalog
+    implements SupportsNamespaces, Configurable<Object> {
+
+  // TODO: to move the configs to GCPProperties.java.
+  // User provided properties.
+  // The endpoint of BigLake API. Optional, default to DEFAULT_BIGLAKE_SERVICE_ENDPOINT.
+  public static final String PROPERTIES_KEY_BIGLAKE_ENDPOINT = "biglake.endpoint";
+  // The GCP project ID. Required.
+  public static final String PROPERTIES_KEY_GCP_PROJECT = "biglake.project-id";
+  // The GCP location (https://cloud.google.com/bigquery/docs/locations). Optional, default to
+  // DEFAULT_GCP_LOCATION.
+  public static final String PROPERTIES_KEY_GCP_LOCATION = "biglake.location";
+  // The BLMS catalog ID. It is the container resource of databases and tables.
+  // It links a BLMS catalog with this Iceberg catalog.
+  public static final String PROPERTIES_KEY_BLMS_CATALOG = "biglake.catalog";
+
+  public static final String DEFAULT_BIGLAKE_SERVICE_ENDPOINT = "biglake.googleapis.com:443";
+  public static final String DEFAULT_GCP_LOCATION = "us";
+
+  private static final Logger LOG = LoggerFactory.getLogger(BigLakeCatalog.class);
+
+  // The name of this Iceberg catalog plugin: spark.sql.catalog.<catalog_plugin>.
+  private String catalogPluginName;
+  private Map<String, String> catalogProperties;
+  private FileIO fileIO;
+  private Object conf;
+  private String projectId;
+  private String location;
+  // BLMS catalog ID and fully qualified name.
+  private String catalogId;
+  private CatalogName catalogName;
+  private BigLakeClient client;
+
+  // Must have a no-arg constructor to be dynamically loaded.
+  // initialize(String name, Map<String, String> properties) will be called to complete
+  // initialization.
+  public BigLakeCatalog() {}
+
+  @Override
+  public void initialize(String inputName, Map<String, String> properties) {
+    Preconditions.checkArgument(
+        properties.containsKey(PROPERTIES_KEY_GCP_PROJECT), "GCP project must be specified");
+    String propProjectId = properties.get(PROPERTIES_KEY_GCP_PROJECT);
+    String propLocation =
+        properties.getOrDefault(PROPERTIES_KEY_GCP_LOCATION, DEFAULT_GCP_LOCATION);
+
+    BigLakeClient newClient;
+    try {
+      // TODO: to add more auth options of the client. Currently it uses default auth
+      // (https://github.com/googleapis/google-cloud-java#application-default-credentials)
+      // that works on GCP services (e.g., GCE, GKE, Dataproc).
+      newClient =
+          new BigLakeClient(
+              properties.getOrDefault(
+                  PROPERTIES_KEY_BIGLAKE_ENDPOINT, DEFAULT_BIGLAKE_SERVICE_ENDPOINT),
+              propProjectId,
+              propLocation);
+    } catch (IOException e) {
+      throw new ServiceFailureException(e, "Creating BigLake client failed");
+    }
+    initialize(inputName, properties, propProjectId, propLocation, newClient);
+  }
+
+  @VisibleForTesting
+  void initialize(
+      String inputName,
+      Map<String, String> properties,
+      String propProjectId,
+      String propLocation,
+      BigLakeClient bigLakeClient) {
+    this.catalogPluginName = inputName;
+    this.catalogProperties = ImmutableMap.copyOf(properties);
+    this.projectId = propProjectId;
+    this.location = propLocation;
+    Preconditions.checkNotNull(bigLakeClient, "BigLake client must not be null");
+    this.client = bigLakeClient;
+
+    // Users can specify the BigLake catalog ID, otherwise the catalog plugin name will be used.
+    this.catalogId = properties.getOrDefault(PROPERTIES_KEY_BLMS_CATALOG, inputName);
+    this.catalogName = CatalogName.of(projectId, location, catalogId);
+    LOG.info("Use BigLake catalog: {}", catalogName.toString());
+
+    String fileIOImpl =
+        properties.getOrDefault(CatalogProperties.FILE_IO_IMPL, ResolvingFileIO.class.getName());
+    this.fileIO = CatalogUtil.loadFileIO(fileIOImpl, properties, conf);
+  }
+
+  @Override
+  protected TableOperations newTableOps(TableIdentifier identifier) {
+    // The identifier of metadata tables is like "ns.table.files".
+    // We return a non-existing table in this case (an empty table ID is disallowed in BigLake
+    // Metastore), so that loadTable will try loadMetadataTable.
+    if (identifier.namespace().levels().length > 1
+        && MetadataTableType.from(identifier.name()) != null) {
+      return new BigLakeTableOperations(
+          client, fileIO, getTableName(identifier.namespace().level(0), /* tableId= */ ""));
+    }
+
+    return new BigLakeTableOperations(
+        client,
+        fileIO,
+        getTableName(getDatabaseId(identifier.namespace()), /* tableId= */ identifier.name()));
+  }
+
+  @Override
+  protected String defaultWarehouseLocation(TableIdentifier identifier) {
+    String locationUri = getDatabase(identifier.namespace()).getHiveOptions().getLocationUri();
+    return String.format(
+        "%s/%s",
+        Strings.isNullOrEmpty(locationUri)
+            ? getDatabaseLocation(getDatabaseId(identifier.namespace()))
+            : locationUri,
+        identifier.name());
+  }
+
+  @Override
+  public List<TableIdentifier> listTables(Namespace namespace) {
+    // When deleting a BLMS catalog via `DROP NAMESPACE <catalog>`, this method is called to
+    // verify that the catalog is empty. `namespace` is empty in this case, so we list the
+    // databases in this catalog instead.
+    // TODO: to return all tables in all databases in a BLMS catalog instead of a "placeholder".
+    if (namespace.levels().length == 0) {
+      return Iterables.isEmpty(client.listDatabases(catalogName))
+          ? ImmutableList.of()
+          : ImmutableList.of(TableIdentifier.of("placeholder"));
+    }
+
+    return Streams.stream(client.listTables(getDatabaseName(namespace)))
+        .map(BigLakeCatalog::getTableIdentifier)
+        .collect(ImmutableList.toImmutableList());
+  }
+
+  @Override
+  public boolean dropTable(TableIdentifier identifier, boolean purge) {
+    TableOperations ops = newTableOps(identifier);
+    // TODO: to catch NotFoundException as in https://github.com/apache/iceberg/pull/5510.
+    TableMetadata lastMetadata = ops.current();
+    try {
+      client.deleteTable(
+          getTableName(getDatabaseId(identifier.namespace()), /* tableId= */ identifier.name()));
+    } catch (NoSuchTableException e) {
+      LOG.warn("Dropping table failed", e);
+      return false;
+    }
+
+    if (purge && lastMetadata != null) {
+      CatalogUtil.dropTableData(ops.io(), lastMetadata);
+    }
+
+    return true;
+  }
+
+  @Override
+  public void renameTable(TableIdentifier from, TableIdentifier to) {
+    String fromDbId = getDatabaseId(from.namespace());
+    String toDbId = getDatabaseId(to.namespace());
+
+    Preconditions.checkArgument(
+        fromDbId.equals(toDbId), "New table name must be in the same database");
+    client.renameTable(getTableName(fromDbId, from.name()), getTableName(toDbId, to.name()));
+  }
+
+  @Override
+  public void createNamespace(Namespace namespace, Map<String, String> metadata) {
+    if (namespace.levels().length == 0) {
+      // Used by `CREATE NAMESPACE <catalog>`. Create a BLMS catalog linked with Iceberg catalog.
+      client.createCatalog(catalogName, Catalog.getDefaultInstance());
+      LOG.info("Created BigLake catalog: {}", catalogName.toString());

Review Comment:
   BigLake has a "catalog" concept, which is a container of databases. The BigLake Metastore API also uses it to access BigQuery tables and OSS tables; e.g., "bigquery" is a special catalog containing BigQuery tables, while other (non-special) catalogs hold OSS tables. Customers can set "spark.sql.catalog.<plugin>.biglake.catalog=<catalog>" to specify the catalog to use, and then create or delete that catalog with Spark DDLs: "CREATE NAMESPACE <catalog>", "DROP NAMESPACE <catalog>".
   This is different from how other catalogs work today, where the target of these DDLs is a database; here it is a remote RPC that creates or deletes a catalog resource in BigLake Metastore.
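   To make the mapping concrete, here is a minimal, hypothetical sketch of how that empty-namespace path is exercised. The property keys and method signatures come from the diff above; the plugin name `iceberg_blms` and the project ID are made-up placeholders, and `BigLakeClient` is assumed to be on the classpath as added elsewhere in this PR:

```java
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

public class BigLakeCatalogSketch {
  public static void main(String[] args) {
    BigLakeCatalog catalog = new BigLakeCatalog();

    // "iceberg_blms" is the Spark catalog plugin name
    // (spark.sql.catalog.iceberg_blms=...). Since biglake.catalog is not set,
    // it also becomes the BLMS catalog ID.
    catalog.initialize(
        "iceberg_blms",
        ImmutableMap.of(
            "biglake.project-id", "my-gcp-project", // placeholder project ID
            "biglake.location", "us"));

    // `CREATE NAMESPACE iceberg_blms` reaches createNamespace with an empty
    // namespace, which issues a CreateCatalog RPC to BigLake Metastore.
    catalog.createNamespace(Namespace.empty(), ImmutableMap.of());
  }
}
```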
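   Similarly, the metadata-table special case in `newTableOps` above can be illustrated with a short, hypothetical continuation of the same sketch (reusing the `catalog` instance, and assuming a database `db` with a table `events` already exists):

```java
// Continuing inside main() of the sketch above:
// "db.events.files" parses into namespace ["db", "events"] plus name "files",
// which matches a MetadataTableType, so newTableOps returns operations for a
// deliberately non-existent table (an empty table ID is disallowed in BigLake
// Metastore) and BaseMetastoreCatalog.loadTable falls back to loading the
// "files" metadata table of db.events.
org.apache.iceberg.catalog.TableIdentifier filesTable =
    org.apache.iceberg.catalog.TableIdentifier.of("db", "events", "files");
org.apache.iceberg.Table files = catalog.loadTable(filesTable);
```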
