rdblue commented on a change in pull request #1103:
URL: https://github.com/apache/iceberg/pull/1103#discussion_r439072297
##########
File path: mr/src/main/java/org/apache/iceberg/mr/mapred/TableResolver.java
##########

@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.mapred;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Properties;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hadoop.HadoopCatalog;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.mr.InputFormatConfig;
+
+final class TableResolver {
+
+  private TableResolver() {
+  }
+
+  static Table resolveTableFromJob(JobConf conf) throws IOException {
+    Properties properties = new Properties();
+    properties.setProperty(InputFormatConfig.CATALOG_NAME, extractProperty(conf, InputFormatConfig.CATALOG_NAME));
+    if (conf.get(InputFormatConfig.CATALOG_NAME).equals(InputFormatConfig.HADOOP_CATALOG)) {
+      properties.setProperty(InputFormatConfig.SNAPSHOT_TABLE, conf.get(InputFormatConfig.SNAPSHOT_TABLE, "true"));
+    }
+    properties.setProperty(InputFormatConfig.TABLE_LOCATION, extractProperty(conf, InputFormatConfig.TABLE_LOCATION));
+    properties.setProperty(InputFormatConfig.TABLE_NAME, extractProperty(conf, InputFormatConfig.TABLE_NAME));
+    return resolveTableFromConfiguration(conf, properties);
+  }
+
+  static Table resolveTableFromConfiguration(Configuration conf, Properties properties) throws IOException {
+    String catalogName = properties.getProperty(InputFormatConfig.CATALOG_NAME);
+    URI tableLocation = pathAsURI(properties.getProperty(InputFormatConfig.TABLE_LOCATION));
+    if (catalogName == null) {
+      throw new IllegalArgumentException("Catalog property: 'iceberg.catalog' not set in JobConf");
+    }
+    switch (catalogName) {
+      case InputFormatConfig.HADOOP_TABLES:
+        HadoopTables tables = new HadoopTables(conf);
+        return tables.load(tableLocation.getPath());
+      case InputFormatConfig.HADOOP_CATALOG:
+        String tableName = properties.getProperty(InputFormatConfig.TABLE_NAME);
+        TableIdentifier id = TableIdentifier.parse(tableName);
+        if (tableName.endsWith(InputFormatConfig.SNAPSHOT_TABLE_SUFFIX)) {
+          if (!Boolean.parseBoolean(properties.getProperty(InputFormatConfig.SNAPSHOT_TABLE,
+              Boolean.TRUE.toString()))) {
+            String tablePath = id.toString().replaceAll("\\.", "/");
+            URI warehouseLocation = pathAsURI(tableLocation.getPath().replaceAll(tablePath, ""));
+            HadoopCatalog catalog = new HadoopCatalog(conf, warehouseLocation.getPath());
+            return catalog.loadTable(id);
+          } else {
+            return resolveMetadataTable(conf, tableLocation.getPath(), tableName);
+          }
+        } else {
+          URI warehouseLocation = pathAsURI(extractWarehousePath(tableLocation.getPath(), tableName));

Review comment:

> I've just realised/tested that you could create an Iceberg table using HadoopCatalog but if you had the full path you can use HadoopTables to load the table again instead...

Yes, this is why I said there is no benefit to using HadoopCatalog. If you already have the full path, you can open the table with HadoopTables and not worry about the warehouse path to pass to the catalog.

The only drawback is that this approach doesn't work for Hive tables. You'd need to know whether the table is a Hive table or a Hadoop table because they use different commit mechanisms. For tables tracked by the iceberg-hive connector, you'd just need to instantiate the HiveCatalog and use the table name to load the table.
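To make the trade-off concrete, here is a minimal sketch of the three load paths (illustrative only: the `LoadExamples` class, the locations, and the `db.tbl` identifier are made up, and it assumes the `HiveCatalog(Configuration)` constructor from the iceberg-hive module):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.hive.HiveCatalog;

public class LoadExamples {  // hypothetical demo class, not part of this PR
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Path-based load: HadoopTables resolves the table directly from its
    // location, so no catalog or warehouse path is needed. This also works
    // for tables that were created through HadoopCatalog, as long as the
    // full table path is known.
    Table byPath = new HadoopTables(conf).load("hdfs://namenode:8020/warehouse/db/tbl");

    // Catalog-based load with HadoopCatalog: requires the warehouse root in
    // addition to the table name, which is the extra bookkeeping noted above.
    HadoopCatalog hadoopCatalog = new HadoopCatalog(conf, "hdfs://namenode:8020/warehouse");
    Table byHadoopCatalog = hadoopCatalog.loadTable(TableIdentifier.of("db", "tbl"));

    // Name-based load for Hive-tracked tables: these must go through
    // HiveCatalog, which resolves the name in the metastore and commits
    // through metastore locking rather than the file-system-based mechanism.
    HiveCatalog hiveCatalog = new HiveCatalog(conf);
    Table byName = hiveCatalog.loadTable(TableIdentifier.of("db", "tbl"));
  }
}
```

The key point is the last case: a path alone doesn't tell you whether a table commits through the metastore or through the file system, so a resolver like this one has to be told which mechanism applies.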
