waterlx commented on a change in pull request #856:
URL: https://github.com/apache/incubator-iceberg/pull/856#discussion_r411996047
##########
File path: flink/src/main/java/org/apache/iceberg/flink/connector/sink/IcebergWriter.java
##########
@@ -0,0 +1,480 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.flink.connector.sink;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spectator.api.DefaultRegistry;
+import com.netflix.spectator.api.Registry;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nullable;
+import org.apache.avro.AvroTypeException;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.IndexedRecord;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.runtime.state.StateInitializationContext;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
+import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.MetricsConfig;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.avro.AvroSchemaUtil;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.flink.connector.IcebergConnectorConstant;
+import org.apache.iceberg.hadoop.HadoopOutputFile;
+import org.apache.iceberg.hive.HiveCatalogs;
+import org.apache.iceberg.io.FileAppender;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.parquet.Parquet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_DEFAULT;
+
+public class IcebergWriter<T> extends AbstractStreamOperator<FlinkDataFile>
+    implements OneInputStreamOperator<T, FlinkDataFile> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergWriter.class);
+  private static final String FILE_NAME_SEPARATOR = "_";
+
+  private final AvroSerializer serializer;
+  private final Configuration config;
+  private final String metacatHost;
+  //private final String jobName;
+  //private final String catalog;
+  private final String database;
+  private final String tableName;
+  private final FileFormat format;
+  private final boolean skipIncompatibleRecord;
+  private final org.apache.iceberg.Schema icebergSchema;
+  private final PartitionSpec spec;
+  private final String s3BasePath;
+  private final Map<String, String> tableProperties;
+  private final String timestampFeild;
+  private final TimeUnit timestampUnit;
+  private final long maxFileSize;
+
+  private transient String instanceId;
+  private transient String titusTaskId;
+  private transient Schema avroSchema;
+  private transient org.apache.hadoop.conf.Configuration hadoopConfig;
+  private transient Map<String, FileWriter> openPartitionFiles;
+  private transient int subtaskId;
+  private transient IcebergWriterSubtaskMetrics subtaskMetrics;
+  private transient IcebergWriterTaskMetrics taskMetrics;
+  private transient ProcessingTimeService timerService;
+  private transient Partitioner partitioner;
+  private transient FileSystem fs;
+
+  public IcebergWriter(@Nullable AvroSerializer<T> serializer,
+                       Configuration config) {
+    this.serializer = serializer;
+    this.config = config;
+    metacatHost = config.getString(IcebergConnectorConstant.METACAT_HOST,
+        IcebergConnectorConstant.DEFAULT_METACAT_HOST);
+    //jobName = config.getString(System.getenv("JOB_CLUSTER_NAME"), "");
+    //catalog = config.getString(IcebergConnectorConstant.CATALOG, "");
+    database = config.getString(IcebergConnectorConstant.DATABASE, "");
+    tableName = config.getString(IcebergConnectorConstant.TABLE, "");
+    format = FileFormat.valueOf(config.getString(IcebergConnectorConstant.FORMAT,
+        FileFormat.PARQUET.name()));
+    skipIncompatibleRecord = config.getBoolean(IcebergConnectorConstant.SKIP_INCOMPATIBLE_RECORD,
+        IcebergConnectorConstant.DEFAULT_SKIP_INCOMPATIBLE_RECORD);
+    // TODO: different from IcebergCommitter, line 147, in which, "" is taken as default
+    timestampFeild = config.getString(IcebergConnectorConstant.VTTS_WATERMARK_TIMESTAMP_FIELD,
+        IcebergConnectorConstant.DEFAULT_VTTS_WATERMARK_TIMESTAMP_UNIT);
+    timestampUnit = TimeUnit.valueOf(config.getString(IcebergConnectorConstant.VTTS_WATERMARK_TIMESTAMP_UNIT,
+        IcebergConnectorConstant.DEFAULT_VTTS_WATERMARK_TIMESTAMP_UNIT));
+    maxFileSize = config.getLong(IcebergConnectorConstant.MAX_FILE_SIZE,
+        IcebergConnectorConstant.DEFAULT_MAX_FILE_SIZE);
+
+//    org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration();
+//    hadoopConfig.set(IcebergConnectorConstant.METACAT_HOST_HADOOP_CONF_KEY, metacatHost);
+    // Avoid Netflix code just to make it compile
+    //final MetacatIcebergCatalog icebergCatalog
+    //    = new MetacatIcebergCatalog(hadoopConfig, jobName, IcebergConnectorConstant.ICEBERG_APP_TYPE);
+    //final BaseMetastoreCatalog icebergCatalog = null;
+    //final TableIdentifier tableIdentifier = TableIdentifier.of(catalog, database, tableName);
+    // final Table table = icebergCatalog.loadTable(tableIdentifier);
+    org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
+    //hadoopConf.set(ConfVars.METASTOREURIS.varname, config.getString(ConfVars.METASTOREURIS.varname, ""));
+    hadoopConf.set(IcebergConnectorConstant.METACAT_HOST_HADOOP_CONF_KEY, metacatHost);
+    hadoopConf.set(ConfVars.METASTOREWAREHOUSE.varname, config.getString(ConfVars.METASTOREWAREHOUSE.varname, ""));
+
+    Catalog icebergCatalog = HiveCatalogs.loadCatalog(hadoopConf);
+    final Table table = icebergCatalog.loadTable(TableIdentifier.of(database, tableName));
+    ImmutableMap.Builder<String, String> tablePropsBuilder = ImmutableMap.<String, String>builder()
+        .putAll(table.properties());
+    if (!table.properties().containsKey(PARQUET_COMPRESSION)) {
+      // if compression is not set in table properties,
+      // Flink writer defaults it to BROTLI
+      //TODO: org.apache.hadoop.io.compress.BrotliCodec, class not found
+      //tablePropsBuilder.put(PARQUET_COMPRESSION, CompressionCodecName.BROTLI.name());
+      tablePropsBuilder.put(PARQUET_COMPRESSION, PARQUET_COMPRESSION_DEFAULT);
+    }
+    tableProperties = tablePropsBuilder.build();
+    icebergSchema = table.schema();
+    spec = table.spec();
+    //s3BasePath = getS3BasePath(table.location());
+    s3BasePath = table.locationProvider().newDataLocation("");  // data location of the Iceberg table
+    LOG.info("Iceberg writer {}.{} has S3 base path: {}", database, tableName, s3BasePath);
+    LOG.info("Iceberg writer {}.{} created with sink config", database, tableName);
+    LOG.info("Iceberg writer {}.{} loaded table: schema = {}\npartition spec = {}",
+        database, tableName, icebergSchema, spec);
+
+    // default ChainingStrategy is set to HEAD
+    // we prefer chaining to avoid the huge serialization and deserializatoin overhead.
+    super.setChainingStrategy(ChainingStrategy.ALWAYS);
+  }
+
+  /**
+   * @param location location from table metadata
+   *                 e.g. s3n://bucket/hive/warehouse/database_name.db/table_name
+   */
+  private String getS3BasePath(final String location) {

Review comment:
   Removed and addressed


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org
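
For illustration, a minimal sketch of the pattern the constructor above adopts in place of the removed `getS3BasePath` helper: asking the table's `LocationProvider` for paths under its data location instead of string-parsing `table.location()`. This is not part of the PR; the `db.events` identifier is a placeholder and the example assumes a Hadoop configuration that can reach the Hive metastore.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hive.HiveCatalogs;

public class DataLocationExample {
  public static void main(String[] args) {
    // Load the table through the Hive catalog, as IcebergWriter's constructor does.
    Configuration hadoopConf = new Configuration();
    Catalog catalog = HiveCatalogs.loadCatalog(hadoopConf);
    Table table = catalog.loadTable(TableIdentifier.of("db", "events"));  // placeholder identifier

    // Base data location of the table (what the writer stores as s3BasePath).
    String dataBasePath = table.locationProvider().newDataLocation("");

    // A concrete data file path under that location, e.g. for a newly written Parquet file.
    String newFilePath = table.locationProvider().newDataLocation("part-00000.parquet");

    System.out.println(dataBasePath);
    System.out.println(newFilePath);
  }
}
```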