Clarkkkkk commented on a change in pull request #7550: [FLINK-10245] [Streaming Connector] Add Pojo, Tuple, Row and Scala Product DataStream Sink and Upsert Table Sink for HBase
URL: https://github.com/apache/flink/pull/7550#discussion_r300818450
 
 

 ##########
 File path: flink-connectors/flink-hbase/src/main/java/org/apache/flink/streaming/connectors/hbase/HBaseSink.java
 ##########
 @@ -0,0 +1,554 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.hbase;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.api.java.typeutils.PojoTypeInfo;
+import org.apache.flink.api.java.typeutils.RowTypeInfo;
+import org.apache.flink.api.java.typeutils.TupleTypeInfo;
+import org.apache.flink.api.scala.typeutils.CaseClassTypeInfo;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.transformations.SinkTransformation;
+import org.apache.flink.types.Row;
+
+import org.apache.commons.lang3.StringUtils;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import scala.Product;
+
+/**
+ * This class wraps different HBase sink implementations to provide a common interface for all of them.
+ *
+ * @param <IN> input type
+ */
+public class HBaseSink<IN> {
+
+       private DataStreamSink<IN> sink;
+
+       private HBaseSink(DataStreamSink<IN> sink) {
+               this.sink = sink;
+       }
+
+       private SinkTransformation<IN> getSinkTransformation() {
+               return sink.getTransformation();
+       }
+
+       /**
+        * Sets the name of this sink. This name is
+        * used by the visualization and logging during runtime.
+        *
+        * @param name The name for this sink.
+        * @return The named sink.
+        */
+       public HBaseSink<IN> name(String name) {
+               getSinkTransformation().setName(name);
+               return this;
+       }
+
+       /**
+        * Sets an ID for this operator.
+        *
+        * <p>The specified ID is used to assign the same operator ID across job
+        * submissions (for example when starting a job from a savepoint).
+        *
+        * <p><strong>Important</strong>: this ID needs to be unique per
+        * transformation and job. Otherwise, job submission will fail.
+        *
+        * @param uid The unique user-specified ID of this transformation.
+        * @return The operator with the specified ID.
+        */
+       @PublicEvolving
+       public HBaseSink<IN> uid(String uid) {
+               getSinkTransformation().setUid(uid);
+               return this;
+       }
+
+       /**
+        * Sets a user-provided hash for this operator. This will be used AS IS to create the JobVertexID.
+        *
+        * <p>The user-provided hash is an alternative to the generated hashes, which is considered when identification of
+        * an operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
+        *
+        * <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting. The provided hash
+        * needs to be unique per transformation and job. Otherwise, job submission will fail. Furthermore, you cannot
+        * assign a user-specified hash to intermediate nodes in an operator chain; attempting to do so will cause the job to fail.
+        *
+        * <p>A use case for this is migration between Flink versions or changing a job in a way that changes the
+        * automatically generated hashes. In this case, providing the previous hashes directly through this method (e.g.
+        * obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
+        *
+        * @param uidHash The user-provided hash for this operator. This will become the JobVertexID, which is shown in
+        *                 the logs and web UI.
+        * @return The operator with the user provided hash.
+        */
+       @PublicEvolving
+       public HBaseSink<IN> setUidHash(String uidHash) {
+               getSinkTransformation().setUidHash(uidHash);
+               return this;
+       }
+
+       /**
+        * Sets the parallelism for this sink. The parallelism must be higher than zero.
+        *
+        * @param parallelism The parallelism for this sink.
+        * @return The sink with set parallelism.
+        */
+       public HBaseSink<IN> setParallelism(int parallelism) {
+               getSinkTransformation().setParallelism(parallelism);
+               return this;
+       }
+
+       /**
+        * Turns off chaining for this operator so thread co-location will not be used as an optimization.
+        *
+        * <p>Chaining can be turned off for the whole job by
+        * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#disableOperatorChaining()},
+        * however this is not advised for performance reasons.
+        *
+        * @return The sink with chaining disabled
+        */
+       public HBaseSink<IN> disableChaining() {
+               getSinkTransformation().setChainingStrategy(ChainingStrategy.NEVER);
+               return this;
+       }
+
+       /**
+        * Sets the slot sharing group of this operation. Parallel instances of operations that are in the same
+        * slot sharing group will be co-located in the same TaskManager slot, if possible.
+        *
+        * <p>Operations inherit the slot sharing group of input operations if all input operations
+        * are in the same slot sharing group and no slot sharing group was explicitly specified.
+        *
+        * <p>Initially an operation is in the default slot sharing group. An operation can be put into
+        * the default group explicitly by setting the slot sharing group to {@code "default"}.
+        *
+        * @param slotSharingGroup The slot sharing group name.
+        * @return The sink with the specified slot sharing group name.
+        */
+       public HBaseSink<IN> slotSharingGroup(String slotSharingGroup) {
+               getSinkTransformation().setSlotSharingGroup(slotSharingGroup);
+               return this;
+       }
+
+       /**
+        * Writes a DataStream into an HBase database.
+        *
+        * @param input input DataStream
+        * @param <IN>  input type
+        * @return HBaseSinkBuilder, to further configure the sink
+        */
+       public static <IN> HBaseSinkBuilder<IN> addSink(org.apache.flink.streaming.api.scala.DataStream<IN> input) {
+               return addSink(input.javaStream());
+       }
+
+       /**
+        * Writes a DataStream into an HBase database.
+        *
+        * @param input input DataStream
+        * @param <IN>  input type
+        * @return HBaseSinkBuilder, to further configure the sink
+        */
+       public static <IN> HBaseSinkBuilder<IN> addSink(DataStream<IN> input) {
 
 Review comment:
   Sounds good.
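
For context, a minimal usage sketch of the API under review. addSink() returns an HBaseSinkBuilder whose configuration methods are not part of the hunk shown above, so the build() call below is an assumption about the builder's terminal method; the fluent calls afterwards are the HBaseSink methods from this file.

    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.connectors.hbase.HBaseSink;

    public class HBaseSinkExample {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            DataStream<Tuple2<String, Integer>> stream = env.fromElements(
                    Tuple2.of("row1", 1),
                    Tuple2.of("row2", 2));

            // build() is assumed here; the builder API is not visible in this hunk.
            HBaseSink<Tuple2<String, Integer>> sink = HBaseSink.addSink(stream).build();

            sink.name("hbase-sink")          // shown in the web UI and in logs
                .uid("hbase-sink-uid")       // stable operator ID across savepoint restores
                .setParallelism(2)           // must be higher than zero
                .slotSharingGroup("hbase");  // co-locate with other "hbase" operators

            env.execute("HBaseSink example");
        }
    }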

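The setUidHash Javadoc above describes re-establishing a lost state-to-operator mapping when the automatically generated hashes change. A minimal sketch, assuming the sink from the previous example; the hash value is a placeholder, a real one would be recovered from the old job's logs:

    // Pin the JobVertexID to the legacy hash so restored state maps to this sink.
    // The 32-character hex value below is a placeholder, not a real hash.
    sink.setUidHash("c1f8a2b3d4e5f60718293a4b5c6d7e8f");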