[ https://issues.apache.org/jira/browse/FLINK-3311?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15226483#comment-15226483 ]

ASF GitHub Bot commented on FLINK-3311:
---------------------------------------

Github user rmetzger commented on a diff in the pull request:

    https://github.com/apache/flink/pull/1771#discussion_r58561933
  
    --- Diff: flink-streaming-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java ---
    @@ -0,0 +1,136 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.flink.streaming.connectors.cassandra;
    +
    +import com.datastax.driver.core.BoundStatement;
    +import com.datastax.driver.core.Cluster;
    +import com.datastax.driver.core.PreparedStatement;
    +import com.datastax.driver.core.ResultSet;
    +import com.datastax.driver.core.ResultSetFuture;
    +import com.datastax.driver.core.Session;
    +import com.google.common.util.concurrent.FutureCallback;
    +import com.google.common.util.concurrent.Futures;
    +import org.apache.flink.api.common.typeutils.TypeSerializer;
    +import org.apache.flink.api.java.ClosureCleaner;
    +import org.apache.flink.api.java.tuple.Tuple;
    +import org.apache.flink.api.java.typeutils.runtime.TupleSerializer;
    +import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
    +import org.apache.flink.streaming.runtime.operators.GenericAtLeastOnceSink;
    +
    +import java.util.concurrent.atomic.AtomicInteger;
    +
    +/**
    + * Sink that emits its input elements into a Cassandra database. This sink stores incoming records within a
    + * {@link org.apache.flink.runtime.state.AbstractStateBackend}, and only commits them to Cassandra
    + * if a checkpoint is completed.
    + *
    + * @param <IN> Type of the elements emitted by this sink
    + */
    +public class CassandraTupleWriteAheadSink<IN extends Tuple> extends GenericAtLeastOnceSink<IN> {
    +   protected transient Cluster cluster;
    +   protected transient Session session;
    +
    +   private final String insertQuery;
    +   private transient PreparedStatement preparedStatement;
    +
    +   private transient Throwable exception = null;
    +   private transient FutureCallback<ResultSet> callback;
    +
    +   private ClusterBuilder builder;
    +
    +   private int updatesSent = 0;
    +   private AtomicInteger updatesConfirmed = new AtomicInteger(0);
    +
    +   private transient Object[] fields;
    +
    +   protected CassandraTupleWriteAheadSink(String insertQuery, TypeSerializer<IN> serializer, ClusterBuilder builder, String jobID, CheckpointCommitter committer) throws Exception {
    +           super(committer, serializer, jobID);
    +           this.insertQuery = insertQuery;
    +           this.builder = builder;
    +           ClosureCleaner.clean(builder, true);
    +   }
    +
    +   public void open() throws Exception {
    +           super.open();
    +           if (!getRuntimeContext().isCheckpointingEnabled()) {
    +                   throw new IllegalStateException("The write-ahead log requires checkpointing to be enabled.");
    +           }
    +           this.callback = new FutureCallback<ResultSet>() {
    +                   @Override
    +                   public void onSuccess(ResultSet resultSet) {
    +                           updatesConfirmed.incrementAndGet();
    +                   }
    +
    +                   @Override
    +                   public void onFailure(Throwable throwable) {
    +                           exception = throwable;
    +                   }
    +           };
    +           cluster = builder.getCluster();
    +           session = cluster.connect();
    +           preparedStatement = session.prepare(insertQuery);
    +
    +           fields = new Object[((TupleSerializer<IN>) serializer).getArity()];
    +   }
    +
    +   @Override
    +   public void close() throws Exception {
    +           super.close();
    +           try {
    +                   session.close();
    +           } catch (Exception e) {
    +                   LOG.error("Error while closing session.", e);
    +           }
    +           try {
    +                   cluster.close();
    +           } catch (Exception e) {
    +                   LOG.error("Error while closing cluster.", e);
    +           }
    +   }
    +
    +   @Override
    +   protected void sendValues(Iterable<IN> values, long timestamp) throws Exception {
    +           //verify that no query failed until now
    +           if (exception != null) {
    +                   throw new Exception(exception);
    +           }
    +           //set values for prepared statement
    +           for (IN value : values) {
    +                   for (int x = 0; x < value.getArity(); x++) {
    +                           fields[x] = value.getField(x);
    +                   }
    +                   //insert values and send to cassandra
    +                   BoundStatement s = preparedStatement.bind(fields);
    +                   s.setDefaultTimestamp(timestamp);
    +                   ResultSetFuture result = session.executeAsync(s);
    +                   updatesSent++;
    +                   if (result != null) {
    +                           //add callback to detect errors
    +                           Futures.addCallback(result, callback);
    +                   }
    +           }
    +           while (updatesSent != updatesConfirmed.get()) {
    +                   try {
    +                           Thread.sleep(100);
    +                   } catch (InterruptedException e) {
    --- End diff --
    
    I would leave the while loop on an interrupt.
    Otherwise the thread might get stuck in the loop, because it cannot be interrupted.
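    
    For illustration, a minimal sketch of what leaving the loop on an interrupt could look like (reusing the `updatesSent`/`updatesConfirmed` fields from the diff above; the exact exception handling is of course up to the author):
    
        while (updatesSent != updatesConfirmed.get()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Restore the interrupt status and leave the loop, so that a
                // cancelling task is not stuck waiting for confirmations that
                // may never arrive.
                Thread.currentThread().interrupt();
                throw new Exception("Interrupted while waiting for "
                    + (updatesSent - updatesConfirmed.get()) + " pending Cassandra updates.", e);
            }
        }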


> Add a connector for streaming data into Cassandra
> -------------------------------------------------
>
>                 Key: FLINK-3311
>                 URL: https://issues.apache.org/jira/browse/FLINK-3311
>             Project: Flink
>          Issue Type: New Feature
>          Components: Streaming Connectors
>            Reporter: Robert Metzger
>            Assignee: Andrea Sella
>
> We had users in the past asking for a Flink+Cassandra integration.
> It seems that there is a well-developed Java client for connecting to Cassandra: https://github.com/datastax/java-driver (ASL 2.0)
> There are also tutorials out there on how to start a local Cassandra instance (for the tests): http://prettyprint.me/2010/02/14/running-cassandra-as-an-embedded-service/index.html
> For the data types, I think we should support TupleX types, and map standard Java types to the respective Cassandra types.
> In addition, it seems that there is an object mapper from DataStax to store POJOs in Cassandra (there are annotations for defining the primary key and types).
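
For reference, a minimal sketch of the DataStax object mapper annotations mentioned above (the keyspace, table, and field names here are made-up placeholders, not part of the proposal):

    import com.datastax.driver.mapping.annotations.Column;
    import com.datastax.driver.mapping.annotations.PartitionKey;
    import com.datastax.driver.mapping.annotations.Table;

    // Hypothetical POJO; "example"/"events" are placeholder names.
    @Table(keyspace = "example", name = "events")
    public class Event {

        @PartitionKey
        @Column(name = "id")
        private long id;

        @Column(name = "payload")
        private String payload;

        // the mapper expects a no-arg constructor and getters/setters (omitted for brevity)
    }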



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
