Github user rxin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/2753#discussion_r19560920
  
    --- Diff: 
network/common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
 ---
    @@ -0,0 +1,182 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.network.client;
    +
    +import java.io.Closeable;
    +import java.lang.reflect.Field;
    +import java.net.InetSocketAddress;
    +import java.net.SocketAddress;
    +import java.util.concurrent.ConcurrentHashMap;
    +import java.util.concurrent.TimeoutException;
    +import java.util.concurrent.atomic.AtomicReference;
    +
    +import io.netty.bootstrap.Bootstrap;
    +import io.netty.buffer.PooledByteBufAllocator;
    +import io.netty.channel.Channel;
    +import io.netty.channel.ChannelFuture;
    +import io.netty.channel.ChannelInitializer;
    +import io.netty.channel.ChannelOption;
    +import io.netty.channel.EventLoopGroup;
    +import io.netty.channel.socket.SocketChannel;
    +import io.netty.util.internal.PlatformDependent;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import org.apache.spark.network.TransportContext;
    +import org.apache.spark.network.server.TransportChannelHandler;
    +import org.apache.spark.network.util.IOMode;
    +import org.apache.spark.network.util.NettyUtils;
    +import org.apache.spark.network.util.TransportConf;
    +
    +/**
    + * Factory for creating {@link TransportClient}s by using createClient.
    + *
    + * The factory maintains a connection pool to other hosts and should 
return the same
    + * {@link TransportClient} for the same remote host. It also shares a 
single worker thread pool for
    + * all {@link TransportClient}s.
    + */
    +public class TransportClientFactory implements Closeable {
    +  private final Logger logger = 
LoggerFactory.getLogger(TransportClientFactory.class);
    +
    +  private final TransportContext context;
    +  private final TransportConf conf;
    +  private final ConcurrentHashMap<SocketAddress, TransportClient> 
connectionPool;
    +
    +  private final Class<? extends Channel> socketChannelClass;
    +  private final EventLoopGroup workerGroup;
    +
    +  public TransportClientFactory(TransportContext context) {
    +    this.context = context;
    +    this.conf = context.getConf();
    +    this.connectionPool = new ConcurrentHashMap<SocketAddress, 
TransportClient>();
    +
    +    IOMode ioMode = IOMode.valueOf(conf.ioMode());
    +    this.socketChannelClass = NettyUtils.getClientChannelClass(ioMode);
    +    // TODO: Make thread pool name configurable.
    +    this.workerGroup = NettyUtils.createEventLoop(ioMode, 
conf.clientThreads(), "shuffle-client");
    +  }
    +
    +  /**
    +   * Create a new BlockFetchingClient connecting to the given remote host 
/ port.
    +   *
    +   * This blocks until a connection is successfully established.
    +   *
    +   * Concurrency: This method is safe to call from multiple threads.
    +   */
    +  public TransportClient createClient(String remoteHost, int remotePort) 
throws TimeoutException {
    +    // Get connection from the connection pool first.
    +    // If it is not found or not active, create a new one.
    +    final InetSocketAddress address = new InetSocketAddress(remoteHost, 
remotePort);
    +    TransportClient cachedClient = connectionPool.get(address);
    +    if (cachedClient != null && cachedClient.isActive()) {
    +      return cachedClient;
    +    } else if (cachedClient != null) {
    +      connectionPool.remove(address, cachedClient); // Remove inactive 
clients.
    +    }
    +
    +    logger.debug("Creating new connection to " + address);
    +
    +    Bootstrap bootstrap = new Bootstrap();
    +    bootstrap.group(workerGroup)
    +      .channel(socketChannelClass)
    +       // Disable Nagle's Algorithm since we don't want packets to wait
    +      .option(ChannelOption.TCP_NODELAY, true)
    +      .option(ChannelOption.SO_KEEPALIVE, true)
    +      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 
conf.connectionTimeoutMs());
    +
    +    // Use pooled buffers to reduce temporary buffer allocation
    +    bootstrap.option(ChannelOption.ALLOCATOR, 
createPooledByteBufAllocator());
    +
    +    final AtomicReference<TransportClient> client = new 
AtomicReference<TransportClient>();
    +
    +    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
    +      @Override
    +      public void initChannel(SocketChannel ch) {
    +        TransportChannelHandler clientHandler = 
context.initializePipeline(ch);
    +        client.set(clientHandler.getClient());
    +      }
    +    });
    +
    +    // Connect to the remote server
    +    ChannelFuture cf = bootstrap.connect(address);
    +    if (!cf.awaitUninterruptibly(conf.connectionTimeoutMs())) {
    +      throw new TimeoutException(
    +        String.format("Connecting to %s timed out (%s ms)", address, 
conf.connectionTimeoutMs()));
    +    } else if (cf.cause() != null) {
    +      throw new RuntimeException(String.format("Failed to connect to %s", 
address), cf.cause());
    +    }
    +
    +    // Successful connection
    --- End diff --
    
    in your next PR, please add a comment on the race condition, e.g.
    
    "Two threads could race to establish a connection. If another thread 
has already established the connection while this thread is still trying 
to, we close the connection started by this thread."


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to