nfsantos commented on code in PR #728: URL: https://github.com/apache/jackrabbit-oak/pull/728#discussion_r989828070
########## oak-run/src/main/java/org/apache/jackrabbit/oak/run/Downloader.java: ########## @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.run; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URL; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Generic concurrent file downloader which uses Java NIO channels to potentially leverage OS internal optimizations. 
+ */ +public class Downloader implements Closeable { + + private static final Logger LOG = LoggerFactory.getLogger(Downloader.class); + + private final ExecutorService executorService; + + public Downloader(int concurrency) { + LOG.info("Initializing Downloader with max number of concurrent requests={}", concurrency); + this.executorService = new ThreadPoolExecutor( + (int) Math.ceil(concurrency * .1), concurrency, 60L, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder() + .setNameFormat("downloader-%d") + .setDaemon(true) + .build() + ); + } + + public List<ItemResponse> download(List<Item> items) { + LOG.debug("Preparing to download {} items.\n{}", items.size(), items); + try { + return executorService + .invokeAll(items.stream().map(DownloadWorker::new).collect(Collectors.toList())) + .stream() + .map(itemResponseFuture -> { + try { + return itemResponseFuture.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() throws IOException { + executorService.shutdown(); + } + + public static class Item { + public String source; + public String destination; + + @Override + public String toString() { + return "Item{" + + "source='" + source + '\'' + + ", destination='" + destination + '\'' + + '}'; + } + } + + public static class ItemResponse { + public final Item item; + public boolean failed; + public long size; + public long time; + public Throwable throwable; + + public ItemResponse(Item item) { + this.item = item; + } + } + + private static class DownloadWorker implements Callable<ItemResponse> { + + private final Item item; + + DownloadWorker(Item item) { + this.item = item; + } + + @Override + public ItemResponse call() { + ItemResponse response = new ItemResponse(item); + long t0 = System.currentTimeMillis(); + try { + URL sourceUrl = new 
URL(item.source); + File destinationFile = new File(item.destination); + destinationFile.getParentFile().mkdirs(); + try (ReadableByteChannel byteChannel = Channels.newChannel(sourceUrl.openStream()); Review Comment: The 60 minutes for read seems excessive. Can there be situations where a connection to the blob store stops sending data for, let's say, 50 minutes and then it restarts? That is, if a connection is not sending data for more than a few minutes, is it reasonable to expect that it can resume? I'm not familiar with Azure blob store; I'm writing from experience with S3, where in my previous job we had trouble when we used very large timeouts, especially the read timeout. The system seemed to hang when it hit these timeouts, and I have never seen any situation where a large read timeout was useful. Usually if the data flow stopped for more than a few tens of seconds, it would not recover. In the end, we were setting a generous connect timeout (but not more than a couple of minutes) and a read timeout on the order of tens of seconds, with the assumption that once a connection is established, the data will flow at a steady pace. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
