[ 
https://issues.apache.org/jira/browse/STORM-876?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15021837#comment-15021837
 ] 

ASF GitHub Bot commented on STORM-876:
--------------------------------------

Github user bastiliu commented on a diff in the pull request:

    https://github.com/apache/storm/pull/845#discussion_r45583718
  
    --- Diff: storm-core/src/jvm/backtype/storm/blobstore/NimbusBlobStore.java 
---
    @@ -0,0 +1,419 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package backtype.storm.blobstore;
    +
    +import backtype.storm.Config;
    +import backtype.storm.generated.AuthorizationException;
    +import backtype.storm.generated.BeginDownloadResult;
    +import backtype.storm.generated.ListBlobsResult;
    +import backtype.storm.generated.ReadableBlobMeta;
    +import backtype.storm.generated.SettableBlobMeta;
    +import backtype.storm.generated.KeyAlreadyExistsException;
    +import backtype.storm.generated.KeyNotFoundException;
    +import backtype.storm.utils.NimbusClient;
    +import backtype.storm.utils.Utils;
    +import org.apache.thrift.TException;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import java.io.IOException;
    +import java.nio.ByteBuffer;
    +import java.util.Iterator;
    +import java.util.Map;
    +import java.util.NoSuchElementException;
    +
    +public class NimbusBlobStore extends ClientBlobStore {
    +  private static final Logger LOG = 
LoggerFactory.getLogger(NimbusBlobStore.class);
    +
    +  /**
    +   * Iterator over blob keys that pages results lazily from nimbus:
    +   * each exhausted page triggers another listBlobs call using the
    +   * session token carried in the previous ListBlobsResult.
    +   * hasNext/next are synchronized on the iterator instance.
    +   */
    +  public class NimbusKeyIterator implements Iterator<String> {
    +    // Current page of keys returned by the last listBlobs call.
    +    private ListBlobsResult listBlobs = null;
    +    // Position of the next key to hand out within the current page.
    +    private int offset = 0;
    +    // Set once nimbus returns an empty page; no further fetches occur.
    +    private boolean eof = false;
    +    
    +    public NimbusKeyIterator(ListBlobsResult listBlobs) {
    +      this.listBlobs = listBlobs;
    +      // An initially empty page means there are no keys at all.
    +      this.eof = (listBlobs.get_keys_size() == 0);
    +    }
    +    
    +    // True when every key in the current page has been consumed.
    +    private boolean isCacheEmpty() {
    +      return listBlobs.get_keys_size() <= offset;
    +    }
    +
    +    // Fetches the next page from nimbus, resetting offset to the start
    +    // of the new page. An empty page marks end-of-iteration.
    +    private void readMore() {
    +      if (!eof) {
    +        try {
    +          offset = 0;
    +          // NOTE(review): client appears to be shared enclosing-class
    +          // state guarded by its own monitor — confirm lock ordering
    +          // with other users of client.
    +          synchronized(client) {
    +            listBlobs = 
client.getClient().listBlobs(listBlobs.get_session());
    +          }
    +          if (listBlobs.get_keys_size() == 0) {
    +            eof = true;
    +          }
    +        } catch (TException e) {
    +          // Iterator methods cannot throw checked exceptions, so the
    +          // thrift failure is rethrown unchecked.
    +          throw new RuntimeException(e);
    +        }
    +      }
    +    }
    +    
    +    @Override
    +    public synchronized boolean hasNext() {
    +      if (isCacheEmpty()) {
    +        readMore();
    +      }
    +      return !eof;
    +    }
    +
    +    @Override
    +    public synchronized String next() {
    +      // hasNext() also refills the page cache when it is exhausted.
    +      if (!hasNext()) {
    +        throw new NoSuchElementException();
    +      }
    +      String ret = listBlobs.get_keys().get(offset);
    +      offset++;
    +      return ret;
    +    }
    +    
    +    @Override
    +    public void remove() {
    +      throw new UnsupportedOperationException("Delete Not Supported");
    +    }
    +  }
    +
    +  public class NimbusDownloadInputStream extends InputStreamWithMeta {
    +    private BeginDownloadResult beginBlobDownload;
    +    private byte[] buffer = null;
    +    private int offset = 0;
    +    private int end = 0;
    +    private boolean eof = false;
    +
    +    /**
    +     * Wraps the result of beginning a blob download so the blob's
    +     * contents can be streamed from nimbus in buffered chunks.
    +     */
    +    public NimbusDownloadInputStream(BeginDownloadResult 
beginBlobDownload) {
    +      this.beginBlobDownload = beginBlobDownload;
    +    }
    +
    +    /**
    +     * @return the blob version reported by nimbus when the download
    +     *         was begun (taken from the BeginDownloadResult)
    +     */
    +    @Override
    +    public long getVersion() throws IOException {
    +      return beginBlobDownload.get_version();
    +    }
    +
    +    /**
    +     * Reads one byte of the blob, fetching another chunk from nimbus
    +     * when the local buffer is exhausted.
    +     *
    +     * @return the next byte as an unsigned value in the range 0-255,
    +     *         or -1 at end of stream
    +     * @throws IOException if fetching more data fails
    +     */
    +    @Override
    +    public synchronized int read() throws IOException {
    +      if (isEmpty()) {
    +        readMore();
    +        if (eof) {
    +          return -1;
    +        }
    +      }
    +      if (available() == 0) {
    +        return -1;
    +      }
    +      // Mask to an unsigned value: InputStream.read() must return
    +      // 0-255, never a sign-extended (negative) byte — otherwise any
    +      // byte >= 0x80 would be misread by callers as end-of-stream.
    +      int ret = buffer[offset] & 0xff;
    +      offset++;
    +      return ret;
    +    }
    +    
    +    @Override 
    +    public synchronized int read(byte[] b, int off, int len) throws 
IOException {
    +      if (isEmpty()) {
    +        readMore();
    --- End diff --
    
    Is it possible to just return the ByteBuffer to the caller, to avoid the 
unnecessary double copying here?


> Dist Cache: Basic Functionality
> -------------------------------
>
>                 Key: STORM-876
>                 URL: https://issues.apache.org/jira/browse/STORM-876
>             Project: Apache Storm
>          Issue Type: Improvement
>          Components: storm-core
>            Reporter: Robert Joseph Evans
>            Assignee: Robert Joseph Evans
>         Attachments: DISTCACHE.md, DistributedCacheDesignDocument.pdf
>
>
> Basic functionality for the Dist Cache feature.
> As part of this a new API should be added to support uploading and 
> downloading dist cache items.  storm-core.ser, storm-conf.ser and storm.jar 
> should be written into the blob store instead of residing locally. We need a 
> default implementation of the blob store that does essentially what nimbus 
> currently does and does not need anything extra.  But having an HDFS backend 
> too would be great for scalability and HA.
> The supervisor should provide a way to download and manage these blobs and 
> provide a working directory for the worker process with symlinks to the 
> blobs.  It should also allow the blobs to be updated and switch the symlink 
> atomically to point to the new blob once it is downloaded.
> All of this is already done by code internal to Yahoo!; we are in the process 
> of getting it ready to push back to open source shortly.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

Reply via email to