[ https://issues.apache.org/jira/browse/STORM-876?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15031228#comment-15031228 ]
ASF GitHub Bot commented on STORM-876:
--------------------------------------
Github user redsanket commented on a diff in the pull request:
https://github.com/apache/storm/pull/845#discussion_r46101477
--- Diff: external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java ---
@@ -0,0 +1,518 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.hdfs.blobstore;
+
+import backtype.storm.Config;
+import backtype.storm.blobstore.AtomicOutputStream;
+import backtype.storm.blobstore.BlobStore;
+import backtype.storm.blobstore.BlobStoreAclHandler;
+import backtype.storm.generated.AccessControl;
+import backtype.storm.generated.AuthorizationException;
+import backtype.storm.generated.KeyAlreadyExistsException;
+import backtype.storm.generated.KeyNotFoundException;
+import backtype.storm.generated.ReadableBlobMeta;
+import backtype.storm.generated.SettableBlobMeta;
+import backtype.storm.generated.AccessControlType;
+
+import backtype.storm.security.auth.NimbusPrincipal;
+import backtype.storm.security.auth.SingleUserPrincipal;
+import backtype.storm.utils.Utils;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.UUID;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Iterator;
+import java.util.Arrays;
+import java.util.List;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.*;
+
+public class BlobStoreTest {
+ private static final Logger LOG = LoggerFactory.getLogger(BlobStoreTest.class);
+ protected static MiniDFSCluster dfscluster = null;
+ protected static Configuration hadoopConf = null;
+ URI base;
+ File baseFile;
+ private static Map conf = new HashMap();
+ public static final int READ = 0x01;
+ public static final int WRITE = 0x02;
+ public static final int ADMIN = 0x04;
+
+ @Before
+ public void init() {
+ initializeConfigs();
+ baseFile = new File("/tmp/blob-store-test-"+UUID.randomUUID());
+ base = baseFile.toURI();
+ }
+
+ @After
+ public void cleanup() throws IOException {
+ FileUtils.deleteDirectory(baseFile);
+ }
+
+ @AfterClass
+ public static void cleanupAfterClass() throws IOException {
+ if (dfscluster != null) {
+ dfscluster.shutdown();
+ }
+ }
+
+ // Initializes the nimbus admin and supervisor users in the test config
+ public static void initializeConfigs() {
+ conf.put(Config.NIMBUS_ADMINS,"admin");
+ conf.put(Config.NIMBUS_SUPERVISOR_USERS,"supervisor");
+ }
+
+ // Gets Nimbus Subject with NimbusPrincipal set on it
+ public static Subject getNimbusSubject() {
+ Subject nimbus = new Subject();
+ nimbus.getPrincipals().add(new NimbusPrincipal());
+ return nimbus;
+ }
+
+ // Overloading the assertStoreHasExactly method to accommodate Subject in order to check for authorization
+ public static void assertStoreHasExactly(BlobStore store, Subject who, String... keys)
+ throws IOException, KeyNotFoundException, AuthorizationException {
+ Set<String> expected = new HashSet<String>(Arrays.asList(keys));
+ Set<String> found = new HashSet<String>();
+ Iterator<String> c = store.listKeys();
+ while (c.hasNext()) {
+ String keyName = c.next();
+ found.add(keyName);
+ }
+ Set<String> extra = new HashSet<String>(found);
+ extra.removeAll(expected);
+ assertTrue("Found extra keys in the blob store "+extra,
extra.isEmpty());
+ Set<String> missing = new HashSet<String>(expected);
+ missing.removeAll(found);
+ assertTrue("Found keys missing from the blob store "+missing,
missing.isEmpty());
+ }
+
+ public static void assertStoreHasExactly(BlobStore store, String... keys)
+ throws IOException, KeyNotFoundException, AuthorizationException {
+ assertStoreHasExactly(store, null, keys);
+ }
+
+ // Overloading the readInt method to accommodate Subject in order to check for authorization (security turned on)
+ public static int readInt(BlobStore store, Subject who, String key) throws IOException, KeyNotFoundException, AuthorizationException {
--- End diff ---
addressed
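
For context, the body of readInt is elided where the diff is cut off above. A typical shape for such a helper, sketched here as an assumption rather than a quote of the actual patch, opens the blob as the given Subject (so the store performs its authorization check) and reads back the single byte the test wrote:

    // Illustrative sketch only; the actual body is elided by the diff above.
    public static int readInt(BlobStore store, Subject who, String key)
            throws IOException, KeyNotFoundException, AuthorizationException {
        InputStream in = store.getBlob(key, who);   // store checks ACLs for 'who'
        try {
            return in.read();                       // the tests write a single byte
        } finally {
            in.close();
        }
    }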
> Dist Cache: Basic Functionality
> -------------------------------
>
> Key: STORM-876
> URL: https://issues.apache.org/jira/browse/STORM-876
> Project: Apache Storm
> Issue Type: Improvement
> Components: storm-core
> Reporter: Robert Joseph Evans
> Assignee: Robert Joseph Evans
> Attachments: DISTCACHE.md, DistributedCacheDesignDocument.pdf
>
>
> Basic functionality for the Dist Cache feature.
> As part of this, a new API should be added to support uploading and
> downloading dist cache items. storm-core.ser, storm-conf.ser and storm.jar
> should be written into the blob store instead of residing locally. We need a
> default implementation of the blob store that does essentially what nimbus
> currently does and requires nothing extra, but having an HDFS backend as
> well would be great for scalability and HA.
> The supervisor should provide a way to download and manage these blobs and
> provide a working directory for the worker process with symlinks to the
> blobs. It should also allow the blobs to be updated and switch the symlink
> atomically to point to the new blob once it is downloaded.
> All of this is already done by code internal to Yahoo!; we are in the
> process of getting it ready to push back to open source shortly.
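
The upload/download API mentioned in the description is the one exercised by the test above (BlobStore, AtomicOutputStream, SettableBlobMeta). A minimal create/read round trip, sketched under the assumption that the signatures match the imports in the test, not as the final API:

    // Illustrative sketch; signatures assumed from the test's imports.
    SettableBlobMeta meta = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING);
    AtomicOutputStream out = store.createBlob("my-key", meta, getNimbusSubject());
    out.write(1);    // upload: write the blob's contents
    out.close();     // commit the blob atomically

    InputStream in = store.getBlob("my-key", getNimbusSubject());
    int value = in.read();  // download: read the contents back
    in.close();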
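
For the atomic symlink switch on the supervisor side, one common pattern (a hypothetical helper, not Storm's actual supervisor code) is to create the new link under a temporary name and rename it over the live one, since rename() is atomic on POSIX filesystems:

    // Hypothetical sketch using java.nio.file; names are illustrative.
    static void switchBlobLink(Path liveLink, Path newBlob) throws IOException {
        Path tmpLink = liveLink.resolveSibling(liveLink.getFileName() + ".tmp");
        Files.deleteIfExists(tmpLink);
        Files.createSymbolicLink(tmpLink, newBlob);
        // The worker sees either the old target or the new one, never a
        // missing link, because the rename replaces liveLink in one step.
        Files.move(tmpLink, liveLink, StandardCopyOption.ATOMIC_MOVE);
    }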