hadoop git commit: HDDS-678. Format of Last-Modified header is invalid in HEAD Object call. Contributed by Elek Marton.

2018-10-17 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 6d0883362 -> ba7d46d21


HDDS-678. Format of Last-Modified header is invalid in HEAD Object call. 
Contributed by Elek Marton.

(cherry picked from commit 3ed71633029e1d2d85b97e77b54e95d474066b4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba7d46d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba7d46d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba7d46d2

Branch: refs/heads/ozone-0.3
Commit: ba7d46d21ce2ee07a693914a742e89c8f31f2d11
Parents: 6d08833
Author: Bharat Viswanadham 
Authored: Wed Oct 17 20:44:41 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed Oct 17 20:45:43 2018 -0700

--
 .../apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java  | 11 ++-
 .../apache/hadoop/ozone/s3/endpoint/TestObjectHead.java  |  5 +
 2 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba7d46d2/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d32bc9f..15ad2c4 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -34,6 +34,10 @@ import javax.ws.rs.core.Response.Status;
 import javax.ws.rs.core.StreamingOutput;
 import java.io.IOException;
 import java.io.InputStream;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -180,8 +184,13 @@ public class ObjectEndpoint extends EndpointBase {
   }
 }
 
+ZonedDateTime lastModificationTime =
+Instant.ofEpochMilli(key.getModificationTime())
+.atZone(ZoneId.of("GMT"));
+
 return Response.ok().status(HttpStatus.SC_OK)
-.header("Last-Modified", key.getModificationTime())
+.header("Last-Modified",
+DateTimeFormatter.RFC_1123_DATE_TIME.format(lastModificationTime))
 .header("ETag", "" + key.getModificationTime())
 .header("Content-Length", key.getDataSize())
 .header("Content-Type", "binary/octet-stream")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba7d46d2/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
index 446c2c9..6c166d7 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.ws.rs.core.Response;
 import java.io.IOException;
+import java.time.format.DateTimeFormatter;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -80,6 +81,10 @@ public class TestObjectHead {
 Assert.assertEquals(200, response.getStatus());
 Assert.assertEquals(value.getBytes().length,
 Long.parseLong(response.getHeaderString("Content-Length")));
+
+DateTimeFormatter.RFC_1123_DATE_TIME
+.parse(response.getHeaderString("Last-Modified"));
+
   }
 
   @Test





hadoop git commit: HDDS-678. Format of Last-Modified header is invalid in HEAD Object call. Contributed by Elek Marton.

2018-10-17 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9146d33e1 -> 3ed716330


HDDS-678. Format of Last-Modified header is invalid in HEAD Object call. 
Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ed71633
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ed71633
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ed71633

Branch: refs/heads/trunk
Commit: 3ed71633029e1d2d85b97e77b54e95d474066b4f
Parents: 9146d33
Author: Bharat Viswanadham 
Authored: Wed Oct 17 20:44:41 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed Oct 17 20:44:52 2018 -0700

--
 .../apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java  | 11 ++-
 .../apache/hadoop/ozone/s3/endpoint/TestObjectHead.java  |  5 +
 2 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ed71633/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d32bc9f..15ad2c4 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -34,6 +34,10 @@ import javax.ws.rs.core.Response.Status;
 import javax.ws.rs.core.StreamingOutput;
 import java.io.IOException;
 import java.io.InputStream;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -180,8 +184,13 @@ public class ObjectEndpoint extends EndpointBase {
   }
 }
 
+ZonedDateTime lastModificationTime =
+Instant.ofEpochMilli(key.getModificationTime())
+.atZone(ZoneId.of("GMT"));
+
 return Response.ok().status(HttpStatus.SC_OK)
-.header("Last-Modified", key.getModificationTime())
+.header("Last-Modified",
+DateTimeFormatter.RFC_1123_DATE_TIME.format(lastModificationTime))
 .header("ETag", "" + key.getModificationTime())
 .header("Content-Length", key.getDataSize())
 .header("Content-Type", "binary/octet-stream")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ed71633/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
index 446c2c9..6c166d7 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.ws.rs.core.Response;
 import java.io.IOException;
+import java.time.format.DateTimeFormatter;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -80,6 +81,10 @@ public class TestObjectHead {
 Assert.assertEquals(200, response.getStatus());
 Assert.assertEquals(value.getBytes().length,
 Long.parseLong(response.getHeaderString("Content-Length")));
+
+DateTimeFormatter.RFC_1123_DATE_TIME
+.parse(response.getHeaderString("Last-Modified"));
+
   }
 
   @Test





hadoop git commit: HDDS-670. Fix OzoneFS directory rename.

2018-10-17 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 3c11dec71 -> 6d0883362


HDDS-670. Fix OzoneFS directory rename.

(cherry picked from commit 9146d33e1843524885938f60c77b47d4f52e80fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d088336
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d088336
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d088336

Branch: refs/heads/ozone-0.3
Commit: 6d08833626d9f089013ff059a64df8ba50727018
Parents: 3c11dec
Author: Hanisha Koneru 
Authored: Wed Oct 17 17:51:29 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed Oct 17 17:54:32 2018 -0700

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   2 +-
 .../hadoop/fs/ozone/TestOzoneFsRenameDir.java   | 121 +++
 2 files changed, 122 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d088336/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index d52f3b7..50a63b5 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -349,7 +349,7 @@ public class OzoneFileSystem extends FileSystem {
 }
 
 if (srcStatus.isDirectory()) {
-  if (dst.toString().startsWith(src.toString())) {
+  if (dst.toString().startsWith(src.toString() + OZONE_URI_DELIMITER)) {
 LOG.trace("Cannot rename a directory to a subdirectory of self");
 return false;
   }
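
The one-line fix deserves a gloss: with the bare prefix test, renaming
/vol/bucket/dir to the sibling /vol/bucket/dir1 was wrongly rejected, because
"/vol/bucket/dir1".startsWith("/vol/bucket/dir") is true. Suffixing the source
path with OZONE_URI_DELIMITER restricts the match to genuine descendants. A
minimal sketch (paths are illustrative, not from the patch):

    public class RenamePrefixCheckDemo {
      private static final String OZONE_URI_DELIMITER = "/";

      public static void main(String[] args) {
        String src = "/vol/bucket/dir";
        String sibling = "/vol/bucket/dir1";   // legal rename target
        String child = "/vol/bucket/dir/sub";  // genuine descendant of src

        // Old check: false positive on the sibling.
        System.out.println(sibling.startsWith(src));                        // true
        // New check: only real descendants match.
        System.out.println(sibling.startsWith(src + OZONE_URI_DELIMITER));  // false
        System.out.println(child.startsWith(src + OZONE_URI_DELIMITER));    // true
      }
    }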

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d088336/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
new file mode 100644
index 000..0c639e7
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit Test for verifying directory rename operation through OzoneFS.
+ */
+public class TestOzoneFsRenameDir {
+  public static final Logger LOG = LoggerFactory.getLogger(
+  TestOzoneFsRenameDir.class);
+
+  private MiniOzoneCluster cluster = null;
+  private OzoneConfiguration conf = null;
+  private static StorageHandler storageHandler;
+  private static FileSystem fs;
+
+  @Before
+  public void init() throws Exception {
+conf = new OzoneConfiguration();
+cluster = MiniOzoneCluster.newBuilder(conf)
+.setNumDatanodes(1)
+.build();
+cluster.waitForClusterToBeReady();

hadoop git commit: HDDS-670. Fix OzoneFS directory rename.

2018-10-17 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c4364ddd -> 9146d33e1


HDDS-670. Fix OzoneFS directory rename.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9146d33e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9146d33e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9146d33e

Branch: refs/heads/trunk
Commit: 9146d33e1843524885938f60c77b47d4f52e80fb
Parents: 4c4364d
Author: Hanisha Koneru 
Authored: Wed Oct 17 17:51:29 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed Oct 17 17:51:29 2018 -0700

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   2 +-
 .../hadoop/fs/ozone/TestOzoneFsRenameDir.java   | 121 +++
 2 files changed, 122 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9146d33e/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index d52f3b7..50a63b5 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -349,7 +349,7 @@ public class OzoneFileSystem extends FileSystem {
 }
 
 if (srcStatus.isDirectory()) {
-  if (dst.toString().startsWith(src.toString())) {
+  if (dst.toString().startsWith(src.toString() + OZONE_URI_DELIMITER)) {
 LOG.trace("Cannot rename a directory to a subdirectory of self");
 return false;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9146d33e/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
new file mode 100644
index 000..0c639e7
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit Test for verifying directory rename operation through OzoneFS.
+ */
+public class TestOzoneFsRenameDir {
+  public static final Logger LOG = LoggerFactory.getLogger(
+  TestOzoneFsRenameDir.class);
+
+  private MiniOzoneCluster cluster = null;
+  private OzoneConfiguration conf = null;
+  private static StorageHandler storageHandler;
+  private static FileSystem fs;
+
+  @Before
+  public void init() throws Exception {
+conf = new OzoneConfiguration();
+cluster = MiniOzoneCluster.newBuilder(conf)
+.setNumDatanodes(1)
+.build();
+cluster.waitForClusterToBeReady();
+storageHandler =
+new ObjectStoreHandler(conf).getStorageHandler();

hadoop git commit: HDDS-683. Add a shell command to provide ozone mapping for a S3Bucket. Contributed by Bharat Viswanadham.

2018-10-17 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 106be1416 -> 3c11dec71


HDDS-683. Add a shell command to provide ozone mapping for a S3Bucket. 
Contributed by Bharat Viswanadham.

(cherry picked from commit 4c4364ddd06927fa990d3e66d4eadbd4a7eb9474)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c11dec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c11dec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c11dec7

Branch: refs/heads/ozone-0.3
Commit: 3c11dec714b193525ba5e6190407f2156110d71a
Parents: 106be14
Author: Bharat Viswanadham 
Authored: Wed Oct 17 16:46:06 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed Oct 17 16:47:11 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 42 +
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  5 +-
 .../web/ozShell/bucket/BucketCommands.java  |  3 +-
 .../web/ozShell/bucket/S3BucketMapping.java | 99 
 4 files changed, 147 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c11dec7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 1576d41..c736f81 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -1076,6 +1076,48 @@ public class TestOzoneShell {
 executeWithError(shell, args, "the length should be a positive number");
   }
 
+  @Test
+  public void testS3BucketMapping() throws  IOException {
+String s3Bucket = "bucket1";
+String commandOutput;
+createS3Bucket("ozone", s3Bucket);
+String volumeName = client.getOzoneVolumeName(s3Bucket);
+String[] args = new String[] {"bucket", "path", url + "/" + s3Bucket};
+if (url.startsWith("o3")) {
+  execute(shell, args);
+  commandOutput = out.toString();
+  assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
+  volumeName));
+  assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME +"://" +
+  s3Bucket + "." + volumeName));
+  out.reset();
+  //Trying to get map for an unknown bucket
+  args = new String[] {"bucket", "path", url + "/" + "unknownbucket"};
+  executeWithError(shell, args, "S3_BUCKET_NOT_FOUND");
+} else {
+  executeWithError(shell, args, "Ozone REST protocol does not support " +
+  "this operation");
+}
+
+// No bucket name
+args = new String[] {"bucket", "path", url};
+executeWithError(shell, args, "S3Bucket name is required");
+
+// Invalid bucket name
+args = new String[] {"bucket", "path", url + "/" + s3Bucket +
+  "/multipleslash"};
+executeWithError(shell, args, "Invalid S3Bucket name. Delimiters (/) not" +
+" allowed");
+  }
+
+  private void createS3Bucket(String userName, String s3Bucket) {
+try {
+  client.createS3Bucket("ozone", s3Bucket);
+} catch (IOException ex) {
+  GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
+}
+  }
+
   private OzoneVolume creatVolume() throws OzoneException, IOException {
 String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
 VolumeArgs volumeArgs = VolumeArgs.newBuilder()
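
For the API-level view: the new "ozone sh bucket path" subcommand wraps the
lookup the test exercises via client.getOzoneVolumeName(s3Bucket). A hedged
sketch of the equivalent programmatic call; the OzoneClientFactory/ObjectStore
wiring below is an assumption about a client configured against a reachable
Ozone Manager, not code from this patch:

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public class S3BucketMappingDemo {
      public static void main(String[] args) throws IOException {
        String s3Bucket = "bucket1";
        try (OzoneClient client =
                 OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
          // Resolve the Ozone volume backing the S3 bucket, as the shell
          // command does before printing the o3fs://bucket.volume URI.
          String volumeName =
              client.getObjectStore().getOzoneVolumeName(s3Bucket);
          System.out.println("Volume name for S3Bucket is : " + volumeName);
        }
      }
    }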

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c11dec7/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 0f3969f..28ab900 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -51,7 +51,7 @@ public class Shell extends GenericCli {
   public static final String OZONE_URI_DESCRIPTION = "Ozone URI could start "
   + "with o3:// or http(s):// or without prefix. REST protocol will "
   + "be used for http(s), RPC otherwise. URI may contain the host and port 
"
-  + "of the SCM server. Both are optional. "
+  + "of the OM server. Both are optional. "
   + "If they are not specified it will be identified from "
   + "the config files.";
 
@@ -64,6 +64,9 @@ public class Shell extends GenericCli {

hadoop git commit: HDDS-683. Add a shell command to provide ozone mapping for a S3Bucket. Contributed by Bharat Viswanadham.

2018-10-17 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7347fa2df -> 4c4364ddd


HDDS-683. Add a shell command to provide ozone mapping for a S3Bucket. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c4364dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c4364dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c4364dd

Branch: refs/heads/trunk
Commit: 4c4364ddd06927fa990d3e66d4eadbd4a7eb9474
Parents: 7347fa2
Author: Bharat Viswanadham 
Authored: Wed Oct 17 16:46:06 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed Oct 17 16:46:13 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 42 +
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  5 +-
 .../web/ozShell/bucket/BucketCommands.java  |  3 +-
 .../web/ozShell/bucket/S3BucketMapping.java | 99 
 4 files changed, 147 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4364dd/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 03efa1c..a7eecc0 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -1124,6 +1124,48 @@ public class TestOzoneShell {
 executeWithError(shell, args, "the length should be a positive number");
   }
 
+  @Test
+  public void testS3BucketMapping() throws  IOException {
+String s3Bucket = "bucket1";
+String commandOutput;
+createS3Bucket("ozone", s3Bucket);
+String volumeName = client.getOzoneVolumeName(s3Bucket);
+String[] args = new String[] {"bucket", "path", url + "/" + s3Bucket};
+if (url.startsWith("o3")) {
+  execute(shell, args);
+  commandOutput = out.toString();
+  assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
+  volumeName));
+  assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME +"://" +
+  s3Bucket + "." + volumeName));
+  out.reset();
+  //Trying to get map for an unknown bucket
+  args = new String[] {"bucket", "path", url + "/" + "unknownbucket"};
+  executeWithError(shell, args, "S3_BUCKET_NOT_FOUND");
+} else {
+  executeWithError(shell, args, "Ozone REST protocol does not support " +
+  "this operation");
+}
+
+// No bucket name
+args = new String[] {"bucket", "path", url};
+executeWithError(shell, args, "S3Bucket name is required");
+
+// Invalid bucket name
+args = new String[] {"bucket", "path", url + "/" + s3Bucket +
+  "/multipleslash"};
+executeWithError(shell, args, "Invalid S3Bucket name. Delimiters (/) not" +
+" allowed");
+  }
+
+  private void createS3Bucket(String userName, String s3Bucket) {
+try {
+  client.createS3Bucket("ozone", s3Bucket);
+} catch (IOException ex) {
+  GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
+}
+  }
+
   private OzoneVolume creatVolume() throws OzoneException, IOException {
 String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
 VolumeArgs volumeArgs = VolumeArgs.newBuilder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4364dd/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 0f3969f..28ab900 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -51,7 +51,7 @@ public class Shell extends GenericCli {
   public static final String OZONE_URI_DESCRIPTION = "Ozone URI could start "
   + "with o3:// or http(s):// or without prefix. REST protocol will "
   + "be used for http(s), RPC otherwise. URI may contain the host and port 
"
-  + "of the SCM server. Both are optional. "
+  + "of the OM server. Both are optional. "
   + "If they are not specified it will be identified from "
   + "the config files.";
 
@@ -64,6 +64,9 @@ public class Shell extends GenericCli {
   public static final String OZONE_KEY_URI_DESCRIPTION =
   "URI of 

[2/4] hadoop git commit: HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. Contributed by CR Hota.

2018-10-17 Thread inigoiri
HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. 
Contributed by CR Hota.

(cherry picked from commit 7347fa2df3cfd48857f4ded3539a60101d026daf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30b65ea1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30b65ea1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30b65ea1

Branch: refs/heads/branch-3.2
Commit: 30b65ea18f8d61970545c5fdc4b736d8002d0b60
Parents: 30fc596
Author: Inigo Goiri 
Authored: Wed Oct 17 16:40:25 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Oct 17 16:41:43 2018 -0700

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30b65ea1/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index edc9918..72bf6af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -179,11 +179,11 @@ The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-
 
 Once the Router is configured, it can be started:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start dfsrouter
 
 And to stop it:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop dfsrouter
 
 ### Mount table management
 





[4/4] hadoop git commit: HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. Contributed by CR Hota.

2018-10-17 Thread inigoiri
HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. 
Contributed by CR Hota.

(cherry picked from commit 7347fa2df3cfd48857f4ded3539a60101d026daf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea19a361
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea19a361
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea19a361

Branch: refs/heads/branch-3.0
Commit: ea19a361325402cde70a0ae9959ac6a248c2c80b
Parents: 0aee3a0
Author: Inigo Goiri 
Authored: Wed Oct 17 16:40:25 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Oct 17 16:43:05 2018 -0700

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea19a361/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 4f12e23..2ee3004 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -174,11 +174,11 @@ The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-
 
 Once the Router is configured, it can be started:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start dfsrouter
 
 And to stop it:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop dfsrouter
 
 ### Mount table management
 





[1/4] hadoop git commit: HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. Contributed by CR Hota.

2018-10-17 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0aee3a0c3 -> ea19a3613
  refs/heads/branch-3.1 65b27f8ed -> 16e1ab4c9
  refs/heads/branch-3.2 30fc5966a -> 30b65ea18
  refs/heads/trunk d93d515af -> 7347fa2df


HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. 
Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7347fa2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7347fa2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7347fa2d

Branch: refs/heads/trunk
Commit: 7347fa2df3cfd48857f4ded3539a60101d026daf
Parents: d93d515
Author: Inigo Goiri 
Authored: Wed Oct 17 16:40:25 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Oct 17 16:40:25 2018 -0700

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7347fa2d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index edc9918..72bf6af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -179,11 +179,11 @@ The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-
 
 Once the Router is configured, it can be started:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start dfsrouter
 
 And to stop it:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop dfsrouter
 
 ### Mount table management
 





[3/4] hadoop git commit: HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. Contributed by CR Hota.

2018-10-17 Thread inigoiri
HDFS-14000. RBF: Documentation should reflect right scripts for v3.0 and above. 
Contributed by CR Hota.

(cherry picked from commit 7347fa2df3cfd48857f4ded3539a60101d026daf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16e1ab4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16e1ab4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16e1ab4c

Branch: refs/heads/branch-3.1
Commit: 16e1ab4c991943e516ece6163a90d99b67c1d421
Parents: 65b27f8
Author: Inigo Goiri 
Authored: Wed Oct 17 16:40:25 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Oct 17 16:42:32 2018 -0700

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16e1ab4c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 62b1107..47b049e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -179,11 +179,11 @@ The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-
 
 Once the Router is configured, it can be started:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start dfsrouter
 
 And to stop it:
 
-[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop dfsrouter
 
 ### Mount table management
 





hadoop git commit: HDDS-651. Rename o3 to o3fs for Filesystem.

2018-10-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 51a79cc71 -> 106be1416


HDDS-651. Rename o3 to o3fs for Filesystem.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/106be141
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/106be141
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/106be141

Branch: refs/heads/ozone-0.3
Commit: 106be1416dfe6e6bbf90d3653edca5eb2ef54d44
Parents: 51a79cc
Author: Jitendra Pandey 
Authored: Wed Oct 17 14:18:46 2018 -0700
Committer: Jitendra Pandey 
Committed: Wed Oct 17 14:32:22 2018 -0700

--
 .../src/main/resources/core-default.xml | 13 -
 .../conf/TestCommonConfigurationFields.java |  2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 +-
 .../dist/src/main/compose/ozonefs/docker-config |  2 +-
 .../src/main/smoketest/ozonefs/ozonefs.robot| 52 ++--
 hadoop-ozone/docs/content/OzoneFS.md|  6 +--
 .../hadoop/ozone/web/ozShell/Handler.java   |  6 +--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |  2 +-
 .../fs/ozone/TestOzoneFileInterfaces.java   |  2 +-
 9 files changed, 40 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f3167f2..c51030b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1772,19 +1772,6 @@
 
 
 
-
-
-  fs.o3.impl
-  org.apache.hadoop.fs.ozone.OzoneFileSystem
-  The implementation class of the Ozone FileSystem.
-
-
-
-  fs.AbstractFileSystem.o3.impl
-  org.apache.hadoop.fs.ozone.OzFs
-  The implementation class of the OzFs 
AbstractFileSystem.
-
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index e10617d..50af230 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -102,7 +102,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3a.");
 
 // O3 properties are in a different subtree.
-xmlPrefixToSkipCompare.add("fs.o3.");
+xmlPrefixToSkipCompare.add("fs.o3fs.");
 
 //ftp properties are in a different subtree.
 // - org.apache.hadoop.fs.ftp.FTPFileSystem.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..b77d621 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -63,7 +63,10 @@ public final class OzoneConsts {
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
-  public static final String OZONE_URI_SCHEME = "o3";
+  // Ozone File System scheme
+  public static final String OZONE_URI_SCHEME = "o3fs";
+
+  public static final String OZONE_RPC_SCHEME = "o3";
   public static final String OZONE_HTTP_SCHEME = "http";
   public static final String OZONE_URI_DELIMITER = "/";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 675dcba..5061afa 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem

hadoop git commit: HDDS-651. Rename o3 to o3fs for Filesystem.

2018-10-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9abda8394 -> d93d515af


HDDS-651. Rename o3 to o3fs for Filesystem.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93d515a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93d515a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93d515a

Branch: refs/heads/trunk
Commit: d93d515af50055f7743d8fffd563268416d05212
Parents: 9abda83
Author: Jitendra Pandey 
Authored: Wed Oct 17 14:18:46 2018 -0700
Committer: Jitendra Pandey 
Committed: Wed Oct 17 14:19:17 2018 -0700

--
 .../src/main/resources/core-default.xml | 13 -
 .../conf/TestCommonConfigurationFields.java |  2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 +-
 .../dist/src/main/compose/ozonefs/docker-config |  2 +-
 .../src/main/smoketest/ozonefs/ozonefs.robot| 52 ++--
 hadoop-ozone/docs/content/OzoneFS.md|  6 +--
 .../hadoop/ozone/web/ozShell/Handler.java   |  6 +--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |  2 +-
 .../fs/ozone/TestOzoneFileInterfaces.java   |  2 +-
 9 files changed, 40 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 599396f..ce3a407 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1788,19 +1788,6 @@
 
 
 
-
-
-  fs.o3.impl
-  org.apache.hadoop.fs.ozone.OzoneFileSystem
-  The implementation class of the Ozone FileSystem.
-
-
-
-  fs.AbstractFileSystem.o3.impl
-  org.apache.hadoop.fs.ozone.OzFs
-  The implementation class of the OzFs 
AbstractFileSystem.
-
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 2766b56..3a4bcce 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,7 +103,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3a.");
 
 // O3 properties are in a different subtree.
-xmlPrefixToSkipCompare.add("fs.o3.");
+xmlPrefixToSkipCompare.add("fs.o3fs.");
 
 //ftp properties are in a different subtree.
 // - org.apache.hadoop.fs.ftp.FTPFileSystem.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..b77d621 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -63,7 +63,10 @@ public final class OzoneConsts {
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
-  public static final String OZONE_URI_SCHEME = "o3";
+  // Ozone File System scheme
+  public static final String OZONE_URI_SCHEME = "o3fs";
+
+  public static final String OZONE_RPC_SCHEME = "o3";
   public static final String OZONE_HTTP_SCHEME = "http";
   public static final String OZONE_URI_DELIMITER = "/";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 675dcba..5061afa 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a1d75 -> 622919d8d


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/622919d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/622919d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/622919d8

Branch: refs/heads/branch-2.9
Commit: 622919d8dbde1eb878f2f82252e9b26c9ebcabbb
Parents: a1d
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:46:58 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/622919d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/622919d8/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compres
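
The bug behind the title: several zstd JNI stubs for static native methods
declared their second parameter as "jobject this", but for a static native
method the JVM passes the defining class (a jclass), not an instance. A
minimal sketch of the convention (hypothetical class, not Hadoop's):

    public class NativeBinding {
      // Instance method: the C stub receives the receiver object.
      //   JNIEXPORT jint JNICALL
      //   Java_NativeBinding_instanceSize(JNIEnv *env, jobject self);
      public native int instanceSize();

      // Static method: the C stub receives the class, so declaring the
      // parameter "jobject this" mislabels what the JVM actually passes.
      //   JNIEXPORT jint JNICALL
      //   Java_NativeBinding_staticSize(JNIEnv *env, jclass clazz);
      public static native int staticSize();
    }

The Java-side hunk only reorders modifiers ("private static native" to
"private native static") for declaration consistency; the behavioral fix is
entirely in the C stubs.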

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e212d7d81 -> e412d8f6c


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e412d8f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e412d8f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e412d8f6

Branch: refs/heads/branch-2
Commit: e412d8f6cd8ac4677fd8d5d6ebbb38f628a3854f
Parents: e212d7d8
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:45:56 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e412d8f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e412d8f6/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compress_z

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ae42d59eb -> 0aee3a0c3


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0aee3a0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0aee3a0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0aee3a0c

Branch: refs/heads/branch-3.0
Commit: 0aee3a0c3f69dc55a9066b3c31081650e425889c
Parents: ae42d59
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:44:39 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aee3a0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aee3a0c/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compres

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9c350785d -> 65b27f8ed


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65b27f8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65b27f8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65b27f8e

Branch: refs/heads/branch-3.1
Commit: 65b27f8ed2824b40bc7730eaa960da1b759fda43
Parents: 9c35078
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:43:19 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b27f8e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b27f8e/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compres

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 6380ee551 -> 30fc5966a


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30fc5966
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30fc5966
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30fc5966

Branch: refs/heads/branch-3.2
Commit: 30fc5966a2445a3ec559f840626770f39efedbbf
Parents: 6380ee5
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:40:57 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5966/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5966/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compres

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 24dc068a3 -> 9abda8394


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe
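
For reference, this is the JNI calling convention the patch restores: for a
static native method the JVM passes the defining class, not an instance, as
the second C argument. A minimal sketch in Java, with illustrative names
rather than the Hadoop sources:

    public class NativeStreams {
      // A static native method on the Java side...
      private native static long create();
      // ...maps to a C stub that receives the class object:
      //   JNIEXPORT jlong JNICALL
      //   Java_NativeStreams_create(JNIEnv *env, jclass clazz);
      // An instance method would instead receive (JNIEnv *env, jobject this).
      // Declaring the parameter as jobject happens to work at the ABI level,
      // but treating it as an instance reference would be wrong.
    }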


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9abda839
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9abda839
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9abda839

Branch: refs/heads/trunk
Commit: 9abda83947a5babfe5a650b3409ad952f6782105
Parents: 24dc068
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:38:42 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_defla
 }
 
 JNIEXPORT jstring JNICALL 
Java_org_apach

hadoop git commit: HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout. Contributed by Adam Antal.

2018-10-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk d54f5598f -> 24dc068a3


HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout.
Contributed by Adam Antal.

Signed-off-by: Xiao Chen 
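
The new fs.ftp.timeout property (seconds, default 0 = disabled) is handed to
commons-net, which then issues NOOPs on the control channel during long
transfers so the session does not idle out. A minimal sketch of the wiring,
assuming only the property name and default taken from the patch:

    import org.apache.commons.net.ftp.FTPClient;
    import org.apache.hadoop.conf.Configuration;

    public class FtpTimeoutSketch {
      // Illustrative, standalone version of the new setTimeout step.
      static void applyTimeout(FTPClient client, Configuration conf) {
        long timeout = conf.getLong("fs.ftp.timeout", 0L); // seconds; 0 disables
        client.setControlKeepAliveTimeout(timeout);        // keep-alive NOOPs
      }
    }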


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24dc068a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24dc068a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24dc068a

Branch: refs/heads/trunk
Commit: 24dc068a361648b4e59e1807b07ff2239f41c740
Parents: d54f559
Author: Adam Antal 
Authored: Wed Oct 17 11:32:17 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 17 11:34:50 2018 -0700

--
 .../java/org/apache/hadoop/fs/ftp/FTPFileSystem.java | 13 +
 .../src/main/resources/core-default.xml  |  8 
 .../hadoop/conf/TestCommonConfigurationFields.java   |  1 +
 .../org/apache/hadoop/fs/ftp/TestFTPFileSystem.java  | 15 +++
 4 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 676c207..4b144bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -62,6 +62,7 @@ public class FTPFileSystem extends FileSystem {
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final long DEFAULT_TIMEOUT = 0;
   public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
   public static final String FS_FTP_HOST = "fs.ftp.host";
   public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
@@ -71,6 +72,7 @@ public class FTPFileSystem extends FileSystem {
   public static final String FS_FTP_TRANSFER_MODE = "fs.ftp.transfer.mode";
   public static final String E_SAME_DIRECTORY_ONLY =
   "only same directory renames are supported";
+  public static final String FS_FTP_TIMEOUT = "fs.ftp.timeout";
 
   private URI uri;
 
@@ -150,6 +152,7 @@ public class FTPFileSystem extends FileSystem {
   client.setFileTransferMode(getTransferMode(conf));
   client.setFileType(FTP.BINARY_FILE_TYPE);
   client.setBufferSize(DEFAULT_BUFFER_SIZE);
+  setTimeout(client, conf);
   setDataConnectionMode(client, conf);
 } else {
   throw new IOException("Login failed on server - " + host + ", port - "
@@ -160,6 +163,16 @@ public class FTPFileSystem extends FileSystem {
   }
 
   /**
+   * Set the FTPClient's timeout based on configuration.
+   * FS_FTP_TIMEOUT is set as timeout (defaults to DEFAULT_TIMEOUT).
+   */
+  @VisibleForTesting
+  void setTimeout(FTPClient client, Configuration conf) {
+long timeout = conf.getLong(FS_FTP_TIMEOUT, DEFAULT_TIMEOUT);
+client.setControlKeepAliveTimeout(timeout);
+  }
+
+  /**
* Set FTP's transfer mode based on configuration. Valid values are
* STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 32dd622..599396f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -926,6 +926,14 @@
 
 
 
+  fs.ftp.timeout
+  0
+  
+FTP filesystem's timeout in seconds.
+  
+
+
+
   fs.df.interval
   6
   Disk usage statistics refresh interval in msec.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index e10617d..2766b56 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCom

hadoop git commit: HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in dead lock. Contributed by Hanisha Koneru.

2018-10-17 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 b2ab592e0 -> 51a79cc71


HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in 
dead lock. Contributed by Hanisha Koneru.

(cherry picked from commit d54f5598f4ccd1031e8295a215a3183f3647031a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51a79cc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51a79cc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51a79cc7

Branch: refs/heads/ozone-0.3
Commit: 51a79cc71d6c35b9ec4d22de61587c8beda969d9
Parents: b2ab592
Author: Nandakumar 
Authored: Wed Oct 17 18:44:05 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 19:00:19 2018 +0530

--
 .../container/common/states/endpoint/VersionEndpointTask.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51a79cc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 2d04677..79fa174 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -75,7 +75,7 @@ public class VersionEndpointTask implements
 
   // Check volumes
   VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-  volumeSet.readLock();
+  volumeSet.writeLock();
   try {
 Map volumeMap = volumeSet.getVolumeMap();
 
@@ -94,12 +94,12 @@ public class VersionEndpointTask implements
   }
 }
 if (volumeSet.getVolumesList().size() == 0) {
-  // All volumes are inconsistent state
+  // All volumes are in inconsistent state
   throw new DiskOutOfSpaceException("All configured Volumes are in " +
   "Inconsistent State");
 }
   } finally {
-volumeSet.readUnlock();
+volumeSet.writeUnlock();
   }
 
   ozoneContainer.getDispatcher().setScmId(scmId);





hadoop git commit: HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in dead lock. Contributed by Hanisha Koneru.

2018-10-17 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 50715c069 -> d54f5598f


HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in 
dead lock. Contributed by Hanisha Koneru.
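
The hang is a classic lock upgrade: the version check held the VolumeSet
read lock, and failing a volume from inside that section needs the write
lock, which java.util.concurrent.locks.ReentrantReadWriteLock never grants
to a thread still holding the read lock. A minimal reproduction, with
illustrative names rather than the Hadoop sources:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockUpgradeSketch {
      public static void main(String[] args) {
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        lock.readLock().lock();
        try {
          // A check decides the volume must be failed, which needs to
          // mutate the volume map under the write lock:
          lock.writeLock().lock(); // blocks forever on its own read lock
        } finally {
          lock.readLock().unlock(); // never reached
        }
      }
    }

Taking the write lock up front, as the patch does, avoids the upgrade
entirely.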


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d54f5598
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d54f5598
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d54f5598

Branch: refs/heads/trunk
Commit: d54f5598f4ccd1031e8295a215a3183f3647031a
Parents: 50715c0
Author: Nandakumar 
Authored: Wed Oct 17 18:44:05 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 18:44:05 2018 +0530

--
 .../container/common/states/endpoint/VersionEndpointTask.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d54f5598/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 2d04677..79fa174 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -75,7 +75,7 @@ public class VersionEndpointTask implements
 
   // Check volumes
   VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-  volumeSet.readLock();
+  volumeSet.writeLock();
   try {
 Map volumeMap = volumeSet.getVolumeMap();
 
@@ -94,12 +94,12 @@ public class VersionEndpointTask implements
   }
 }
 if (volumeSet.getVolumesList().size() == 0) {
-  // All volumes are inconsistent state
+  // All volumes are in inconsistent state
   throw new DiskOutOfSpaceException("All configured Volumes are in " +
   "Inconsistent State");
 }
   } finally {
-volumeSet.readUnlock();
+volumeSet.writeUnlock();
   }
 
   ozoneContainer.getDispatcher().setScmId(scmId);





[2/4] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-17 Thread nanda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 3523499..badcec7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -169,7 +171,7 @@ public class SCMClientProtocolServer implements
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(remoteUser);
 return scm.getContainerManager()
-.getContainer(containerID);
+.getContainer(ContainerID.valueof(containerID));
   }
 
   @Override
@@ -177,8 +179,8 @@ public class SCMClientProtocolServer implements
   throws IOException {
 if (chillModePrecheck.isInChillMode()) {
   ContainerInfo contInfo = scm.getContainerManager()
-  .getContainer(containerID);
-  if (contInfo.isContainerOpen()) {
+  .getContainer(ContainerID.valueof(containerID));
+  if (contInfo.isOpen()) {
 if (!hasRequiredReplicas(contInfo)) {
   throw new SCMException("Open container " + containerID + " doesn't"
   + " have enough replicas to service this operation in "
@@ -189,7 +191,7 @@ public class SCMClientProtocolServer implements
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(null);
 return scm.getContainerManager()
-.getContainerWithPipeline(containerID);
+.getContainerWithPipeline(ContainerID.valueof(containerID));
   }
 
   /**
@@ -198,10 +200,10 @@ public class SCMClientProtocolServer implements
*/
   private boolean hasRequiredReplicas(ContainerInfo contInfo) {
 try{
-  return getScm().getContainerManager().getStateManager()
+  return getScm().getContainerManager()
   .getContainerReplicas(contInfo.containerID())
   .size() >= contInfo.getReplicationFactor().getNumber();
-} catch (SCMException ex) {
+} catch (ContainerNotFoundException ex) {
   // getContainerReplicas throws exception if no replica's exist for given
   // container.
   return false;
@@ -212,14 +214,14 @@ public class SCMClientProtocolServer implements
   public List listContainer(long startContainerID,
   int count) throws IOException {
 return scm.getContainerManager().
-listContainer(startContainerID, count);
+listContainer(ContainerID.valueof(startContainerID), count);
   }
 
   @Override
   public void deleteContainer(long containerID) throws IOException {
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(remoteUser);
-scm.getContainerManager().deleteContainer(containerID);
+
scm.getContainerManager().deleteContainer(ContainerID.valueof(containerID));
 
   }
 
@@ -257,10 +259,12 @@ public class SCMClientProtocolServer implements
   .ObjectStageChangeRequestProto.Op.create) {
 if (stage == StorageContainerLocationProtocolProtos
 .ObjectStageChangeRequestProto.Stage.begin) {
-  scm.getContainerManager().updateContainerState(id, HddsProtos
+  scm.getContainerManager().updateContainerState(
+  ContainerID.valueof(id), HddsProtos
   .LifeCycleEvent.CREATE);
 } else {
-  scm.getContainerManager().updateContainerState(id, HddsProtos
+  scm.getContainerManager().updateContainerState(
+  ContainerID.valueof(id), HddsProtos
   .LifeCycleEvent.CREATED);
 }
   } else {
@@ -268,10 +272,12 @@ public class SCMClientProtocolServer implements
 .ObjectStageChangeRequestProto.Op.close) {
   if (stage == StorageContainerLocationProtocolProtos
   .ObjectStageChangeRequestProto.Stage.begin) {
-scm.getContainerManager().updateContainerState(id, HddsProtos
+scm.getContainerManager()

[4/4] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-17 Thread nanda
HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. 
Contributed by Nanda kumar.
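
Alongside the new replica tracking, the refactor moves the ContainerManager
API from raw long identifiers to the ContainerID value type (visible in the
SCMClientProtocolServer hunk as ContainerID.valueof(containerID)). A sketch
of the general pattern, not the actual ContainerID class:

    public final class TypedIdSketch {
      // Wrapping the raw long makes it impossible to pass some other kind
      // of id where a container id is expected.
      static final class Id {
        private final long value;
        private Id(long value) { this.value = value; }
        static Id valueOf(long value) { return new Id(value); }
        long get() { return value; }
      }

      static void deleteContainer(Id id) { /* typed API boundary */ }

      public static void main(String[] args) {
        deleteContainer(Id.valueOf(42L)); // raw longs no longer cross the API
      }
    }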


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50715c06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50715c06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50715c06

Branch: refs/heads/trunk
Commit: 50715c0699b260363c40ef0729c83ac26cf0
Parents: a9a63ae
Author: Nandakumar 
Authored: Wed Oct 17 17:45:35 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 17:45:35 2018 +0530

--
 .../scm/client/ContainerOperationClient.java|   2 +-
 .../hadoop/hdds/scm/client/ScmClient.java   |   2 +-
 .../hdds/scm/container/ContainerException.java  |  46 ++
 .../hadoop/hdds/scm/container/ContainerID.java  |  28 +-
 .../hdds/scm/container/ContainerInfo.java   | 449 +++
 .../container/ContainerNotFoundException.java   |  44 ++
 .../ContainerReplicaNotFoundException.java  |  45 ++
 .../container/common/helpers/ContainerInfo.java | 482 
 .../common/helpers/ContainerWithPipeline.java   |   1 +
 .../StorageContainerLocationProtocol.java   |   2 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   2 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   2 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  17 +-
 .../report/CommandStatusReportPublisher.java|   2 +-
 .../common/report/TestReportPublisher.java  |  13 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  14 +-
 .../block/DatanodeDeletedBlockTransactions.java |   6 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java |   5 +-
 .../container/CloseContainerEventHandler.java   |  31 +-
 .../scm/container/CloseContainerWatcher.java|   3 +-
 .../hdds/scm/container/ContainerManager.java|  70 ++-
 .../hdds/scm/container/ContainerReplica.java| 197 +++
 .../scm/container/ContainerReportHandler.java   |  60 +-
 .../scm/container/ContainerStateManager.java| 242 
 .../hdds/scm/container/SCMContainerManager.java | 566 ---
 .../replication/ReplicationManager.java |  38 +-
 .../scm/container/states/ContainerStateMap.java | 267 +
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  74 ++-
 .../hdds/scm/server/SCMChillModeManager.java|   2 +-
 .../scm/server/SCMClientProtocolServer.java |  32 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   2 +-
 .../scm/server/StorageContainerManager.java |  22 +-
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |   2 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  48 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   2 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |  14 +-
 .../TestCloseContainerEventHandler.java |  22 +-
 .../container/TestContainerReportHandler.java   |  66 +--
 .../container/TestContainerStateManager.java|  60 +-
 .../scm/container/TestSCMContainerManager.java  | 117 ++--
 .../replication/TestReplicationManager.java |  38 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   2 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  | 195 ---
 .../scm/server/TestSCMChillModeManager.java |   2 +-
 .../container/TestCloseContainerWatcher.java|  12 +-
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  |   2 +-
 .../hdds/scm/cli/container/ListSubcommand.java  |   4 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   2 +-
 .../TestContainerStateManagerIntegration.java   | 219 ---
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   7 +-
 .../hdds/scm/pipeline/TestPipelineClose.java|  10 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java |  10 +-
 .../ozone/TestStorageContainerManager.java  |   4 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   4 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  10 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   6 +-
 .../commandhandler/TestBlockDeletion.java   |   3 +-
 .../TestCloseContainerByPipeline.java   |  10 +-
 .../TestCloseContainerHandler.java  |   4 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   2 +-
 .../hadoop/ozone/om/TestScmChillMode.java   |  14 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |   4 -
 .../genesis/BenchMarkContainerStateMap.java |  14 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   8 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   3 +-
 65 files changed, 1949 insertions(+), 1739 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/

[3/4] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-17 Thread nanda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 7078b8f..42b39f9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -17,17 +17,12 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -45,11 +40,8 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -116,7 +108,7 @@ import static 
org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
  * TimeOut Delete Container State Machine - if the container creating times 
out,
  * then Container State manager decides to delete the container.
  */
-public class ContainerStateManager implements Closeable {
+public class ContainerStateManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ContainerStateManager.class);
 
@@ -135,11 +127,10 @@ public class ContainerStateManager implements Closeable {
* TODO : Add Container Tags so we know which containers are owned by SCM.
*/
   @SuppressWarnings("unchecked")
-  public ContainerStateManager(Configuration configuration,
-  ContainerManager containerManager, PipelineSelector pipelineSelector) {
+  public ContainerStateManager(final Configuration configuration) {
 
 // Initialize the container state machine.
-Set finalStates = new HashSet();
+final Set finalStates = new HashSet();
 
 // These are the steady states of a container.
 finalStates.add(LifeCycleState.OPEN);
@@ -155,22 +146,9 @@ public class ContainerStateManager implements Closeable {
 ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
 StorageUnit.BYTES);
 
-lastUsedMap = new ConcurrentHashMap<>();
-containerCount = new AtomicLong(0);
-containers = new ContainerStateMap();
-  }
-
-  /**
-   * Return the info of all the containers kept by the in-memory mapping.
-   *
-   * @return the list of all container info.
-   */
-  public List getAllContainers() {
-List list = new ArrayList<>();
-
-//No Locking needed since the return value is an immutable map.
-containers.getContainerMap().forEach((key, value) -> list.add(value));
-return list;
+this.lastUsedMap = new ConcurrentHashMap<>();
+this.containerCount = new AtomicLong(0);
+this.containers = new ContainerStateMap();
   }
 
   /*
@@ -244,17 +222,15 @@ public class ContainerStateManager implements Closeable {
 LifeCycleEvent.CLEANUP);
   }
 
-  public void addExistingContainer(ContainerInfo containerInfo)
+  void loadContainer(final ContainerInfo containerInfo)
   throws SCMException {
 containers.addContainer(containerInfo);
-long containerID = containerInfo.getContainerID();
-if (containerCount.get() < containerID) {
-  containerCount.set(containerID);
-}
+containerCount.set(Long.max(
+containerInfo.getContainerID(), containerCount.get()));
   }
 
   /**
-   * allocates a new container based on the type, replication etc.
+   * Allocates a new container based on the type, replication etc.
*
* @param selector -- Pipeline selector class.
* @param type -- Replication type.
@@ -262,25 +238,22 @@ public class ContainerStateManager implements Closeable {
* @return ContainerWithPipeline
* @throws IOException  on Failure.
*/
-  public ContainerWithPipeline allocateContainer(PipelineSelector selector,
-  HddsProtos.ReplicationType type,
-  HddsProtos.ReplicationFactor replicationFactor, String owner)
+  Conta

[1/4] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-17 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk a9a63ae4a -> 50715c069


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index ed8b1e3..03c99ef 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.primitives.Longs;
 import java.util.Set;
 import java.util.UUID;
 import org.apache.commons.lang3.RandomUtils;
@@ -24,26 +23,22 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
+import java.util.concurrent.TimeoutException;
+
 import org.slf4j.event.Level;
 
 /**
@@ -57,7 +52,6 @@ public class TestContainerStateManagerIntegration {
   private StorageContainerManager scm;
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
-  private PipelineSelector selector;
   private String containerOwner = "OZONE";
 
 
@@ -70,8 +64,8 @@ public class TestContainerStateManagerIntegration {
 xceiverClientManager = new XceiverClientManager(conf);
 scm = cluster.getStorageContainerManager();
 containerManager = scm.getContainerManager();
-containerStateManager = containerManager.getStateManager();
-selector = containerManager.getPipelineSelector();
+containerStateManager = ((SCMContainerManager)containerManager)
+.getContainerStateManager();
   }
 
   @After
@@ -88,13 +82,13 @@ public class TestContainerStateManagerIntegration {
 .allocateContainer(
 xceiverClientManager.getType(),
 xceiverClientManager.getFactor(), containerOwner);
+ContainerStateManager stateManager = new ContainerStateManager(conf);
 ContainerInfo info = containerStateManager
 .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
 xceiverClientManager.getType(), xceiverClientManager.getFactor(),
 HddsProtos.LifeCycleState.ALLOCATED);
 Assert.assertEquals(container1.getContainerInfo().getContainerID(),
 info.getContainerID());
-Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
 Assert.assertEquals(containerOwner, info.getOwner());
 Assert.assertEquals(xceiverClientManager.getType(),
 info.getReplicationType());
@@ -117,35 +111,49 @@ public class TestContainerStateManagerIntegration {
   }
 
   @Test
-  public void testContainerStateManagerRestart() throws IOException {
+  public void testContainerStateManagerRestart()
+  throws IOException, TimeoutException, InterruptedException {
 // Allocate 5 containers in ALLOCATED state and 5 in CREATING state
 
-List containers = new ArrayList<>();
 for (int i = 0; i < 10; i++) {
+
   ContainerWithPipeline container = scm.getClientProtocolServer()
   .allocateContainer(
   xceiverClientManager.getType(),
   xceiverClientManager.getFactor(), containerOwner);
-  containers.add(container.getContainerInfo());
   if (i >= 5) {
 scm.getContainerManager().updateContainerState(container
-.getContainerInfo().getContainerID(),
+.getContainerInfo().containerID(),
 HddsProtos.LifeCycleEvent.CREATE);
   }
 }
 
-// New instance of ContainerStateManager should load all the containers in
-// container store.
-Contai

hadoop git commit: HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.

2018-10-17 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 c09f80885 -> b2ab592e0


HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.

(cherry picked from commit a9a63ae4a8367e66d5ec86b0097326b8491e4b1e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2ab592e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2ab592e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2ab592e

Branch: refs/heads/ozone-0.3
Commit: b2ab592e0a2f07826c045d735de863f1d3b14356
Parents: c09f808
Author: Márton Elek 
Authored: Wed Oct 17 12:44:53 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 13:01:27 2018 +0200

--
 .../server-scm/src/main/resources/webapps/scm/scm-overview.html  | 2 +-
 hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js | 4 
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ab592e/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
--
diff --git 
a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index fca23ba..de4894a 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -50,7 +50,7 @@
 
 
 Node Manager: Chill mode status
-{{$ctrl.nodemanagermetrics.ChillModeStatus}}
+{{$ctrl.scmmetrics.InChillMode}}
 
 
 Node Manager: Manual chill mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ab592e/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
--
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
index bcfa8b7..7c6dc91 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
@@ -34,6 +34,10 @@
 .then(function (result) {
 ctrl.nodemanagermetrics = result.data.beans[0];
 });
+
$http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime")
+.then(function (result) {
+ctrl.scmmetrics = result.data.beans[0];
+});
 
 var statusSortOrder = {
 "HEALTHY": "a",





hadoop git commit: HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat Viswanadham.

2018-10-17 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 f66e3c59c -> c09f80885


HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c09f8088
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c09f8088
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c09f8088

Branch: refs/heads/ozone-0.3
Commit: c09f8088534ab96951a995aa340a27ae8c1d3038
Parents: f66e3c5
Author: Márton Elek 
Authored: Wed Oct 17 11:34:03 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 12:48:33 2018 +0200

--
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java | 19 ++-
 .../ozone/s3/TestVirtualHostStyleFilter.java| 25 +++-
 2 files changed, 21 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c09f8088/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index 3bd690b..4cf78b6 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -49,8 +49,6 @@ public class VirtualHostStyleFilter implements 
ContainerRequestFilter {
 
   private static final Logger LOG = LoggerFactory.getLogger(
   VirtualHostStyleFilter.class);
-  private static final Pattern URL_SCHEME_PATTERN = Pattern.compile("" +
-  "(?(.+))\\.(?(.+))\\.");
 
   @Inject
   private OzoneConfiguration conf;
@@ -83,24 +81,21 @@ public class VirtualHostStyleFilter implements 
ContainerRequestFilter {
 // address length means it is virtual host style, we need to convert to
 // path style.
 if (host.length() > domain.length()) {
-  String bothNames = host.substring(0, host.length() - domain.length());
-  LOG.debug("Both volume name and bucket name is {}", bothNames);
-  Matcher matcher = URL_SCHEME_PATTERN.matcher(bothNames);
+  String bucketName = host.substring(0, host.length() - domain.length());
 
-  if (!matcher.matches()) {
+  if(!bucketName.endsWith(".")) {
+//Checking this as the virtual host style pattern is 
http://bucket.host/
 throw getException("Invalid S3 Gateway request {" + requestContext
 .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host
 + " is in invalid format");
+  } else {
+bucketName = bucketName.substring(0, bucketName.length() - 1);
   }
-
-  String bucketStr = matcher.group("bucket");
-  String volumeStr = matcher.group("volume");
-
-  LOG.debug("bucket {}, volumeStr {}", bucketStr, volumeStr);
+  LOG.debug("Bucket name is {}", bucketName);
 
   URI baseURI = requestContext.getUriInfo().getBaseUri();
   String currentPath = requestContext.getUriInfo().getPath();
-  String newPath = String.format("%s/%s", volumeStr, bucketStr);
+  String newPath = bucketName;
   if (currentPath != null) {
 newPath += String.format("%s", currentPath);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c09f8088/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index ac8fa87..5548c77 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -31,6 +31,8 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.SecurityContext;
 import java.net.URI;
 
+import static org.junit.Assert.fail;
+
 /**
  * This class test virtual host style mapping conversion to path style.
  */
@@ -87,10 +89,10 @@ public class TestVirtualHostStyleFilter {
 virtualHostStyleFilter.setConfiguration(conf);
 
 ContainerRequest containerRequest = createContainerRequest("mybucket" +
-".myvolume.localhost:9878", "/myfile", true);
+".localhost:9878", "/myfile", true);
 virtualHostStyleFilter.filter(containerRequest);
 URI expected = new URI("http://"; + s3HttpAddr +
-"/myvolume/mybucket/myfile");
+"/mybucket/myfile");
 Assert.assertEquals(expected, containerRequest.getReques

hadoop git commit: HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.

2018-10-17 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9df1c84be -> a9a63ae4a


HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9a63ae4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9a63ae4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9a63ae4

Branch: refs/heads/trunk
Commit: a9a63ae4a8367e66d5ec86b0097326b8491e4b1e
Parents: 9df1c84
Author: Márton Elek 
Authored: Wed Oct 17 12:44:53 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 12:54:01 2018 +0200

--
 .../server-scm/src/main/resources/webapps/scm/scm-overview.html  | 2 +-
 hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js | 4 
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a63ae4/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
--
diff --git 
a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index fca23ba..de4894a 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -50,7 +50,7 @@
 
 
 Node Manager: Chill mode status
-{{$ctrl.nodemanagermetrics.ChillModeStatus}}
+{{$ctrl.scmmetrics.InChillMode}}
 
 
 Node Manager: Manual chill mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a63ae4/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
--
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
index bcfa8b7..7c6dc91 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
@@ -34,6 +34,10 @@
 .then(function (result) {
 ctrl.nodemanagermetrics = result.data.beans[0];
 });
+
$http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime")
+.then(function (result) {
+ctrl.scmmetrics = result.data.beans[0];
+});
 
 var statusSortOrder = {
 "HEALTHY": "a",





hadoop git commit: HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat Viswanadham.

2018-10-17 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5085e5fa9 -> 9df1c84be


HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat 
Viswanadham.
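
The filter now treats everything before the configured gateway domain in the
Host header as the bucket name (virtual-host style is http://bucket.host/),
keeping only the trailing-dot check instead of the old volume.bucket regex.
A standalone sketch of that logic, with illustrative names:

    public class HostStyleSketch {
      static String bucketOf(String host, String domain) {
        if (host.length() <= domain.length()) {
          return null; // already path style
        }
        String prefix = host.substring(0, host.length() - domain.length());
        if (!prefix.endsWith(".")) {
          throw new IllegalArgumentException(
              "Host: " + host + " is in invalid format");
        }
        return prefix.substring(0, prefix.length() - 1);
      }

      public static void main(String[] args) {
        // prints "mybucket", matching the updated test expectation
        System.out.println(bucketOf("mybucket.localhost:9878", "localhost:9878"));
      }
    }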


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9df1c84b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9df1c84b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9df1c84b

Branch: refs/heads/trunk
Commit: 9df1c84be0f86f6c26030ba1b98e9a2b93dc743c
Parents: 5085e5f
Author: Márton Elek 
Authored: Wed Oct 17 11:34:03 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 12:43:44 2018 +0200

--
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java | 19 ++-
 .../ozone/s3/TestVirtualHostStyleFilter.java| 25 +++-
 2 files changed, 21 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9df1c84b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index 3bd690b..4cf78b6 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -49,8 +49,6 @@ public class VirtualHostStyleFilter implements 
ContainerRequestFilter {
 
   private static final Logger LOG = LoggerFactory.getLogger(
   VirtualHostStyleFilter.class);
-  private static final Pattern URL_SCHEME_PATTERN = Pattern.compile("" +
-  "(?(.+))\\.(?(.+))\\.");
 
   @Inject
   private OzoneConfiguration conf;
@@ -83,24 +81,21 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter {
 // address length means it is virtual host style, we need to convert to
 // path style.
 if (host.length() > domain.length()) {
-  String bothNames = host.substring(0, host.length() - domain.length());
-  LOG.debug("Both volume name and bucket name is {}", bothNames);
-  Matcher matcher = URL_SCHEME_PATTERN.matcher(bothNames);
+  String bucketName = host.substring(0, host.length() - domain.length());
 
-  if (!matcher.matches()) {
+  if(!bucketName.endsWith(".")) {
+//Checking this as the virtual host style pattern is http://bucket.host/
 throw getException("Invalid S3 Gateway request {" + requestContext
 .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host
 + " is in invalid format");
+  } else {
+bucketName = bucketName.substring(0, bucketName.length() - 1);
   }
-
-  String bucketStr = matcher.group("bucket");
-  String volumeStr = matcher.group("volume");
-
-  LOG.debug("bucket {}, volumeStr {}", bucketStr, volumeStr);
+  LOG.debug("Bucket name is {}", bucketName);
 
   URI baseURI = requestContext.getUriInfo().getBaseUri();
   String currentPath = requestContext.getUriInfo().getPath();
-  String newPath = String.format("%s/%s", volumeStr, bucketStr);
+  String newPath = bucketName;
   if (currentPath != null) {
 newPath += String.format("%s", currentPath);
   }

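The rewritten filter now expects host = bucket + "." + domain: it strips the domain suffix and the trailing dot instead of matching a volume.bucket pair. A self-contained sketch of that parsing under hypothetical names (the real filter additionally rebuilds the request URI):

public final class HostStyleDemo {

  // Mirrors the patched branch: host longer than domain means virtual host
  // style, and the remainder before the domain must end with a dot.
  static String bucketOf(String host, String domain) {
    if (host.length() <= domain.length()) {
      return null; // already path style, nothing to strip
    }
    String bucketName = host.substring(0, host.length() - domain.length());
    if (!bucketName.endsWith(".")) {
      throw new IllegalArgumentException(
          "Host: " + host + " is in invalid format");
    }
    return bucketName.substring(0, bucketName.length() - 1);
  }

  public static void main(String[] args) {
    // mybucket.localhost:9878 -> mybucket, as the updated test expects
    System.out.println(bucketOf("mybucket.localhost:9878", "localhost:9878"));
  }
}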
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9df1c84b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index ac8fa87..5548c77 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -31,6 +31,8 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.SecurityContext;
 import java.net.URI;
 
+import static org.junit.Assert.fail;
+
 /**
 * This class tests virtual host style mapping conversion to path style.
  */
@@ -87,10 +89,10 @@ public class TestVirtualHostStyleFilter {
 virtualHostStyleFilter.setConfiguration(conf);
 
 ContainerRequest containerRequest = createContainerRequest("mybucket" +
-".myvolume.localhost:9878", "/myfile", true);
+".localhost:9878", "/myfile", true);
 virtualHostStyleFilter.filter(containerRequest);
 URI expected = new URI("http://"; + s3HttpAddr +
-"/myvolume/mybucket/myfile");
+"/mybucket/myfile");
 Assert.assertEquals(expected, containerRequest.getRequestUri());

hadoop git commit: YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes other test failures. Contributed by Antal Bálint Steinbach.

2018-10-17 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 88ea877ac -> 6380ee551


YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes 
other test failures. Contributed by Antal Bálint Steinbach.

(cherry picked from commit 5085e5fa9e8c7e489a8518e8541c12b14f3651df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6380ee55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6380ee55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6380ee55

Branch: refs/heads/branch-3.2
Commit: 6380ee5512e3cadeca0c6097bd2dd2cb9ef4bc1d
Parents: 88ea877
Author: Sunil G 
Authored: Wed Oct 17 16:05:08 2018 +0530
Committer: Sunil G 
Committed: Wed Oct 17 16:06:07 2018 +0530

--
 .../yarn/util/resource/TestResourceUtils.java   | 51 
 .../resourcemanager/TestClientRMService.java| 17 ---
 2 files changed, 32 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6380ee55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index 9b48017..c96982d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -39,6 +39,9 @@ import java.util.Map;
  */
 public class TestResourceUtils {
 
+  private File nodeResourcesFile;
+  private File resourceTypesFile;
+
   static class ResourceFileInformation {
 String filename;
 int resourceCount;
@@ -75,12 +78,11 @@ public class TestResourceUtils {
 
   @After
   public void teardown() {
-Configuration conf = new YarnConfiguration();
-File source = new File(
-conf.getClassLoader().getResource("resource-types-1.xml").getFile());
-File dest = new File(source.getParent(), "resource-types.xml");
-if (dest.exists()) {
-  dest.delete();
+if(nodeResourcesFile != null && nodeResourcesFile.exists()) {
+  nodeResourcesFile.delete();
+}
+if(resourceTypesFile != null && resourceTypesFile.exists()) {
+  resourceTypesFile.delete();
 }
   }
 
@@ -136,8 +138,8 @@ public class TestResourceUtils {
   File source = new File(
   conf.getClassLoader().getResource(testInformation.filename)
   .getFile());
-  File dest = new File(source.getParent(), "resource-types.xml");
-  FileUtils.copyFile(source, dest);
+  resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+  FileUtils.copyFile(source, resourceTypesFile);
   res = ResourceUtils.getResourceTypes();
   testMemoryAndVcores(res);
   Assert.assertEquals(testInformation.resourceCount, res.size());
@@ -148,7 +150,6 @@ public class TestResourceUtils {
 res.containsKey(resourceName));
 Assert.assertEquals(entry.getValue(), 
res.get(resourceName).getUnits());
   }
-  dest.delete();
 }
   }
 
@@ -161,20 +162,17 @@ public class TestResourceUtils {
 "resource-types-error-4.xml"};
 for (String resourceFile : resourceFiles) {
   ResourceUtils.resetResourceTypes();
-  File dest = null;
   try {
 File source =
 new File(conf.getClassLoader().getResource(resourceFile).getFile());
-dest = new File(source.getParent(), "resource-types.xml");
-FileUtils.copyFile(source, dest);
+resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, resourceTypesFile);
 ResourceUtils.getResourceTypes();
 Assert.fail("Expected error with file " + resourceFile);
   } catch (NullPointerException ne) {
 throw ne;
   } catch (Exception e) {
-if (dest != null) {
-  dest.delete();
-}
+//Test passed
   }
 }
   }
@@ -275,7 +273,7 @@ public class TestResourceUtils {
 ResourceUtils.initializeResourcesMap(conf);
 Assert.fail("resource map initialization should fail");
   } catch (Exception e) {
-// do nothing
+//Test passed
   }
 }
   }
@@ -299,11 +297,10 @@ public class TestResourceUtils {
 for (Map.Entry entry : testRun.entrySet()) {
   String resourceFile = entry.getKey();
   ResourceUtils.resetNodeResources();
-  File dest;
   File source = new File(
   conf.getClassLoader(

hadoop git commit: YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes other test failures. Contributed by Antal Bálint Steinbach.

2018-10-17 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 41b3603b5 -> 5085e5fa9


YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes 
other test failures. Contributed by Antal Bálint Steinbach.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5085e5fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5085e5fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5085e5fa

Branch: refs/heads/trunk
Commit: 5085e5fa9e8c7e489a8518e8541c12b14f3651df
Parents: 41b3603
Author: Sunil G 
Authored: Wed Oct 17 16:05:08 2018 +0530
Committer: Sunil G 
Committed: Wed Oct 17 16:05:08 2018 +0530

--
 .../yarn/util/resource/TestResourceUtils.java   | 51 
 .../resourcemanager/TestClientRMService.java| 17 ---
 2 files changed, 32 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5085e5fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index 9b48017..c96982d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -39,6 +39,9 @@ import java.util.Map;
  */
 public class TestResourceUtils {
 
+  private File nodeResourcesFile;
+  private File resourceTypesFile;
+
   static class ResourceFileInformation {
 String filename;
 int resourceCount;
@@ -75,12 +78,11 @@ public class TestResourceUtils {
 
   @After
   public void teardown() {
-Configuration conf = new YarnConfiguration();
-File source = new File(
-conf.getClassLoader().getResource("resource-types-1.xml").getFile());
-File dest = new File(source.getParent(), "resource-types.xml");
-if (dest.exists()) {
-  dest.delete();
+if(nodeResourcesFile != null && nodeResourcesFile.exists()) {
+  nodeResourcesFile.delete();
+}
+if(resourceTypesFile != null && resourceTypesFile.exists()) {
+  resourceTypesFile.delete();
 }
   }
 
@@ -136,8 +138,8 @@ public class TestResourceUtils {
   File source = new File(
   conf.getClassLoader().getResource(testInformation.filename)
   .getFile());
-  File dest = new File(source.getParent(), "resource-types.xml");
-  FileUtils.copyFile(source, dest);
+  resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+  FileUtils.copyFile(source, resourceTypesFile);
   res = ResourceUtils.getResourceTypes();
   testMemoryAndVcores(res);
   Assert.assertEquals(testInformation.resourceCount, res.size());
@@ -148,7 +150,6 @@ public class TestResourceUtils {
 res.containsKey(resourceName));
 Assert.assertEquals(entry.getValue(), 
res.get(resourceName).getUnits());
   }
-  dest.delete();
 }
   }
 
@@ -161,20 +162,17 @@ public class TestResourceUtils {
 "resource-types-error-4.xml"};
 for (String resourceFile : resourceFiles) {
   ResourceUtils.resetResourceTypes();
-  File dest = null;
   try {
 File source =
 new File(conf.getClassLoader().getResource(resourceFile).getFile());
-dest = new File(source.getParent(), "resource-types.xml");
-FileUtils.copyFile(source, dest);
+resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, resourceTypesFile);
 ResourceUtils.getResourceTypes();
 Assert.fail("Expected error with file " + resourceFile);
   } catch (NullPointerException ne) {
 throw ne;
   } catch (Exception e) {
-if (dest != null) {
-  dest.delete();
-}
+//Test passed
   }
 }
   }
@@ -275,7 +273,7 @@ public class TestResourceUtils {
 ResourceUtils.initializeResourcesMap(conf);
 Assert.fail("resource map initialization should fail");
   } catch (Exception e) {
-// do nothing
+//Test passed
   }
 }
   }
@@ -299,11 +297,10 @@ public class TestResourceUtils {
 for (Map.Entry entry : testRun.entrySet()) {
   String resourceFile = entry.getKey();
   ResourceUtils.resetNodeResources();
-  File dest;
   File source = new File(
   conf.getClassLoader().getResource(resourceFile).getFile());
-  dest = new File(source.getParent(

[3/4] hadoop git commit: HADOOP-15861. Move DelegationTokenIssuer to the right path. Contributed by Wei-Chiu Chuang.

2018-10-17 Thread stevel
HADOOP-15861. Move DelegationTokenIssuer to the right path.
Contributed by Wei-Chiu Chuang.

(cherry picked from commit 41b3603b5bcb74a7d78a314a4a5c177d941af27f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c350785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c350785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c350785

Branch: refs/heads/branch-3.1
Commit: 9c350785d4820bbf82f657eaac7568b0982fd121
Parents: ddd7da5
Author: Steve Loughran 
Authored: Wed Oct 17 11:03:22 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 11:03:22 2018 +0100

--
 .../security/token/DelegationTokenIssuer.java   | 111 +++
 .../security/token/DelegationTokenIssuer.java   | 111 ---
 2 files changed, 111 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c350785/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
new file mode 100644
index 000..70a53b7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for issuing delegation tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
+@InterfaceStability.Unstable
+public interface DelegationTokenIssuer {
+
+  /**
+   * The service name used as the alias for the  token in the credential
+   * token map.  addDelegationTokens will use this to determine if
+   * a token exists, and if not, add a new token with this alias.
+   */
+  String getCanonicalServiceName();
+
+  /**
+   * Unconditionally get a new token with the optional renewer.  Returning
+   * null indicates the service does not issue tokens.
+   */
+  Token<?> getDelegationToken(String renewer) throws IOException;
+
+  /**
+   * Issuers may need tokens from additional services.
+   */
+  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
+  throws IOException {
+return null;
+  }
+
+  /**
+   * Given a renewer, add delegation tokens for issuer and its child issuers
+   * to the Credentials object if it is not already present.
+   *
+   * Note: This method is not intended to be overridden.  Issuers should
+   * implement getCanonicalService and getDelegationToken to ensure
+   * consistent token acquisition behavior.
+   *
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException if an IO error occurs.
+   */
+  default Token<?>[] addDelegationTokens(
+  final String renewer, Credentials credentials) throws IOException {
+if (credentials == null) {
+  credentials = new Credentials();
+}
+final List<Token<?>> tokens = new ArrayList<>();
+collectDelegationTokens(this, renewer, credentials, tokens);
+return tokens.toArray(new Token[tokens.size()]);
+  }
+
+  /**
+   * NEVER call this method directly.
+   */
+  @InterfaceAudience.Private
+  static void collectDelegationTokens(
+  final DelegationTokenIssuer issuer,
+  final String renewer,
+  final Credentials credentials,
+  final List<Token<?>> tokens) throws IOException {
+final String serviceName = issuer.getCanonicalServi

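For reference, a minimal hypothetical implementation of the relocated interface; DemoIssuer and "demo-service" are invented for illustration, since the patch itself only moves the file to match its package:

import java.io.IOException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.DelegationTokenIssuer;
import org.apache.hadoop.security.token.Token;

public class DemoIssuer implements DelegationTokenIssuer {

  @Override
  public String getCanonicalServiceName() {
    return "demo-service"; // alias under which the token is cached
  }

  @Override
  public Token<?> getDelegationToken(String renewer) throws IOException {
    return new Token<>(); // a real issuer would contact its service here
  }

  public static void main(String[] args) throws IOException {
    Credentials creds = new Credentials();
    // The default method walks this issuer and any additional issuers,
    // skipping services whose alias is already present in creds.
    Token<?>[] added = new DemoIssuer().addDelegationTokens("renewer", creds);
    System.out.println(added.length + " token(s) added");
  }
}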
[4/4] hadoop git commit: HADOOP-15861. Move DelegationTokenIssuer to the right path. Contributed by Wei-Chiu Chuang.

2018-10-17 Thread stevel
HADOOP-15861. Move DelegationTokenIssuer to the right path.
Contributed by Wei-Chiu Chuang.

(cherry picked from commit 41b3603b5bcb74a7d78a314a4a5c177d941af27f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae42d59e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae42d59e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae42d59e

Branch: refs/heads/branch-3.0
Commit: ae42d59ebae02167b85e9d41ee7e2cfe44770580
Parents: 99b447f
Author: Steve Loughran 
Authored: Wed Oct 17 11:04:05 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 11:04:05 2018 +0100

--
 .../security/token/DelegationTokenIssuer.java   | 111 +++
 .../security/token/DelegationTokenIssuer.java   | 111 ---
 2 files changed, 111 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae42d59e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
new file mode 100644
index 000..70a53b7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for issuing delegation tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
+@InterfaceStability.Unstable
+public interface DelegationTokenIssuer {
+
+  /**
+   * The service name used as the alias for the  token in the credential
+   * token map.  addDelegationTokens will use this to determine if
+   * a token exists, and if not, add a new token with this alias.
+   */
+  String getCanonicalServiceName();
+
+  /**
+   * Unconditionally get a new token with the optional renewer.  Returning
+   * null indicates the service does not issue tokens.
+   */
+  Token<?> getDelegationToken(String renewer) throws IOException;
+
+  /**
+   * Issuers may need tokens from additional services.
+   */
+  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
+  throws IOException {
+return null;
+  }
+
+  /**
+   * Given a renewer, add delegation tokens for issuer and its child issuers
+   * to the Credentials object if it is not already present.
+   *
+   * Note: This method is not intended to be overridden.  Issuers should
+   * implement getCanonicalService and getDelegationToken to ensure
+   * consistent token acquisition behavior.
+   *
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException if an IO error occurs.
+   */
+  default Token<?>[] addDelegationTokens(
+  final String renewer, Credentials credentials) throws IOException {
+if (credentials == null) {
+  credentials = new Credentials();
+}
+final List<Token<?>> tokens = new ArrayList<>();
+collectDelegationTokens(this, renewer, credentials, tokens);
+return tokens.toArray(new Token[tokens.size()]);
+  }
+
+  /**
+   * NEVER call this method directly.
+   */
+  @InterfaceAudience.Private
+  static void collectDelegationTokens(
+  final DelegationTokenIssuer issuer,
+  final String renewer,
+  final Credentials credentials,
+  final List<Token<?>> tokens) throws IOException {
+final String serviceName = issuer.getCanonicalServi

[1/4] hadoop git commit: HADOOP-15861. Move DelegationTokenIssuer to the right path. Contributed by Wei-Chiu Chuang.

2018-10-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 99b447f61 -> ae42d59eb
  refs/heads/branch-3.1 ddd7da52c -> 9c350785d
  refs/heads/branch-3.2 08b415d17 -> 88ea877ac
  refs/heads/trunk b738cb148 -> 41b3603b5


HADOOP-15861. Move DelegationTokenIssuer to the right path.
Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41b3603b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41b3603b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41b3603b

Branch: refs/heads/trunk
Commit: 41b3603b5bcb74a7d78a314a4a5c177d941af27f
Parents: b738cb1
Author: Steve Loughran 
Authored: Wed Oct 17 11:01:53 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 11:01:53 2018 +0100

--
 .../security/token/DelegationTokenIssuer.java   | 111 +++
 .../security/token/DelegationTokenIssuer.java   | 111 ---
 2 files changed, 111 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b3603b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
new file mode 100644
index 000..70a53b7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for issuing delegation tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
+@InterfaceStability.Unstable
+public interface DelegationTokenIssuer {
+
+  /**
+   * The service name used as the alias for the  token in the credential
+   * token map.  addDelegationTokens will use this to determine if
+   * a token exists, and if not, add a new token with this alias.
+   */
+  String getCanonicalServiceName();
+
+  /**
+   * Unconditionally get a new token with the optional renewer.  Returning
+   * null indicates the service does not issue tokens.
+   */
+  Token<?> getDelegationToken(String renewer) throws IOException;
+
+  /**
+   * Issuers may need tokens from additional services.
+   */
+  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
+  throws IOException {
+return null;
+  }
+
+  /**
+   * Given a renewer, add delegation tokens for issuer and its child issuers
+   * to the Credentials object if it is not already present.
+   *
+   * Note: This method is not intended to be overridden.  Issuers should
+   * implement getCanonicalService and getDelegationToken to ensure
+   * consistent token acquisition behavior.
+   *
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException if an IO error occurs.
+   */
+  default Token<?>[] addDelegationTokens(
+  final String renewer, Credentials credentials) throws IOException {
+if (credentials == null) {
+  credentials = new Credentials();
+}
+final List<Token<?>> tokens = new ArrayList<>();
+collectDelegationTokens(this, renewer, credentials, tokens);
+return tokens.toArray(new Token[tokens.size()]);
+  }
+
+  /**
+   * NEVER call this method directly.
+   */
+  @InterfaceAudience.Private
+  static void collectDelegationTokens(
+  final DelegationTokenIssuer issuer,
+  final String rene

[2/4] hadoop git commit: HADOOP-15861. Move DelegationTokenIssuer to the right path. Contributed by Wei-Chiu Chuang.

2018-10-17 Thread stevel
HADOOP-15861. Move DelegationTokenIssuer to the right path.
Contributed by Wei-Chiu Chuang.

(cherry picked from commit 41b3603b5bcb74a7d78a314a4a5c177d941af27f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88ea877a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88ea877a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88ea877a

Branch: refs/heads/branch-3.2
Commit: 88ea877ac0edc595f256b4d5475e57a3ce5c3e94
Parents: 08b415d
Author: Steve Loughran 
Authored: Wed Oct 17 11:02:47 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 11:02:47 2018 +0100

--
 .../security/token/DelegationTokenIssuer.java   | 111 +++
 .../security/token/DelegationTokenIssuer.java   | 111 ---
 2 files changed, 111 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ea877a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
new file mode 100644
index 000..70a53b7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for issuing delegation tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
+@InterfaceStability.Unstable
+public interface DelegationTokenIssuer {
+
+  /**
+   * The service name used as the alias for the  token in the credential
+   * token map.  addDelegationTokens will use this to determine if
+   * a token exists, and if not, add a new token with this alias.
+   */
+  String getCanonicalServiceName();
+
+  /**
+   * Unconditionally get a new token with the optional renewer.  Returning
+   * null indicates the service does not issue tokens.
+   */
+  Token<?> getDelegationToken(String renewer) throws IOException;
+
+  /**
+   * Issuers may need tokens from additional services.
+   */
+  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
+  throws IOException {
+return null;
+  }
+
+  /**
+   * Given a renewer, add delegation tokens for issuer and its child issuers
+   * to the Credentials object if it is not already present.
+   *
+   * Note: This method is not intended to be overridden.  Issuers should
+   * implement getCanonicalService and getDelegationToken to ensure
+   * consistent token acquisition behavior.
+   *
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException if an IO error occurs.
+   */
+  default Token<?>[] addDelegationTokens(
+  final String renewer, Credentials credentials) throws IOException {
+if (credentials == null) {
+  credentials = new Credentials();
+}
+final List<Token<?>> tokens = new ArrayList<>();
+collectDelegationTokens(this, renewer, credentials, tokens);
+return tokens.toArray(new Token[tokens.size()]);
+  }
+
+  /**
+   * NEVER call this method directly.
+   */
+  @InterfaceAudience.Private
+  static void collectDelegationTokens(
+  final DelegationTokenIssuer issuer,
+  final String renewer,
+  final Credentials credentials,
+  final List<Token<?>> tokens) throws IOException {
+final String serviceName = issuer.getCanonicalServi

hadoop git commit: HADOOP-15854. AuthToken Use StringBuilder instead of StringBuffer. Contributed by Beluga Behr.

2018-10-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 64a43c92c -> b738cb148


HADOOP-15854. AuthToken Use StringBuilder instead of StringBuffer.
Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b738cb14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b738cb14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b738cb14

Branch: refs/heads/trunk
Commit: b738cb148cb2a15e72a5c27200eca2d4b383bf9c
Parents: 64a43c9
Author: Steve Loughran 
Authored: Wed Oct 17 10:29:09 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 10:29:09 2018 +0100

--
 .../org/apache/hadoop/security/authentication/util/AuthToken.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b738cb14/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
index e959f65..844501c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
@@ -128,7 +128,7 @@ public class AuthToken implements Principal {
* Generates the token.
*/
   private void generateToken() {
-StringBuffer sb = new StringBuffer();
+StringBuilder sb = new StringBuilder();
 
sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
 sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
 sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);




hadoop git commit: HDDS-656. Add logic for pipeline report and action processing in new pipeline code. Contributed by Lokesh Jain.

2018-10-17 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 533138718 -> 64a43c92c


HDDS-656. Add logic for pipeline report and action processing in new pipeline 
code. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64a43c92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64a43c92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64a43c92

Branch: refs/heads/trunk
Commit: 64a43c92c2133f3b9a317dcc4f0391ad6b604194
Parents: 5331387
Author: Nandakumar 
Authored: Wed Oct 17 13:56:54 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 13:57:38 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientRatis.java |  19 ++
 .../hadoop/hdds/scm/pipeline/Pipeline.java  | 103 ++---
 hadoop-hdds/common/src/main/proto/hdds.proto|   1 +
 .../scm/pipeline/PipelineActionHandler.java |  66 ++
 .../hdds/scm/pipeline/PipelineFactory.java  |   7 +-
 .../hdds/scm/pipeline/PipelineManager.java  |  13 +-
 .../scm/pipeline/PipelineReportHandler.java | 104 +
 .../hdds/scm/pipeline/PipelineStateManager.java | 135 +++-
 .../hdds/scm/pipeline/PipelineStateMap.java |  76 ---
 .../scm/pipeline/RatisPipelineProvider.java |  26 ++-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  81 ---
 .../scm/pipeline/SimplePipelineProvider.java|   6 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  17 ++
 .../scm/pipeline/TestPipelineStateManager.java  | 209 ++-
 .../scm/pipeline/TestRatisPipelineProvider.java |  18 +-
 .../scm/pipeline/TestSCMPipelineManager.java| 187 +
 .../pipeline/TestSimplePipelineProvider.java|  16 +-
 17 files changed, 804 insertions(+), 280 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a43c92/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 4efe7ba..45e9d6e 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.thirdparty.com.google.protobuf
@@ -73,6 +74,24 @@ public final class XceiverClientRatis extends XceiverClientSpi {
 retryPolicy);
   }
 
+  public static XceiverClientRatis newXceiverClientRatis(
+  org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
+  Configuration ozoneConf) {
+final String rpcType = ozoneConf
+.get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+final int maxOutstandingRequests =
+HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
+final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
+Pipeline pipeline1 =
+new Pipeline(pipeline.getNodes().get(0).getUuidString(),
+HddsProtos.LifeCycleState.OPEN, pipeline.getType(),
+pipeline.getFactor(), PipelineID.valueOf(pipeline.getID().getId()));
+return new XceiverClientRatis(pipeline1,
+SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
+retryPolicy);
+  }
+
   private final Pipeline pipeline;
   private final RpcType rpcType;
  private final AtomicReference<RaftClient> client = new AtomicReference<>();

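The new overload bridges the new-style Pipeline to the legacy client: it derives a legacy pipeline from the first datanode and reads the RPC type, outstanding-request cap, and retry policy from the configuration. A hypothetical call site, assuming the pipeline comes from the new pipeline manager:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientRatis;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class RatisClientDemo {

  // Sketch only: at runtime the pipeline would come from the new
  // SCMPipelineManager rather than being constructed by hand.
  static XceiverClientRatis clientFor(Pipeline pipeline,
      OzoneConfiguration conf) {
    return XceiverClientRatis.newXceiverClientRatis(pipeline, conf);
  }
}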
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a43c92/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index b58a001..b22a0c6 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -23,12 +23,14 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProt