[hadoop] branch trunk updated: HDDS-936. Need a tool to map containers to ozone objects. Contributed by Sarun Singla

2019-02-13 Thread jitendra
This is an automated email from the ASF dual-hosted git repository.

jitendra pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fa067aa  HDDS-936. Need a tool to map containers to ozone objects. 
Contributed by Sarun Singla
fa067aa is described below

commit fa067aa157d497e780a7f064924921fe6b3e3a7f
Author: Jitendra Pandey 
AuthorDate: Wed Feb 13 15:43:14 2019 -0800

HDDS-936. Need a tool to map containers to ozone objects. Contributed by 
Sarun Singla
---
 hadoop-ozone/tools/pom.xml |   5 +
 .../apache/hadoop/ozone/fsck/BlockIdDetails.java   |  66 ++
 .../apache/hadoop/ozone/fsck/ContainerMapper.java  | 134 +
 .../org/apache/hadoop/ozone/fsck/package-info.java |  44 +++
 .../hadoop/ozone/fsck/TestContainerMapper.java | 117 ++
 .../org/apache/hadoop/ozone/fsck/package-info.java |  44 +++
 6 files changed, 410 insertions(+)

diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 1af72c9..95bef70 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -78,6 +78,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>2.15.0</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+      <version>0.4.0-SNAPSHOT</version>
+    </dependency>
   </dependencies>
 
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
new file mode 100644
index 0000000..4f7b8a1
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
@@ -0,0 +1,66 @@
+package org.apache.hadoop.ozone.fsck;
+
+import java.util.Objects;
+
+/**
+ * Getters and setters for BlockIdDetails.
+ */
+
+public class BlockIdDetails {
+
+  private String bucketName;
+  private String blockVol;
+  private String keyName;
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public void setBucketName(String bucketName) {
+    this.bucketName = bucketName;
+  }
+
+  public String getBlockVol() {
+    return blockVol;
+  }
+
+  public void setBlockVol(String blockVol) {
+    this.blockVol = blockVol;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public void setKeyName(String keyName) {
+    this.keyName = keyName;
+  }
+
+  @Override
+  public String toString() {
+    return "BlockIdDetails{" +
+        "bucketName='" + bucketName + '\'' +
+        ", blockVol='" + blockVol + '\'' +
+        ", keyName='" + keyName + '\'' +
+        '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    BlockIdDetails that = (BlockIdDetails) o;
+    return Objects.equals(bucketName, that.bucketName) &&
+        Objects.equals(blockVol, that.blockVol) &&
+        Objects.equals(keyName, that.keyName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(bucketName, blockVol, keyName);
+  }
+}
\ No newline at end of file
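
For illustration, a minimal sketch of how a container-to-objects mapping keyed by container ID could be accumulated with BlockIdDetails values. It assumes only the BlockIdDetails class added above; the driver class and the container ID are hypothetical, not part of the committed tool.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch only: accumulate BlockIdDetails per container ID.
public class ContainerMappingSketch {
  public static void main(String[] args) {
    Map<Long, List<BlockIdDetails>> containerToObjects = new HashMap<>();

    BlockIdDetails details = new BlockIdDetails();
    details.setBlockVol("vol1");       // volume owning the key
    details.setBucketName("bucket1");  // bucket within the volume
    details.setKeyName("key1");        // key whose block lives in the container

    long containerId = 42L;            // hypothetical container ID
    containerToObjects
        .computeIfAbsent(containerId, id -> new ArrayList<>())
        .add(details);

    System.out.println(containerId + " -> " + containerToObjects.get(containerId));
  }
}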
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
new file mode 100644
index 0000000..29a06be
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.fsck;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.utils.db.Table;
+import o

hadoop git commit: HDDS-870. Avoid creating block sized buffer in ChunkGroupOutputStream. Contributed by Shashikant Banerjee.

2018-12-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5e773efd7 -> 1afba83f2


HDDS-870. Avoid creating block sized buffer in ChunkGroupOutputStream. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1afba83f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1afba83f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1afba83f

Branch: refs/heads/trunk
Commit: 1afba83f2cd74ae54b558101d235f237ccf454c0
Parents: 5e773ef
Author: Jitendra Pandey 
Authored: Sat Dec 8 09:23:10 2018 -0800
Committer: Jitendra Pandey 
Committed: Sat Dec 8 09:23:10 2018 -0800

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   6 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java | 110 +++--
 .../hdds/scm/storage/ChunkOutputStream.java | 426 ---
 .../hdds/scm/XceiverClientAsyncReply.java   |  44 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  19 +-
 .../scm/storage/ContainerProtocolCalls.java |   1 -
 .../common/src/main/resources/ozone-default.xml |   8 +-
 .../server/ratis/DispatcherContext.java |   4 +-
 .../ozone/client/io/ChunkGroupOutputStream.java | 119 +++---
 .../rpc/TestCloseContainerHandlingByClient.java |  19 +-
 .../client/rpc/TestFailureHandlingByClient.java |  56 ++-
 .../hadoop/ozone/om/TestChunkStreams.java   |  78 
 12 files changed, 505 insertions(+), 385 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1afba83f/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index a824c29..8bdbd1e 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -290,12 +290,16 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   }
 
   @Override
-  public void watchForCommit(long index, long timeout)
+  public long watchForCommit(long index, long timeout)
       throws InterruptedException, ExecutionException, TimeoutException,
       IOException {
     // there is no notion of watch for commit index in standalone pipeline
+    return 0;
   };
 
+  public long getReplicatedMinCommitIndex() {
+    return 0;
+  }
   /**
* Returns pipeline Type.
*
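
The signature change above turns watchForCommit from void into a long return, and the gRPC (standalone) client returns 0 because there is no commit-index watch to wait on. A hedged caller-side sketch of that contract follows; the CommitWatcher interface is a stand-in for XceiverClientSpi, not the real API.

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

// Stand-in for the changed XceiverClientSpi method (illustrative only).
interface CommitWatcher {
  long watchForCommit(long index, long timeout)
      throws InterruptedException, ExecutionException, TimeoutException,
      IOException;
}

class WatchCaller {
  // Waits for 'index' to replicate; 0 from the watcher means the
  // pipeline has no watch semantics, so there is nothing to wait on.
  static long awaitReplication(CommitWatcher client, long index)
      throws Exception {
    long replicated = client.watchForCommit(index, 10_000);
    return replicated == 0 ? index : replicated;
  }
}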

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1afba83f/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 28d3e7a..b1a70c0 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.protocol.RaftRetryFailureException;
 import org.apache.ratis.retry.RetryPolicy;
@@ -42,15 +44,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Objects;
-import java.util.Collection;
+import java.util.*;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * An abstract implementation of {@link XceiverClientSpi} using Ratis.
@@ -79,6 +80,12 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   private final int maxOutstandingRequests;
   private final RetryPolicy retryPolicy;
 
+  // Map to track commit index at every server
+  private final ConcurrentHashMap<UUID, Long> commitInfoMap;
+
+  // create a separate RaftClient for watchForCommit API
+  private RaftClient watchClient;
+
   /**
* Constructs a client.
*/
@@ -89,6 +96,30 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 this.rpcType = rpcType;
 this.maxOutstandingRequests = maxOutStandingChunks;
 this.retryPolicy = retryPolicy;
+commitInfoMap = new ConcurrentHash
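
The email is cut off here, but the intent of commitInfoMap is visible above: track the highest commit index acknowledged by each Ratis server so the client can compute the minimum replicated index. A simplified sketch of that bookkeeping, with hypothetical method names (not the committed XceiverClientRatis code):

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of "commit index per server" bookkeeping (illustrative only).
class CommitIndexTracker {
  private final ConcurrentHashMap<UUID, Long> commitInfoMap =
      new ConcurrentHashMap<>();

  void updateCommitInfo(UUID server, long index) {
    // keep the highest index acknowledged by this server
    commitInfoMap.merge(server, index, Long::max);
  }

  long minReplicatedIndex() {
    // an index replicated everywhere is the minimum across servers
    return commitInfoMap.values().stream()
        .mapToLong(Long::longValue).min().orElse(0);
  }
}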

hadoop git commit: HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by Shashikant Banerjee.

2018-11-12 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f9c4f32e -> f944f3383


HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f944f338
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f944f338
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f944f338

Branch: refs/heads/trunk
Commit: f944f3383246450a1aa2b34f55f99a9e86e10c42
Parents: 1f9c4f3
Author: Jitendra Pandey 
Authored: Mon Nov 12 14:08:39 2018 -0800
Committer: Jitendra Pandey 
Committed: Mon Nov 12 14:08:39 2018 -0800

--
 .../helpers/ContainerNotOpenException.java  |  36 +++
 .../helpers/InvalidContainerStateException.java |  35 ++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/common/impl/HddsDispatcher.java   | 106 ---
 .../common/interfaces/ContainerDispatcher.java  |  10 ++
 .../CloseContainerCommandHandler.java   |  28 +++--
 .../server/ratis/ContainerStateMachine.java |  11 ++
 .../container/keyvalue/KeyValueHandler.java |  33 +++---
 .../ozone/client/io/ChunkGroupOutputStream.java |  14 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |   2 +-
 .../rpc/TestContainerStateMachineFailures.java  |   6 +-
 .../transport/server/ratis/TestCSMMetrics.java  |   6 ++
 .../container/server/TestContainerServer.java   |   6 ++
 13 files changed, 255 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
new file mode 100644
index 0000000..4e406e6
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+
+/**
+ * Exceptions thrown when a write/update operation is done on a non-open
+ * container.
+ */
+public class ContainerNotOpenException extends StorageContainerException {
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   */
+  public ContainerNotOpenException(String message) {
+super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
+  }
+}
\ No newline at end of file
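
A hedged sketch of where such an exception would be thrown; the guard method below is a hypothetical call site, not part of this commit's diff.

import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;

// Hypothetical call site: reject writes once a container is not open.
class OpenStateGuard {
  static void ensureOpenForWrite(boolean isOpen, long containerId)
      throws ContainerNotOpenException {
    if (!isOpen) {
      throw new ContainerNotOpenException(
          "Write rejected: container " + containerId + " is not open");
    }
  }
}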

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
new file mode 100644
index 0000000..1378d1a
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with th

hadoop git commit: HDDS-651. Rename o3 to o3fs for Filesystem.

2018-10-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 51a79cc71 -> 106be1416


HDDS-651. Rename o3 to o3fs for Filesystem.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/106be141
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/106be141
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/106be141

Branch: refs/heads/ozone-0.3
Commit: 106be1416dfe6e6bbf90d3653edca5eb2ef54d44
Parents: 51a79cc
Author: Jitendra Pandey 
Authored: Wed Oct 17 14:18:46 2018 -0700
Committer: Jitendra Pandey 
Committed: Wed Oct 17 14:32:22 2018 -0700

--
 .../src/main/resources/core-default.xml | 13 -
 .../conf/TestCommonConfigurationFields.java |  2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 +-
 .../dist/src/main/compose/ozonefs/docker-config |  2 +-
 .../src/main/smoketest/ozonefs/ozonefs.robot| 52 ++--
 hadoop-ozone/docs/content/OzoneFS.md|  6 +--
 .../hadoop/ozone/web/ozShell/Handler.java   |  6 +--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |  2 +-
 .../fs/ozone/TestOzoneFileInterfaces.java   |  2 +-
 9 files changed, 40 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f3167f2..c51030b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1772,19 +1772,6 @@
 
 
 
-<property>
-  <name>fs.o3.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
-  <description>The implementation class of the Ozone FileSystem.</description>
-</property>
-
-<property>
-  <name>fs.AbstractFileSystem.o3.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzFs</value>
-  <description>The implementation class of the OzFs
-    AbstractFileSystem.</description>
-</property>
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index e10617d..50af230 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -102,7 +102,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3a.");
 
 // O3 properties are in a different subtree.
-xmlPrefixToSkipCompare.add("fs.o3.");
+xmlPrefixToSkipCompare.add("fs.o3fs.");
 
 //ftp properties are in a different subtree.
 // - org.apache.hadoop.fs.ftp.FTPFileSystem.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..b77d621 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -63,7 +63,10 @@ public final class OzoneConsts {
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
-  public static final String OZONE_URI_SCHEME = "o3";
+  // Ozone File System scheme
+  public static final String OZONE_URI_SCHEME = "o3fs";
+
+  public static final String OZONE_RPC_SCHEME = "o3";
   public static final String OZONE_HTTP_SCHEME = "http";
   public static final String OZONE_URI_DELIMITER = "/";
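
After this rename, the filesystem bindings move from the fs.o3. prefix to fs.o3fs.. As a hedged illustration, the renamed properties would look like this on a client-side Configuration; the commit itself only removes the old fs.o3.* defaults, so these exact set() calls are an assumption.

import org.apache.hadoop.conf.Configuration;

// Illustrative client-side bindings under the new o3fs scheme.
public class O3fsBindings {
  public static Configuration withOzoneFs() {
    Configuration conf = new Configuration();
    conf.set("fs.o3fs.impl", "org.apache.hadoop.fs.ozone.OzoneFileSystem");
    conf.set("fs.AbstractFileSystem.o3fs.impl",
        "org.apache.hadoop.fs.ozone.OzFs");
    return conf;
  }
}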
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106be141/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 675dcba..5061afa 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -14,7 +14,7 @@
 # See the License for the sp

hadoop git commit: HDDS-651. Rename o3 to o3fs for Filesystem.

2018-10-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9abda8394 -> d93d515af


HDDS-651. Rename o3 to o3fs for Filesystem.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93d515a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93d515a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93d515a

Branch: refs/heads/trunk
Commit: d93d515af50055f7743d8fffd563268416d05212
Parents: 9abda83
Author: Jitendra Pandey 
Authored: Wed Oct 17 14:18:46 2018 -0700
Committer: Jitendra Pandey 
Committed: Wed Oct 17 14:19:17 2018 -0700

--
 .../src/main/resources/core-default.xml | 13 -
 .../conf/TestCommonConfigurationFields.java |  2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 +-
 .../dist/src/main/compose/ozonefs/docker-config |  2 +-
 .../src/main/smoketest/ozonefs/ozonefs.robot| 52 ++--
 hadoop-ozone/docs/content/OzoneFS.md|  6 +--
 .../hadoop/ozone/web/ozShell/Handler.java   |  6 +--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |  2 +-
 .../fs/ozone/TestOzoneFileInterfaces.java   |  2 +-
 9 files changed, 40 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 599396f..ce3a407 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1788,19 +1788,6 @@
 
 
 
-<property>
-  <name>fs.o3.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
-  <description>The implementation class of the Ozone FileSystem.</description>
-</property>
-
-<property>
-  <name>fs.AbstractFileSystem.o3.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzFs</value>
-  <description>The implementation class of the OzFs
-    AbstractFileSystem.</description>
-</property>
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 2766b56..3a4bcce 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,7 +103,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3a.");
 
 // O3 properties are in a different subtree.
-xmlPrefixToSkipCompare.add("fs.o3.");
+xmlPrefixToSkipCompare.add("fs.o3fs.");
 
 //ftp properties are in a different subtree.
 // - org.apache.hadoop.fs.ftp.FTPFileSystem.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..b77d621 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -63,7 +63,10 @@ public final class OzoneConsts {
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
-  public static final String OZONE_URI_SCHEME = "o3";
+  // Ozone File System scheme
+  public static final String OZONE_URI_SCHEME = "o3fs";
+
+  public static final String OZONE_RPC_SCHEME = "o3";
   public static final String OZONE_HTTP_SCHEME = "http";
   public static final String OZONE_URI_DELIMITER = "/";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93d515a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 675dcba..5061afa 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -14,7 +14,7 @@
 # See the License for the sp

hadoop git commit: HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.

2018-10-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 0cc5bc110 -> e4dfdcee9


HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4dfdcee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4dfdcee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4dfdcee

Branch: refs/heads/ozone-0.3
Commit: e4dfdcee9f5312ba716b8297778db8bd3434db2d
Parents: 0cc5bc1
Author: Jitendra Pandey 
Authored: Tue Oct 16 10:34:16 2018 -0700
Committer: Jitendra Pandey 
Committed: Tue Oct 16 10:36:37 2018 -0700

--
 .../hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java   | 4 ++--
 .../test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dfdcee/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 7fa0cfb..67cda9f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -53,6 +53,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 public class BlockManagerImpl implements BlockManager {
 
   static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+  private static byte[] blockCommitSequenceIdKey =
+  DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 
   private Configuration config;
 
@@ -89,8 +91,6 @@ public class BlockManagerImpl implements BlockManager {
 Preconditions.checkNotNull(db, "DB cannot be null here");
 
 long blockCommitSequenceId = data.getBlockCommitSequenceId();
-byte[] blockCommitSequenceIdKey =
-DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
 
 // default blockCommitSequenceId for any block is 0. If the putBlock
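
The comment above refers to the rule that a container's blockCommitSequenceId (BCSID) starts at 0 and may only move forward. A simplified sketch of that rule; this is a stand-in, not BlockManagerImpl itself.

// Sketch of the monotonic BCSID rule (illustrative stand-in).
class BcsidRegister {
  private long bcsid = 0; // default blockCommitSequenceId is 0

  synchronized boolean tryAdvance(long incoming) {
    if (incoming <= bcsid) {
      return false; // stale or replayed putBlock: never regress
    }
    bcsid = incoming;
    return true;
  }
}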

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dfdcee/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 71a4bef..ae52451 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
@@ -283,6 +284,7 @@ public final class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   scm.getClientProtocolServer().getScmInfo().getClusterId()));
   stop();
   FileUtils.deleteDirectory(baseDir);
+  ContainerCache.getInstance(conf).shutdownCache();
 } catch (IOException e) {
   LOG.error("Exception while shutting down the cluster.", e);
 }





hadoop git commit: HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.

2018-10-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25f8fcb06 -> 53e5173bd


HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e5173b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e5173b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e5173b

Branch: refs/heads/trunk
Commit: 53e5173bd1d970ec1c714568cbdb1c0dfd0fc6fb
Parents: 25f8fcb
Author: Jitendra Pandey 
Authored: Tue Oct 16 10:34:16 2018 -0700
Committer: Jitendra Pandey 
Committed: Tue Oct 16 10:34:51 2018 -0700

--
 .../hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java   | 4 ++--
 .../test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 7fa0cfb..67cda9f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -53,6 +53,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 public class BlockManagerImpl implements BlockManager {
 
   static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+  private static byte[] blockCommitSequenceIdKey =
+  DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 
   private Configuration config;
 
@@ -89,8 +91,6 @@ public class BlockManagerImpl implements BlockManager {
 Preconditions.checkNotNull(db, "DB cannot be null here");
 
 long blockCommitSequenceId = data.getBlockCommitSequenceId();
-byte[] blockCommitSequenceIdKey =
-DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
 
 // default blockCommitSequenceId for any block is 0. If the putBlock

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 71a4bef..ae52451 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
@@ -283,6 +284,7 @@ public final class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   scm.getClientProtocolServer().getScmInfo().getClusterId()));
   stop();
   FileUtils.deleteDirectory(baseDir);
+  ContainerCache.getInstance(conf).shutdownCache();
 } catch (IOException e) {
   LOG.error("Exception while shutting down the cluster.", e);
 }





[1/3] hadoop git commit: HDDS-603. Add BlockCommitSequenceId field per Container and expose it in Container Reports. Contributed by Shashikant Banerjee.

2018-10-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 abfff4ccc -> b51bc6b93


HDDS-603. Add BlockCommitSequenceId field per Container and expose it in 
Container Reports. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00083579
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00083579
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00083579

Branch: refs/heads/ozone-0.3
Commit: 0008357940d4bdb4c9162e15a0967bd287999a97
Parents: abfff4c
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:15:42 2018 -0700
Committer: Jitendra Pandey 
Committed: Mon Oct 15 13:30:39 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 ---
 .../common/helpers/KeyValueContainerReport.java | 117 ---
 .../container/common/interfaces/Container.java  |   5 +
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 ++
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 +
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 +
 12 files changed, 208 insertions(+), 328 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
 
   private MetadataKeyFilters() {
   }
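
With the new #BCSID key stored alongside block keys, the normal-key filter has to skip it just like the other internal prefixes. A plain-Java re-implementation of that filtering idea for illustration; the real API is the KeyPrefixFilter shown in the diff above.

import java.util.Arrays;
import java.util.List;

// Illustrative re-implementation of "skip internal key prefixes".
class PrefixSkipFilter {
  private final List<String> skipPrefixes =
      Arrays.asList("#deleting#", "#deleted#", "#delTX#", "#BCSID");

  boolean acceptKey(String key) {
    // a key is a normal block key only if it has no internal prefix
    return skipPrefixes.stream().noneMatch(key::startsWith);
  }
}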

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..0000000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the Lic

[2/3] hadoop git commit: HDDS-579. ContainerStateMachine should fail subsequent transactions per container in case one fails. Contributed by Shashikant Banerjee.

2018-10-15 Thread jitendra
HDDS-579. ContainerStateMachine should fail subsequent transactions per 
container in case one fails. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a619d120
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a619d120
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a619d120

Branch: refs/heads/ozone-0.3
Commit: a619d120a6c44bde2a846d61505a94f896e58e46
Parents: 0008357
Author: Jitendra Pandey 
Authored: Sat Oct 13 19:15:01 2018 -0700
Committer: Jitendra Pandey 
Committed: Mon Oct 15 13:30:53 2018 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   4 +-
 .../container/common/impl/HddsDispatcher.java   |  63 +--
 .../container/keyvalue/KeyValueHandler.java |  20 +-
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../rpc/TestContainerStateMachineFailures.java  | 185 +++
 5 files changed, 242 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 662df8f..da55db3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -139,6 +139,7 @@ enum Result {
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
+  CONTAINER_UNHEALTHY = 36;
 }
 
 /**
@@ -161,7 +162,8 @@ enum ContainerLifeCycleState {
 OPEN = 1;
 CLOSING = 2;
 CLOSED = 3;
-INVALID = 4;
+UNHEALTHY = 4;
+INVALID = 5;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index bb5002a..1849841 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -142,6 +142,26 @@ public class HddsDispatcher implements ContainerDispatcher 
{
 responseProto = handler.handle(msg, container);
 if (responseProto != null) {
   metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
+
+      // If the request is of Write Type and the container operation
+      // is unsuccessful, it implies the applyTransaction on the container
+      // failed. All subsequent transactions on the container should fail and
+      // hence the replica will be marked unhealthy here. In this case, a close
+      // container action will be sent to SCM to close the container.
+      if (!HddsUtils.isReadOnly(msg)
+          && responseProto.getResult() != ContainerProtos.Result.SUCCESS) {
+        // If the container is open and the container operation has failed,
+        // it should first be marked unhealthy and then initiate the close
+        // container action. This also implies this is the first transaction
+        // which has failed, so the container is marked unhealthy right here.
+        // Once a container is marked unhealthy, all subsequent write
+        // transactions will fail with an UNHEALTHY_CONTAINER exception.
+        if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
+          container.getContainerData()
+              .setState(ContainerLifeCycleState.UNHEALTHY);
+          sendCloseContainerActionIfNeeded(container);
+        }
+      }
   return responseProto;
 } else {
   return ContainerUtils.unsupportedRequest(msg);
@@ -149,31 +169,46 @@ public class HddsDispatcher implements 
ContainerDispatcher {
   }
 
   /**
-   * If the container usage reaches the close threshold we send Close
-   * ContainerAction to SCM.
-   *
+   * If the container usage reaches the close threshold or the container is
+   * marked unhealthy we send Close ContainerAction to SCM.
* @param container current state of container
*/
   private void sendCloseContainerActionIfNeeded(Container container) {
 // We have to find a more efficient way to close a container.
-Boolean isOpen = Optional.ofNullable(container)
+boolean isSpaceFull = isContainerFull(container);
+boolean sho
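
The email is truncated here, but the dispatcher rule above is self-contained: the first failed write transaction on an open container marks the replica unhealthy and triggers a close-container action toward SCM. A simplified state sketch follows; names are stand-ins, not the HddsDispatcher API.

// Sketch of the unhealthy-on-first-failure rule (illustrative only).
enum ReplicaState { OPEN, CLOSING, CLOSED, UNHEALTHY }

class ReplicaGuard {
  private ReplicaState state = ReplicaState.OPEN;

  void onWriteResult(boolean success) {
    if (!success && state == ReplicaState.OPEN) {
      state = ReplicaState.UNHEALTHY; // first failure poisons the replica
      sendCloseAction();              // ask SCM to close the container
    }
  }

  void sendCloseAction() {
    // hypothetical: queue a close ContainerAction for the next heartbeat
  }
}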

[3/3] hadoop git commit: HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.

2018-10-15 Thread jitendra
HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b51bc6b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b51bc6b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b51bc6b9

Branch: refs/heads/ozone-0.3
Commit: b51bc6b93ffb41de860a59b3875ce49274287b82
Parents: a619d12
Author: Jitendra Pandey 
Authored: Mon Oct 15 11:52:38 2018 -0700
Committer: Jitendra Pandey 
Committed: Mon Oct 15 13:31:08 2018 -0700

--
 .../container/keyvalue/KeyValueHandler.java |  10 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |  11 +-
 .../keyvalue/impl/BlockManagerImpl.java |  24 +++-
 .../keyvalue/impl/ChunkManagerImpl.java |  43 +++
 .../TestContainerStateMachineIdempotency.java   | 121 +++
 .../common/impl/TestContainerPersistence.java   |  17 +--
 6 files changed, 203 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4c87b19..da77f1c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -241,9 +241,12 @@ public class KeyValueHandler extends Handler {
 newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
 containerSet.addContainer(newContainer);
   } else {
-throw new StorageContainerException("Container already exists with " +
-"container Id " + containerID, ContainerProtos.Result
-.CONTAINER_EXISTS);
+
+        // The create container request for an already existing container can
+        // arrive in case the ContainerStateMachine reapplies the transaction
+        // on datanode restart. Just log a warning msg here.
+        LOG.warn("Container already exists. " +
+            "container Id " + containerID);
   }
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -370,6 +373,7 @@ public class KeyValueHandler extends Handler {
 
   /**
* Handles Close Container Request. An open container is closed.
+   * Close Container call is idempotent.
*/
   ContainerCommandResponseProto handleCloseContainer(
   ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
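
The replayed-create handling above is what makes createContainer idempotent: a duplicate create arriving from a ContainerStateMachine replay after restart must succeed silently. A minimal self-contained sketch of that behavior (names simplified, not the KeyValueHandler code):

import java.util.HashSet;
import java.util.Set;

// Sketch of idempotent container creation (illustrative stand-in).
class IdempotentCreate {
  private final Set<Long> containerSet = new HashSet<>();

  void createContainer(long containerId) {
    if (!containerSet.add(containerId)) {
      // replay after datanode restart: warn and carry on, don't throw
      System.out.println("WARN: container already exists, id=" + containerId);
    }
  }
}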

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 8bdae0f..20598d9 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -231,21 +231,18 @@ public final class ChunkUtils {
*
* @param chunkFile - chunkFile to write data into.
* @param info - chunk info.
-   * @return boolean isOverwrite
-   * @throws StorageContainerException
+   * @return true if the chunkFile exists and chunkOffset < chunkFile length,
+   * false otherwise.
*/
   public static boolean validateChunkForOverwrite(File chunkFile,
-  ChunkInfo info) throws StorageContainerException {
+  ChunkInfo info) {
 
 Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
 
 if (isOverWriteRequested(chunkFile, info)) {
   if (!isOverWritePermitted(info)) {
-log.error("Rejecting write chunk request. Chunk overwrite " +
+log.warn("Duplicate write chunk request. Chunk overwrite " +
 "without explicit request. {}", info.toString());
-throw new StorageContainerException("Rejecting write chunk request. " +
-"OverWrite flag required." + info.toString(),
-OVERWRITE_FLAG_REQUIRED);
   }
   return true;
 }
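
The relaxed check above means an unrequested overwrite is now treated as a duplicate (replayed) write chunk and logged, rather than failing the transaction. A simplified sketch of the check's new contract, matching the javadoc in the hunk; the helper below is a stand-in, not ChunkUtils itself.

import java.io.File;

// Sketch of the relaxed overwrite validation (illustrative stand-in).
class OverwriteCheck {
  static boolean validateChunkForOverwrite(File chunkFile, long chunkOffset) {
    boolean overwrite = chunkFile.exists() && chunkOffset < chunkFile.length();
    if (overwrite) {
      // duplicate write chunk request: log instead of throwing
      System.out.println("WARN: duplicate write chunk for " + chunkFile);
    }
    return overwrite;
  }
}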

http://git-wip-us.apa

hadoop git commit: HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.

2018-10-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk b6fc72a02 -> e13a38f4b


HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e13a38f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e13a38f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e13a38f4

Branch: refs/heads/trunk
Commit: e13a38f4bc358666e64687636cf7b025bce83b46
Parents: b6fc72a
Author: Jitendra Pandey 
Authored: Mon Oct 15 11:52:38 2018 -0700
Committer: Jitendra Pandey 
Committed: Mon Oct 15 11:52:38 2018 -0700

--
 .../container/keyvalue/KeyValueHandler.java |  10 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |  11 +-
 .../keyvalue/impl/BlockManagerImpl.java |  24 +++-
 .../keyvalue/impl/ChunkManagerImpl.java |  43 +++
 .../TestContainerStateMachineIdempotency.java   | 121 +++
 .../common/impl/TestContainerPersistence.java   |  17 +--
 6 files changed, 203 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e13a38f4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4c87b19..da77f1c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -241,9 +241,12 @@ public class KeyValueHandler extends Handler {
 newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
 containerSet.addContainer(newContainer);
   } else {
-throw new StorageContainerException("Container already exists with " +
-"container Id " + containerID, ContainerProtos.Result
-.CONTAINER_EXISTS);
+
+        // The create container request for an already existing container can
+        // arrive in case the ContainerStateMachine reapplies the transaction
+        // on datanode restart. Just log a warning msg here.
+        LOG.warn("Container already exists. " +
+            "container Id " + containerID);
   }
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -370,6 +373,7 @@ public class KeyValueHandler extends Handler {
 
   /**
* Handles Close Container Request. An open container is closed.
+   * Close Container call is idempotent.
*/
   ContainerCommandResponseProto handleCloseContainer(
   ContainerCommandRequestProto request, KeyValueContainer kvContainer) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e13a38f4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 8bdae0f..20598d9 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -231,21 +231,18 @@ public final class ChunkUtils {
*
* @param chunkFile - chunkFile to write data into.
* @param info - chunk info.
-   * @return boolean isOverwrite
-   * @throws StorageContainerException
+   * @return true if the chunkFile exists and chunkOffset < chunkFile length,
+   * false otherwise.
*/
   public static boolean validateChunkForOverwrite(File chunkFile,
-  ChunkInfo info) throws StorageContainerException {
+  ChunkInfo info) {
 
 Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
 
 if (isOverWriteRequested(chunkFile, info)) {
   if (!isOverWritePermitted(info)) {
-log.error("Rejecting write chunk request. Chunk overwrite " +
+log.warn("Duplicate write chunk request. Chunk overwrite " +
 "without explicit request. {}", info.toString());
-throw new StorageContainerException("Rejecting write chunk request. " +
-"OverWrite flag required." + info.toString(),
-OVERW

hadoop git commit: HDDS-579. ContainerStateMachine should fail subsequent transactions per container in case one fails. Contributed by Shashikant Banerjee.

2018-10-13 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5209c7503 -> 603649d3a


HDDS-579. ContainerStateMachine should fail subsequent transactions per 
container in case one fails. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603649d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603649d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603649d3

Branch: refs/heads/trunk
Commit: 603649d3a9ff12b725d06f5f317966de9a59fe70
Parents: 5209c75
Author: Jitendra Pandey 
Authored: Sat Oct 13 19:15:01 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 19:15:01 2018 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   4 +-
 .../container/common/impl/HddsDispatcher.java   |  63 +--
 .../container/keyvalue/KeyValueHandler.java |  20 +-
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../rpc/TestContainerStateMachineFailures.java  | 185 +++
 5 files changed, 242 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/603649d3/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 662df8f..da55db3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -139,6 +139,7 @@ enum Result {
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
+  CONTAINER_UNHEALTHY = 36;
 }
 
 /**
@@ -161,7 +162,8 @@ enum ContainerLifeCycleState {
 OPEN = 1;
 CLOSING = 2;
 CLOSED = 3;
-INVALID = 4;
+UNHEALTHY = 4;
+INVALID = 5;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603649d3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index bb5002a..1849841 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -142,6 +142,26 @@ public class HddsDispatcher implements ContainerDispatcher 
{
 responseProto = handler.handle(msg, container);
 if (responseProto != null) {
   metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
+
+      // If the request is of Write Type and the container operation
+      // is unsuccessful, it implies the applyTransaction on the container
+      // failed. All subsequent transactions on the container should fail and
+      // hence the replica will be marked unhealthy here. In this case, a close
+      // container action will be sent to SCM to close the container.
+      if (!HddsUtils.isReadOnly(msg)
+          && responseProto.getResult() != ContainerProtos.Result.SUCCESS) {
+        // If the container is open and the container operation has failed,
+        // it should first be marked unhealthy and then initiate the close
+        // container action. This also implies this is the first transaction
+        // which has failed, so the container is marked unhealthy right here.
+        // Once a container is marked unhealthy, all subsequent write
+        // transactions will fail with an UNHEALTHY_CONTAINER exception.
+        if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
+          container.getContainerData()
+              .setState(ContainerLifeCycleState.UNHEALTHY);
+          sendCloseContainerActionIfNeeded(container);
+        }
+      }
   return responseProto;
 } else {
   return ContainerUtils.unsupportedRequest(msg);
@@ -149,31 +169,46 @@ public class HddsDispatcher implements 
ContainerDispatcher {
   }
 
   /**
-   * If the container usage reaches the close threshold we send Close
-   * ContainerAction to SCM.
-   *
+   * If the container usage reaches the close threshold or the container is
+   * marked unhealthy we send Close ContainerAction to SCM.
* @param container current state of container
*/
   private void sendCloseContainerActionIfNeeded(Container container) {
 // We have to find a more efficient way to close a container.
-Boolean isOpen = Optional.ofNullabl

[2/2] hadoop git commit: HDDS-603. Add BlockCommitSequenceId field per Container and expose it in Container Reports. Contributed by Shashikant Banerjee.

2018-10-13 Thread jitendra
HDDS-603. Add BlockCommitSequenceId field per Container and expose it in 
Container Reports. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5209c750
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5209c750
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5209c750

Branch: refs/heads/trunk
Commit: 5209c7503bee7849d134c16213133b4fa0c330f6
Parents: 22f37af
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:15:42 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:15:42 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 ---
 .../common/helpers/KeyValueContainerReport.java | 117 ---
 .../container/common/interfaces/Container.java  |   5 +
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 ++
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 +
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 +
 12 files changed, 208 insertions(+), 328 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
 
   private MetadataKeyFilters() {
   }
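
The MetadataKeyFilters hunk above excludes the new #BCSID key, alongside the existing #deleting#/#deleted#/#delTX# prefixes, from normal block-key scans. A self-contained sketch of that prefix-exclusion idea (hypothetical class, not the Hadoop KeyPrefixFilter API; the boolean true in addFilter is assumed to mean "exclude"):

import java.util.ArrayList;
import java.util.List;

public class PrefixExclusionSketch {
  private final List<String> excludedPrefixes = new ArrayList<>();

  // Mirrors addFilter(prefix, true) in the diff above.
  PrefixExclusionSketch exclude(String prefix) {
    excludedPrefixes.add(prefix);
    return this;
  }

  // A key is visible to normal scans only if no excluded prefix matches it.
  boolean accepts(String key) {
    return excludedPrefixes.stream().noneMatch(key::startsWith);
  }

  public static void main(String[] args) {
    PrefixExclusionSketch filter = new PrefixExclusionSketch()
        .exclude("#deleting#")
        .exclude("#deleted#")
        .exclude("#delTX#")
        .exclude("#BCSID");
    System.out.println(filter.accepts("#BCSID#100"));  // false: internal key
    System.out.println(filter.accepts("block_12345")); // true: normal block key
  }
}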

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS 

[1/2] hadoop git commit: Revert "HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee."

2018-10-13 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0473b6800 -> 5209c7503


Revert "HDDS-629. Make ApplyTransaction calls in ContainerStateMachine 
idempotent. Contributed by Shashikant Banerjee."

This reverts commit 0473b6817cfe4f03acdcb0eedc78b509244f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22f37af9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22f37af9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22f37af9

Branch: refs/heads/trunk
Commit: 22f37af93583e9c63e03f0781ffb903e35544559
Parents: 0473b68
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:14:39 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:14:39 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 -
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 +++
 .../common/helpers/KeyValueContainerReport.java | 117 +++
 .../container/common/interfaces/Container.java  |   5 -
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 --
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 -
 .../StorageContainerDatanodeProtocol.proto  |   1 -
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 -
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 -
 12 files changed, 328 insertions(+), 208 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..923271c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,7 +113,6 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index 04c87ae..a3430f8 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,8 +42,7 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
 
   private MetadataKeyFilters() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
new file mode 100644
index 000..a4c1f2f
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or ag

hadoop git commit: HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.

2018-10-13 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9227f3d22 -> 0473b6800


HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0473b680
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0473b680
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0473b680

Branch: refs/heads/trunk
Commit: 0473b6817cfe4f03acdcb0eedc78b509244f
Parents: 9227f3d
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:12:08 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:12:08 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 ---
 .../common/helpers/KeyValueContainerReport.java | 117 ---
 .../container/common/interfaces/Container.java  |   5 +
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 ++
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 +
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 +
 12 files changed, 208 insertions(+), 328 deletions(-)
--
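
A minimal sketch of what an idempotent applyTransaction means here (hypothetical code, not the Ratis ContainerStateMachine API): record the highest log index already applied, and turn a replayed transaction into a no-op instead of re-executing it.

import java.util.concurrent.atomic.AtomicLong;

public class IdempotentApplySketch {
  private final AtomicLong lastAppliedIndex = new AtomicLong(-1);

  void applyTransaction(long logIndex, Runnable stateChange) {
    // On restart/replay the same index can arrive again; applying it twice
    // could corrupt container state, so it is skipped.
    if (logIndex <= lastAppliedIndex.get()) {
      return;
    }
    stateChange.run();
    lastAppliedIndex.set(logIndex);
  }

  public static void main(String[] args) {
    IdempotentApplySketch sm = new IdempotentApplySketch();
    sm.applyTransaction(1, () -> System.out.println("applied index 1"));
    sm.applyTransaction(1, () -> System.out.println("never printed"));
  }
}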


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
 
   private MetadataKeyFilters() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "

hadoop git commit: HDDS-550. Serialize ApplyTransaction calls per Container in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-10-11 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 72ac4e823 -> 077d36d8f


HDDS-550. Serialize ApplyTransaction calls per Container in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/077d36d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/077d36d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/077d36d8

Branch: refs/heads/ozone-0.3
Commit: 077d36d8f5a9e24fd4a48ffd06b6bdaf8eee7de6
Parents: 72ac4e8
Author: Jitendra Pandey 
Authored: Thu Oct 11 16:59:59 2018 -0700
Committer: Jitendra Pandey 
Committed: Thu Oct 11 17:15:08 2018 -0700

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   6 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |   4 +-
 .../hdds/scm/storage/ChunkInputStream.java  |   2 +-
 .../hdds/scm/storage/ChunkOutputStream.java |   3 +-
 hadoop-hdds/common/pom.xml  |   4 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   6 +-
 .../scm/storage/ContainerProtocolCalls.java |   2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   3 +-
 .../org/apache/hadoop/utils/db/RDBStore.java|   3 +-
 .../main/java/org/apache/ratis/RatisHelper.java |  14 +-
 .../main/proto/DatanodeContainerProtocol.proto  |   1 -
 .../common/src/main/resources/ozone-default.xml |   8 +
 .../transport/server/GrpcXceiverService.java|   2 +-
 .../transport/server/XceiverServerGrpc.java |   8 +-
 .../server/ratis/ContainerStateMachine.java | 440 +--
 .../server/ratis/XceiverServerRatis.java|  15 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |   2 +-
 .../keyvalue/helpers/SmallFileUtils.java|   2 +-
 .../background/BlockDeletingService.java|   2 +-
 .../replication/GrpcReplicationClient.java  |   6 +-
 .../replication/GrpcReplicationService.java |   4 +-
 .../common/impl/TestHddsDispatcher.java |   2 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   5 +
 .../rpc/TestCloseContainerHandlingByClient.java |   1 -
 .../ozone/container/ContainerTestHelper.java|   3 +-
 .../common/impl/TestCloseContainerHandler.java  |   2 +-
 .../container/server/TestContainerServer.java   |   1 -
 .../server/TestContainerStateMachine.java   | 201 -
 .../genesis/BenchMarkDatanodeDispatcher.java|   2 +-
 hadoop-project/pom.xml  |   2 +-
 31 files changed, 192 insertions(+), 570 deletions(-)
--
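
A minimal sketch of the serialization idea in this change (hypothetical code, not the ContainerStateMachine API): map each container id to a single-threaded executor, so transactions against one container apply strictly in order while different containers still apply concurrently.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PerContainerApplySketch {
  // One single-threaded executor per container id: per-container ordering,
  // cross-container parallelism.
  private final ConcurrentMap<Long, ExecutorService> executors =
      new ConcurrentHashMap<>();

  CompletableFuture<Void> applyTransaction(long containerId, Runnable txn) {
    ExecutorService exec = executors.computeIfAbsent(
        containerId, id -> Executors.newSingleThreadExecutor());
    return CompletableFuture.runAsync(txn, exec);
  }

  public static void main(String[] args) {
    PerContainerApplySketch sm = new PerContainerApplySketch();
    // Two transactions on container 7 run strictly in order; container 8
    // may interleave with them.
    sm.applyTransaction(7, () -> System.out.println("c7 txn #1"));
    sm.applyTransaction(7, () -> System.out.println("c7 txn #2")).join();
    sm.applyTransaction(8, () -> System.out.println("c8 txn #1")).join();
    sm.executors.values().forEach(ExecutorService::shutdown);
  }
}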


http://git-wip-us.apache.org/repos/asf/hadoop/blob/077d36d8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index d353e7a..2f11872 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -31,9 +31,9 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.util.Time;
-import org.apache.ratis.shaded.io.grpc.ManagedChannel;
-import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
+import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
+import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/077d36d8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0d301d9..4efe7ba 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdds.scm;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.shaded.com.google.protobuf
+import org.apache.ratis.thirdparty.com.google.protobuf
 .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configurat

hadoop git commit: HDDS-550. Serialize ApplyTransaction calls per Container in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-10-11 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8d1981806 -> 0bc6d0484


HDDS-550. Serialize ApplyTransaction calls per Container in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc6d048
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc6d048
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc6d048

Branch: refs/heads/trunk
Commit: 0bc6d0484a1c3652234b76e4f99e2ef99173796d
Parents: 8d19818
Author: Jitendra Pandey 
Authored: Thu Oct 11 16:59:59 2018 -0700
Committer: Jitendra Pandey 
Committed: Thu Oct 11 17:12:47 2018 -0700

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   6 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |   4 +-
 .../hdds/scm/storage/ChunkInputStream.java  |   2 +-
 .../hdds/scm/storage/ChunkOutputStream.java |   3 +-
 hadoop-hdds/common/pom.xml  |   4 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   6 +-
 .../scm/storage/ContainerProtocolCalls.java |   2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   3 +-
 .../org/apache/hadoop/utils/db/RDBStore.java|   3 +-
 .../main/java/org/apache/ratis/RatisHelper.java |  14 +-
 .../main/proto/DatanodeContainerProtocol.proto  |   1 -
 .../common/src/main/resources/ozone-default.xml |   8 +
 .../transport/server/GrpcXceiverService.java|   2 +-
 .../transport/server/XceiverServerGrpc.java |   8 +-
 .../server/ratis/ContainerStateMachine.java | 440 +--
 .../server/ratis/XceiverServerRatis.java|  15 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |   2 +-
 .../keyvalue/helpers/SmallFileUtils.java|   2 +-
 .../background/BlockDeletingService.java|   2 +-
 .../replication/GrpcReplicationClient.java  |   6 +-
 .../replication/GrpcReplicationService.java |   4 +-
 .../common/impl/TestHddsDispatcher.java |   2 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   5 +
 .../rpc/TestCloseContainerHandlingByClient.java |   1 -
 .../ozone/container/ContainerTestHelper.java|   3 +-
 .../common/impl/TestCloseContainerHandler.java  |   2 +-
 .../container/server/TestContainerServer.java   |   1 -
 .../server/TestContainerStateMachine.java   | 201 -
 .../genesis/BenchMarkDatanodeDispatcher.java|   2 +-
 hadoop-project/pom.xml  |   2 +-
 31 files changed, 192 insertions(+), 570 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d048/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index d353e7a..2f11872 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -31,9 +31,9 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.util.Time;
-import org.apache.ratis.shaded.io.grpc.ManagedChannel;
-import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
+import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
+import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d048/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0d301d9..4efe7ba 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdds.scm;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.shaded.com.google.protobuf
+import org.apache.ratis.thirdparty.com.google.protobuf
 .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
 imp

hadoop git commit: HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. Contributed by Namit Maheshwari.

2018-09-14 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.2 3ade889e5 -> 5d47072b4


HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. 
Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d47072b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d47072b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d47072b

Branch: refs/heads/ozone-0.2
Commit: 5d47072b43c364dbd90f57ab10223cb87673909f
Parents: 3ade889
Author: Jitendra Pandey 
Authored: Fri Sep 14 11:38:50 2018 -0700
Committer: Jitendra Pandey 
Committed: Fri Sep 14 11:58:38 2018 -0700

--
 .../hadoop/hdds/scm/server/StorageContainerManager.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d47072b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 28a4983..b3408a4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -361,8 +361,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 hParser.printGenericCommandUsage(System.err);
 System.exit(1);
   }
-  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-  LOG);
   StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), 
conf);
   if (scm != null) {
 scm.start();
@@ -395,9 +393,13 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 }
 switch (startOpt) {
 case INIT:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   terminate(scmInit(conf) ? 0 : 1);
   return null;
 case GENCLUSTERID:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   System.out.println("Generating new cluster id:");
   System.out.println(StorageInfo.newClusterID());
   terminate(0);
@@ -407,6 +409,8 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
   terminate(0);
   return null;
 default:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   return new StorageContainerManager(conf);
 }
   }
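
The gist of the fix, as a small self-contained sketch with hypothetical option names: validate the argument before emitting the STARTUP_MSG banner, so an incorrect invocation prints usage instead of the full banner.

public class StartupBannerSketch {
  static void printStartupBanner() {
    System.out.println("STARTUP_MSG: Starting StorageContainerManager");
  }

  public static void main(String[] argv) {
    // Validate first; an unknown option prints usage and exits without
    // ever emitting the multi-line STARTUP_MSG banner.
    if (argv.length != 1 || !argv[0].matches("--init|--genclusterid|--regular")) {
      System.err.println("Usage: ozone scm [--init | --genclusterid | --regular]");
      System.exit(1);
    }
    printStartupBanner(); // only for a recognized start option
  }
}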





hadoop git commit: HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. Contributed by Namit Maheshwari.

2018-09-14 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0c8a43b9e -> 446cb8301


HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. 
Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/446cb830
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/446cb830
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/446cb830

Branch: refs/heads/trunk
Commit: 446cb8301ebd651879dfd403d5fa342b4dfaf6e3
Parents: 0c8a43b
Author: Jitendra Pandey 
Authored: Fri Sep 14 11:38:50 2018 -0700
Committer: Jitendra Pandey 
Committed: Fri Sep 14 11:39:36 2018 -0700

--
 .../hadoop/hdds/scm/server/StorageContainerManager.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/446cb830/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 28a4983..b3408a4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -361,8 +361,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 hParser.printGenericCommandUsage(System.err);
 System.exit(1);
   }
-  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-  LOG);
   StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), 
conf);
   if (scm != null) {
 scm.start();
@@ -395,9 +393,13 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 }
 switch (startOpt) {
 case INIT:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   terminate(scmInit(conf) ? 0 : 1);
   return null;
 case GENCLUSTERID:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   System.out.println("Generating new cluster id:");
   System.out.println(StorageInfo.newClusterID());
   terminate(0);
@@ -407,6 +409,8 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
   terminate(0);
   return null;
 default:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   return new StorageContainerManager(conf);
 }
   }





[21/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap.min.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap.min.js
deleted file mode 100644
index 0e668e8..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap.min.js
+++ /dev/null
@@ -1,9 +0,0 @@
-/*!
- * Bootstrap v3.0.2 by @fat and @mdo
- * Copyright 2013 Twitter, Inc.
- * Licensed under http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- */
-
-if("undefined"==typeof jQuery)throw new Error("Bootstrap requires 
jQuery");+function(a){"use strict";function b(){var 
a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd
 otransitionend",transition:"transitionend"};for(var c in b)if(void 
0!==a.style[c])return{end:b[c]}}a.fn.emulateTransitionEnd=function(b){var 
c=!1,d=this;a(this).one(a.support.transition.end,function(){c=!0});var 
e=function(){c||a(d).trigger(a.support.transition.end)};return 
setTimeout(e,b),this},a(function(){a.support.transition=b()})}(jQuery),+function(a){"use
 strict";var 
b='[data-dismiss="alert"]',c=function(c){a(c).on("click",b,this.close)};c.prototype.close=function(b){function
 c(){f.trigger("closed.bs.alert").remove()}var 
d=a(this),e=d.attr("data-target");e||(e=d.attr("href"),e=e&(/.*(?=#[^\s]*$)/,""));var
 
f=a(e);b&(),f.length||(f=d.hasClass("alert")?d:d.parent()),f.trigger(b=a.Event("close.bs.alert"
 
)),b.isDefaultPrevented()||(f.removeClass("in"),a.support.transition&("fade")?f.one(a.support.transition.end,c).emulateTransitionEnd(150):c())};var
 d=a.fn.alert;a.fn.alert=function(b){return this.each(function(){var 
d=a(this),e=d.data("bs.alert");e||d.data("bs.alert",e=new 
c(this)),"string"==typeof 
b&[b].call(d)})},a.fn.alert.Constructor=c,a.fn.alert.noConflict=function(){return
 
a.fn.alert=d,this},a(document).on("click.bs.alert.data-api",b,c.prototype.close)}(jQuery),+function(a){"use
 strict";var 
b=function(c,d){this.$element=a(c),this.options=a.extend({},b.DEFAULTS,d)};b.DEFAULTS={loadingText:"loading..."},b.prototype.setState=function(a){var
 
b="disabled",c=this.$element,d=c.is("input")?"val":"html",e=c.data();a+="Text",e.resetText||c.data("resetText",c[d]()),c[d](e[a]||this.options[a]),setTimeout(function(){"loadingText"==a?c.addClass(b).attr(b,b):c.removeClass(b).removeAttr(b)},0)},b.prototype.toggle=function(){var
 a=this.$element.closest('[data-toggle="buttons"]');i
 f(a.length){var 
b=this.$element.find("input").prop("checked",!this.$element.hasClass("active")).trigger("change");"radio"===b.prop("type")&(".active").removeClass("active")}this.$element.toggleClass("active")};var
 c=a.fn.button;a.fn.button=function(c){return this.each(function(){var 
d=a(this),e=d.data("bs.button"),f="object"==typeof 
c&e||d.data("bs.button",e=new 
b(this,f)),"toggle"==c?e.toggle():c&(c)})},a.fn.button.Constructor=b,a.fn.button.noConflict=function(){return
 
a.fn.button=c,this},a(document).on("click.bs.button.data-api","[data-toggle^=button]",function(b){var
 
c=a(b.target);c.hasClass("btn")||(c=c.closest(".btn")),c.button("toggle"),b.preventDefault()})}(jQuery),+function(a){"use
 strict";var 
b=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,"hover"==this.options.pause&$element.on("mouseenter",a.proxy(this.pause,this)).o
 
n("mouseleave",a.proxy(this.cycle,this))};b.DEFAULTS={interval:5e3,pause:"hover",wrap:!0},b.prototype.cycle=function(b){return
 
b||(this.paused=!1),this.interval&(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},b.prototype.getActiveIndex=function(){return
 
this.$active=this.$element.find(".item.active"),this.$items=this.$active.parent().children(),this.$items.index(this.$active)},b.prototype.to=function(b){var
 c=this,d=this.getActiveIndex();return b>this.$items.length-1||0>b?void 
0:this.sliding?this.$element.one("slid",function(){c.to(b)}):d==b?this.pause().cycle():this.slide(b>d?"next":"prev",a(this.$items[b]))},b.prototype.pause=function(b){return
 b||(this.paused=!0),this.$element.find(".next, 
.prev").length&&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},b.prototype.next=function(){return
 this
 .sliding?void 0:this.slide("next")},b.prototype.prev=function(){return 
this.sliding?void 0:this.slide("prev")},b.prototype.slide=function(b,c){var 

[07/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
index 05199bd..9068992 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
@@ -1,7 +1,5 @@
 //! moment.js
-//! version : 2.10.3
-//! authors : Tim Wood, Iskren Chernev, Moment.js contributors
+//! version : 2.22.1
 //! license : MIT
 //! momentjs.com
[diff body elided: minified moment.min.js payload, mangled beyond recovery by the mail archive]
 

[10/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.min.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.min.js
new file mode 100644
index 000..9bcd2fc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under the MIT license
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires 
jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" 
")[0].split(".");if(b[0]<2&[1]<9||1==b[0]&&9==b[1]&[2]<1||b[0]>3)throw new 
Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but 
lower than version 4")}(jQuery),+function(a){"use strict";function b(){var 
a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd
 otransitionend",transition:"transitionend"};for(var c in b)if(void 
0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var
 c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var 
e=function(){c||a(d).trigger(a.support.transition.end)};return 
setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b
 .target).is(this))return 
b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use 
strict";function b(b){return this.each(function(){var 
c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new 
d(this)),"string"==typeof b&[b].call(c)})}var 
c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function
 c(){g.detach().trigger("closed.bs.alert").remove()}var 
e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&(/.*(?=#[^\s]*$)/,""));var
 
g=a("#"===f?[]:f);b&(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var
 
e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return
 a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c
 ,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return 
this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof 
b&e||d.data("bs.button",e=new 
c(this,f)),"toggle"==b?e.toggle():b&(b)})}var 
c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var
 
c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var
 a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var 
c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeCla
 
ss("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&("change")}else
 
this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var
 
d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return
 
a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var
 
d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"],
 
input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api
 
blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"us
 e strict";function b(b){return this.each(function(){var 
d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof
 b&),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new 
c(this,f)),"number"==typeof 
b?e.to(b):g?e[g]():f.interval&().cycle()})}var 

[17/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css.map
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css.map
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css.map
new file mode 100644
index 000..f010c82
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css.map
[diff body elided: single-line bootstrap.css.map source map, mangled beyond recovery by the mail archive]
 

[02/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js
new file mode 100644
index 000..4d9b3a2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | 
jquery.org/license */
[diff body elided: minified jquery-3.3.1.min.js, mangled beyond recovery by the mail archive]

[25/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, 
Mukul Kumar Singh and Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4c7c911
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4c7c911
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4c7c911

Branch: refs/heads/trunk
Commit: f4c7c91123b1dbb12bcc007047963b04ad46
Parents: 29024a6
Author: Jitendra Pandey 
Authored: Wed Jun 13 00:36:02 2018 -0700
Committer: Jitendra Pandey 
Committed: Wed Jun 13 00:36:02 2018 -0700

--
 LICENSE.txt | 4 +-
 .../server-scm/src/main/webapps/scm/index.html  | 6 +-
 .../main/webapps/router/federationhealth.html   | 6 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 +-
 .../src/main/webapps/datanode/datanode.html | 6 +-
 .../src/main/webapps/hdfs/dfshealth.html| 6 +-
 .../src/main/webapps/hdfs/dfshealth.js  | 8 +-
 .../src/main/webapps/hdfs/explorer.html |10 +-
 .../src/main/webapps/hdfs/explorer.js   |34 +-
 .../src/main/webapps/journal/index.html | 6 +-
 .../src/main/webapps/secondary/status.html  | 6 +-
 .../bootstrap-3.0.2/css/bootstrap-editable.css  |   655 -
 .../bootstrap-3.0.2/css/bootstrap.min.css   | 9 -
 .../fonts/glyphicons-halflings-regular.eot  |   Bin 20290 -> 0 bytes
 .../fonts/glyphicons-halflings-regular.svg  |   229 -
 .../fonts/glyphicons-halflings-regular.ttf  |   Bin 41236 -> 0 bytes
 .../fonts/glyphicons-halflings-regular.woff |   Bin 23292 -> 0 bytes
 .../static/bootstrap-3.0.2/img/clear.png|   Bin 509 -> 0 bytes
 .../static/bootstrap-3.0.2/img/loading.gif  |   Bin 1849 -> 0 bytes
 .../js/bootstrap-editable.min.js| 7 -
 .../static/bootstrap-3.0.2/js/bootstrap.min.js  | 9 -
 .../bootstrap-3.3.7/css/bootstrap-editable.css  |   655 +
 .../bootstrap-3.3.7/css/bootstrap-theme.css |   587 +
 .../bootstrap-3.3.7/css/bootstrap-theme.css.map | 1 +
 .../bootstrap-3.3.7/css/bootstrap-theme.min.css | 6 +
 .../css/bootstrap-theme.min.css.map | 1 +
 .../static/bootstrap-3.3.7/css/bootstrap.css|  6757 
 .../bootstrap-3.3.7/css/bootstrap.css.map   | 1 +
 .../bootstrap-3.3.7/css/bootstrap.min.css   | 6 +
 .../bootstrap-3.3.7/css/bootstrap.min.css.map   | 1 +
 .../fonts/glyphicons-halflings-regular.eot  |   Bin 0 -> 20127 bytes
 .../fonts/glyphicons-halflings-regular.svg  |   288 +
 .../fonts/glyphicons-halflings-regular.ttf  |   Bin 0 -> 45404 bytes
 .../fonts/glyphicons-halflings-regular.woff |   Bin 0 -> 23424 bytes
 .../fonts/glyphicons-halflings-regular.woff2|   Bin 0 -> 18028 bytes
 .../js/bootstrap-editable.min.js| 7 +
 .../static/bootstrap-3.3.7/js/bootstrap.js  |  2377 +++
 .../static/bootstrap-3.3.7/js/bootstrap.min.js  | 7 +
 .../webapps/static/bootstrap-3.3.7/js/npm.js|13 +
 .../src/main/webapps/static/dfs-dust.js | 2 +-
 .../main/webapps/static/jquery-1.10.2.min.js| 6 -
 .../src/main/webapps/static/jquery-3.3.1.min.js | 2 +
 .../src/main/webapps/static/moment.min.js   | 6 +-
 .../test/robotframework/acceptance/ozone.robot  | 4 +-
 .../src/main/webapps/ksm/index.html | 6 +-
 hadoop-ozone/pom.xml| 4 +-
 .../src/main/html/js/thirdparty/jquery.js   | 13607 +
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  | 2 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java   | 2 +-
 .../webapps/static/jquery/jquery-1.8.2.min.js   | 2 -
 .../webapps/static/jquery/jquery-3.3.1.min.js   | 2 +
 .../webapps/static/jt/jquery.jstree.js  |42 +-
 52 files changed, 17883 insertions(+), 7516 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 75c5562..f8de86a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -617,7 +617,7 @@ OTHER DEALINGS IN THE SOFTWARE.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
@@ -761,7 +761,7 @@ THE SOFTWARE.
 
 
 For:
-hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
+hadoop-hdfs-project/hadoop-hdfs/src/main/

[15/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css.map
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css.map
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css.map
new file mode 100644
index 000..6c7fa40
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css.map
[diff body elided: single-line bootstrap.min.css.map source map, mangled beyond recovery by the mail archive]
 

[24/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/css/bootstrap.min.css
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/css/bootstrap.min.css
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/css/bootstrap.min.css
deleted file mode 100644
index 3deec34..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/css/bootstrap.min.css
+++ /dev/null
@@ -1,9 +0,0 @@
-/*!
- * Bootstrap v3.0.2 by @fat and @mdo
- * Copyright 2013 Twitter, Inc.
- * Licensed under http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- */
-
-/*! normalize.css v2.1.3 | MIT License | git.io/normalize */
-[minified Bootstrap 3.0.2 CSS elided; garbled by the mail archive's line wrapping and entity stripping]
 

[04/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 3338052..af1440a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -243,7 +243,7 @@
 
src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
 
src/main/resources/webapps/static/jt/jquery.jstree.js
 
src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js
-
src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
+
src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js
 
src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css
 
src/test/resources/application_1440536969523_0001.har/_index
 
src/test/resources/application_1440536969523_0001.har/part-0

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index dba19c9..d4fba1f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -67,7 +67,7 @@ public class JQueryUI extends HtmlBlock {
   protected void render(Block html) {
 html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
 .link(root_url("static/dt-1.9.4/css/jui-dt.css"))
-.script(root_url("static/jquery/jquery-1.8.2.min.js"))
+.script(root_url("static/jquery/jquery-3.3.1.min.js"))
 .script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js"))
 .script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js"))
 .script(root_url("static/yarn.dt.plugins.js"))





[13/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.svg
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.svg
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 000..f155876
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,288 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+[remaining 285 lines of font metadata and glyph definitions elided; the XML markup was stripped by the mail archive]

[14/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.eot
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.eot
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.eot
new file mode 100644
index 000..b93a495
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.eot
 differ





[12/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.ttf
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.ttf
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 000..1413fc6
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.ttf
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 000..9e61285
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff2
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff2
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff2
new file mode 100644
index 000..64539b5
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff2
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap-editable.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap-editable.min.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap-editable.min.js
new file mode 100644
index 000..539d6c1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap-editable.min.js
@@ -0,0 +1,7 @@
+/*! X-editable - v1.5.0
+* In-place editing with Twitter Bootstrap, jQuery UI or pure jQuery
+* http://github.com/vitalets/x-editable
+* Copyright (c) 2013 Vitaliy Potapov; Licensed MIT */
+!function(a){"use strict";var 
b=function(b,c){this.options=a.extend({},a.fn.editableform.defaults,c),this.$div=a(b),this.options.scope||(this.options.scope=this)};b.prototype={constructor:b,initInput:function(){this.input=this.options.input,this.value=this.input.str2value(this.options.value),this.input.prerender()},initTemplate:function(){this.$form=a(a.fn.editableform.template)},initButtons:function(){var
 
b=this.$form.find(".editable-buttons");b.append(a.fn.editableform.buttons),"bottom"===this.options.showbuttons&("editable-buttons-bottom")},render:function(){this.$loading=a(a.fn.editableform.loading),this.$div.empty().append(this.$loading),this.initTemplate(),this.options.showbuttons?this.initButtons():this.$form.find(".editable-buttons").remove(),this.showLoading(),this.isSaving=!1,this.$div.triggerHandler("rendering"),this.initInput(),this.$form.find("div.editable-input").append(this.input.$tpl),this.$div.append(this.$form),a.when(this.input.render()).then(a.proxy(fu
 
nction(){if(this.options.showbuttons||this.input.autosubmit(),this.$form.find(".editable-cancel").click(a.proxy(this.cancel,this)),this.input.error)this.error(this.input.error),this.$form.find(".editable-submit").attr("disabled",!0),this.input.$input.attr("disabled",!0),this.$form.submit(function(a){a.preventDefault()});else{this.error(!1),this.input.$input.removeAttr("disabled"),this.$form.find(".editable-submit").removeAttr("disabled");var
 b=null===this.value||void 
0===this.value||""===this.value?this.options.defaultValue:this.value;this.input.value2input(b),this.$form.submit(a.proxy(this.submit,this))}this.$div.triggerHandler("rendered"),this.showForm(),this.input.postrender&()},this))},cancel:function(){this.$div.triggerHandler("cancel")},showLoading:function(){var
 
a,b;this.$form?(a=this.$form.outerWidth(),b=this.$form.outerHeight(),a&$loading.width(a),b&$loading.height(b),this.$form.hide()):(a=this.$loading.parent().width(),a&$loading.wid
 
th(a)),this.$loading.show()},showForm:function(a){this.$loading.hide(),this.$form.show(),a!==!1&(),this.$div.triggerHandler("show")},error:function(b){var
 

[16/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css
new file mode 100644
index 000..ed3905e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
+[minified Bootstrap 3.3.7 CSS elided; garbled by the mail archive's line wrapping]
 

[19/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css.map
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css.map
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css.map
new file mode 100644
index 000..94813e9
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAmBA,YAAA,aAAA,UAAA,aAAA,aAAA,aAME,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBDvCR,mBAAA,mBAAA,oBAAA,oBAAA,iBAAA,iBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBCsCA,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBDlCR,qBAAA,sBAAA,sBAAA,uBAAA,mBAAA,oBAAA,sBAAA,uBAAA,sBAAA,uBAAA,sBAAA,uBAAA,+BAAA,gCAAA,6BAAA,gCAAA,gCAAA,gCCiCA,mBAAA,KACQ,WAAA,KDlDV,mBAAA,oBAAA,iBAAA,oBAAA,oBAAA,oBAuBI,YAAA,KAyCF,YAAA,YAEE,iBAAA,KAKJ,aErEI,YAAA,EAAA,IAAA,EAAA,KACA,iBAAA,iDACA,iBAAA,4CAAA,iBAAA,qEAEA,iBAAA,+CCnBF,OAAA,+GH4CA,OAAA,0DACA,kBAAA,SAuC2C,aAAA,QAA2B,aAAA,KArCtE,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,Q
 
ACA,iBAAA,KAgBN,aEtEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAiBN,aEvEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAkBN,UExEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,gBAAA,gBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,iBAAA,iBAEE,iBAAA,QACA,aAAA,QAMA,mBAAA,0BAAA,yBAAA,0BAAA,yBAAA,yBAAA,oBAAA,2BAAA,0BAAA,2BAAA,0BAAA,0BAAA,6BAAA,oCAAA,mCAAA,oCAAA,mCAAA,mCAME,iBAAA,QACA,iBAAA,KAmBN,aEzEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4
 
CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAoBN,YE1EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,kBAAA,kBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,mBAAA,mBAEE,iBAAA,QACA,aAAA,QAMA,qBAAA,4BAAA,2BAAA,4BAAA,2BAAA,2BAAA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,+BAAA,sCAAA,qCAAA,sCAAA,qCAAA,qCAME,iBAAA,QACA,iBAAA,KA2BN,eAAA,WClCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBD2CV,0BAAA,0BE3FI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GF0FF,kBAAA,SAEF,yBAAA,+BAAA,+BEhGI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GFgGF,kBAAA,SASF,gBE7GI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SH+HA,cAAA,ICjEA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBD6DV,
 
sCAAA,oCE7GI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD0EV,cAAA,iBAEE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEhII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SHkJA,cAAA,IAHF,sCAAA,oCEhII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDgFV,8BAAA,iCAYI,YAAA,EAAA,KAAA,EAAA,gBAKJ,qBAAA,kBAAA,mBAGE,cAAA,EAqBF,yBAfI,mDAAA,yDAAA,yDAGE,MAAA,KE7JF,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UFqKJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC3HA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBDsIV,eEtLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAKF,YEvLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAMF,eExLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aA
 

[23/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.eot
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.eot
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.eot
deleted file mode 100644
index 423bd5d..000
Binary files 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.eot
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.svg
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.svg
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.svg
deleted file mode 100644
index 4469488..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.svg
+++ /dev/null
@@ -1,229 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-[remaining 226 lines of font metadata and glyph definitions elided; the XML markup was stripped by the mail archive]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.ttf
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.ttf
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.ttf
deleted file mode 100644
index a498ef4..000
Binary files 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.ttf
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.woff
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.woff
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.woff
deleted file mode 100644
index d83c539..000
Binary files 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.woff
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/clear.png
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/clear.png
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/clear.png
deleted file mode 100644
index 580b52a..000
Binary files 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/clear.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/loading.gif
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/loading.gif
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/loading.gif
deleted file mode 100644
index 5b33f7e..000
Binary files 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/loading.gif
 and /dev/null differ





[08/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js
new file mode 100644
index 000..4d9b3a2
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */
+[minified jQuery 3.3.1 source elided; garbled by the mail archive's line wrapping and entity stripping]
[18/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css
new file mode 100644
index 000..6167622
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css
@@ -0,0 +1,6757 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
+html {
+  font-family: sans-serif;
+  -webkit-text-size-adjust: 100%;
+  -ms-text-size-adjust: 100%;
+}
+body {
+  margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+menu,
+nav,
+section,
+summary {
+  display: block;
+}
+audio,
+canvas,
+progress,
+video {
+  display: inline-block;
+  vertical-align: baseline;
+}
+audio:not([controls]) {
+  display: none;
+  height: 0;
+}
+[hidden],
+template {
+  display: none;
+}
+a {
+  background-color: transparent;
+}
+a:active,
+a:hover {
+  outline: 0;
+}
+abbr[title] {
+  border-bottom: 1px dotted;
+}
+b,
+strong {
+  font-weight: bold;
+}
+dfn {
+  font-style: italic;
+}
+h1 {
+  margin: .67em 0;
+  font-size: 2em;
+}
+mark {
+  color: #000;
+  background: #ff0;
+}
+small {
+  font-size: 80%;
+}
+sub,
+sup {
+  position: relative;
+  font-size: 75%;
+  line-height: 0;
+  vertical-align: baseline;
+}
+sup {
+  top: -.5em;
+}
+sub {
+  bottom: -.25em;
+}
+img {
+  border: 0;
+}
+svg:not(:root) {
+  overflow: hidden;
+}
+figure {
+  margin: 1em 40px;
+}
+hr {
+  height: 0;
+  -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+  box-sizing: content-box;
+}
+pre {
+  overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: monospace, monospace;
+  font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+  margin: 0;
+  font: inherit;
+  color: inherit;
+}
+button {
+  overflow: visible;
+}
+button,
+select {
+  text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+  -webkit-appearance: button;
+  cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+  cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+  padding: 0;
+  border: 0;
+}
+input {
+  line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+  -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+  box-sizing: border-box;
+  padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+  height: auto;
+}
+input[type="search"] {
+  -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+  box-sizing: content-box;
+  -webkit-appearance: textfield;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+  -webkit-appearance: none;
+}
+fieldset {
+  padding: .35em .625em .75em;
+  margin: 0 2px;
+  border: 1px solid #c0c0c0;
+}
+legend {
+  padding: 0;
+  border: 0;
+}
+textarea {
+  overflow: auto;
+}
+optgroup {
+  font-weight: bold;
+}
+table {
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+td,
+th {
+  padding: 0;
+}
+/*! Source: 
https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */
+@media print {
+  *,
+  *:before,
+  *:after {
+color: #000 !important;
+text-shadow: none !important;
+background: transparent !important;
+-webkit-box-shadow: none !important;
+box-shadow: none !important;
+  }
+  a,
+  a:visited {
+text-decoration: underline;
+  }
+  a[href]:after {
+content: " (" attr(href) ")";
+  }
+  abbr[title]:after {
+content: " (" attr(title) ")";
+  }
+  a[href^="#"]:after,
+  a[href^="javascript:"]:after {
+content: "";
+  }
+  pre,
+  blockquote {
+border: 1px solid #999;
+
+page-break-inside: avoid;
+  }
+  thead {
+display: table-header-group;
+  }
+  tr,
+  img {
+page-break-inside: avoid;
+  }
+  img {
+max-width: 100% !important;
+  }
+  p,
+  h2,
+  h3 {
+orphans: 3;
+widows: 3;
+  }
+  h2,
+  h3 {
+page-break-after: avoid;
+  }
+  .navbar {
+display: none;
+  }
+  .btn > .caret,
+  .dropup > .btn > .caret {
+border-top-color: #000 !important;
+  }
+  .label {
+border: 1px solid #000;
+  }
+  .table {
+border-collapse: collapse !important;
+  }
+  .table td,
+  .table th {
+background-color: #fff !important;
+  }
+  .table-bordered th,
+  .table-bordered td {
+border: 1px solid #ddd !important;
+  }
+}
+@font-face {
+  font-family: 'Glyphicons Halflings';
+
+  src: 

[06/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 5d57e10..cffef14 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -143,8 +143,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 src/main/webapps/router/robots.txt
 src/contrib/**
 src/site/resources/images/*
-webapps/static/bootstrap-3.0.2/**
-webapps/static/jquery-1.10.2.min.js
+webapps/static/bootstrap-3.3.7/**
+webapps/static/jquery-3.3.1.min.js
 webapps/static/jquery.dataTables.min.js
 webapps/static/nvd3-1.8.5.min.css.map
 webapps/static/nvd3-1.8.5.min.js





[05/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
--
diff --git a/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js 
b/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
index 38f046c..9b5206b 100644
--- a/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
+++ b/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
@@ -1,247 +1,173 @@
 /*!
- * jQuery JavaScript Library v1.10.2
- * http://jquery.com/
+ * jQuery JavaScript Library v3.3.1
+ * https://jquery.com/
  *
  * Includes Sizzle.js
- * http://sizzlejs.com/
+ * https://sizzlejs.com/
  *
- * Copyright 2005, 2013 jQuery Foundation, Inc. and other contributors
+ * Copyright JS Foundation and other contributors
  * Released under the MIT license
- * http://jquery.org/license
+ * https://jquery.org/license
  *
- * Date: 2013-07-03T13:48Z
+ * Date: 2018-01-20T17:24Z
  */
-(function( window, undefined ) {
-
-// Can't do this because several apps including ASP.NET trace
-// the stack via arguments.caller.callee and Firefox dies if
-// you try to trace through "use strict" call chains. (#13335)
-// Support: Firefox 18+
-//"use strict";
-var
-   // The deferred used on DOM ready
-   readyList,
-
-   // A central reference to the root jQuery(document)
-   rootjQuery,
-
-   // Support: IE<10
-   // For `typeof xmlNode.method` instead of `xmlNode.method !== undefined`
-   core_strundefined = typeof undefined,
+( function( global, factory ) {
+
+   "use strict";
+
+   if ( typeof module === "object" && typeof module.exports === "object" ) 
{
+
+   // For CommonJS and CommonJS-like environments where a proper 
`window`
+   // is present, execute the factory and get jQuery.
+   // For environments that do not have a `window` with a 
`document`
+   // (such as Node.js), expose a factory as module.exports.
+   // This accentuates the need for the creation of a real 
`window`.
+   // e.g. var jQuery = require("jquery")(window);
+   // See ticket #14549 for more info.
+   module.exports = global.document ?
+   factory( global, true ) :
+   function( w ) {
+   if ( !w.document ) {
+   throw new Error( "jQuery requires a 
window with a document" );
+   }
+   return factory( w );
+   };
+   } else {
+   factory( global );
+   }
 
-   // Use the correct document accordingly with window argument (sandbox)
-   location = window.location,
-   document = window.document,
-   docElem = document.documentElement,
+// Pass this if window is not defined yet
+} )( typeof window !== "undefined" ? window : this, function( window, noGlobal 
) {
 
-   // Map over jQuery in case of overwrite
-   _jQuery = window.jQuery,
+// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 
9.1
+// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict 
mode
+// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict 
mode should be common
+// enough that all such attempts are guarded in a try block.
+"use strict";
 
-   // Map over the $ in case of overwrite
-   _$ = window.$,
+var arr = [];
 
-   // [[Class]] -> type pairs
-   class2type = {},
+var document = window.document;
 
-   // List of deleted data cache ids, so we can reuse them
-   core_deletedIds = [],
+var getProto = Object.getPrototypeOf;
 
-   core_version = "1.10.2",
+var slice = arr.slice;
 
-   // Save a reference to some core methods
-   core_concat = core_deletedIds.concat,
-   core_push = core_deletedIds.push,
-   core_slice = core_deletedIds.slice,
-   core_indexOf = core_deletedIds.indexOf,
-   core_toString = class2type.toString,
-   core_hasOwn = class2type.hasOwnProperty,
-   core_trim = core_version.trim,
+var concat = arr.concat;
 
-   // Define a local copy of jQuery
-   jQuery = function( selector, context ) {
-   // The jQuery object is actually just the init constructor 
'enhanced'
-   return new jQuery.fn.init( selector, context, rootjQuery );
-   },
+var push = arr.push;
 
-   // Used for matching numbers
-   core_pnum = /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,
+var indexOf = arr.indexOf;
 
-   // Used for splitting on whitespace
-   core_rnotwhite = /\S+/g,
+var class2type = {};
 
-   // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 
and IE)
-   rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
+var toString = class2type.toString;
 
-   // A simple way to check for HTML strings
-   // 
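
The rewritten header above replaces the 1.10.2 IIFE with jQuery 3's module-aware wrapper: when module.exports exists but the environment lacks a window with a document, jQuery exports a factory that must be handed one, per the hunk's own comment (var jQuery = require("jquery")(window)). A minimal sketch of that usage under Node.js follows, assuming jsdom as a stand-in DOM; jsdom is an illustrative dependency, not something this patch adds:

// Node.js consumption of the factory form described in the hunk's comments.
const { JSDOM } = require("jsdom");                    // assumed test-only dependency
const window = new JSDOM("<!doctype html><p>hi</p>").window;
const $ = require("jquery")(window);                   // factory form: pass in a window
console.log($("p").text());                            // prints "hi"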

[01/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 29024a620 -> f4c7c9112


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
index d4d8985..b212c9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
@@ -435,7 +435,7 @@
   .undelegate(".jstree")
   .removeData("jstree-instance-id")
   .find("[class^='jstree']")
-.andSelf()
+.addBack()
 .attr("class", function () { return 
this.className.replace(/jstree[^ ]*|$/ig,''); });
 $(document)
   .unbind(".jstree-" + n)
@@ -678,7 +678,7 @@
 }
 else {
   original_obj = obj;
-  if(obj.is(".jstree-closed")) { obj = 
obj.find("li.jstree-closed").andSelf(); }
+  if(obj.is(".jstree-closed")) { obj = 
obj.find("li.jstree-closed").addBack(); }
   else { obj = obj.find("li.jstree-closed"); }
 }
 var _this = this;
@@ -694,12 +694,12 @@
 var _this = this;
 obj = obj ? this._get_node(obj) : this.get_container();
 if(!obj || obj === -1) { obj = this.get_container_ul(); }
-obj.find("li.jstree-open").andSelf().each(function () { 
_this.close_node(this, !do_animation); });
+obj.find("li.jstree-open").addBack().each(function () { 
_this.close_node(this, !do_animation); });
 this.__callback({ "obj" : obj });
   },
   clean_node  : function (obj) {
 obj = obj && obj != -1 ? $(obj) : this.get_container_ul();
-obj = obj.is("li") ? obj.find("li").andSelf() : obj.find("li");
+obj = obj.is("li") ? obj.find("li").addBack() : obj.find("li");
 obj.removeClass("jstree-last")
   .filter("li:last-child").addClass("jstree-last").end()
   .filter(":has(li)")
@@ -922,7 +922,7 @@
 if(!obj || !obj.o || obj.or[0] === obj.o[0]) { return false; }
 if(obj.op && obj.np && obj.op[0] === obj.np[0] && obj.cp - 1 === 
obj.o.index()) { return false; }
 obj.o.each(function () {
-  if(r.parentsUntil(".jstree", "li").andSelf().index(this) !== -1) { 
ret = false; return false; }
+  if(r.parentsUntil(".jstree", "li").addBack().index(this) !== -1) { 
ret = false; return false; }
 });
 return ret;
   },
@@ -941,7 +941,7 @@
 var o = false;
 if(is_copy) {
   o = obj.o.clone(true);
-  o.find("*[id]").andSelf().each(function () {
+  o.find("*[id]").addBack().each(function () {
 if(this.id) { this.id = "copy_" + this.id; }
   });
 }
@@ -1138,7 +1138,7 @@
   switch(!0) {
 case (is_range):
   this.data.ui.last_selected.addClass("jstree-last-selected");
-  obj = obj[ obj.index() < this.data.ui.last_selected.index() ? 
"nextUntil" : "prevUntil" ](".jstree-last-selected").andSelf();
+  obj = obj[ obj.index() < this.data.ui.last_selected.index() ? 
"nextUntil" : "prevUntil" ](".jstree-last-selected").addBack();
   if(s.select_limit == -1 || obj.length < s.select_limit) {
 this.data.ui.last_selected.removeClass("jstree-last-selected");
 this.data.ui.selected.each(function () {
@@ -1242,7 +1242,7 @@
 .bind("move_node.jstree", $.proxy(function (e, data) {
   if(this._get_settings().crrm.move.open_onmove) {
 var t = this;
-
data.rslt.np.parentsUntil(".jstree").andSelf().filter(".jstree-closed").each(function
 () {
+
data.rslt.np.parentsUntil(".jstree").addBack().filter(".jstree-closed").each(function
 () {
   t.open_node(this, false, true);
 });
   }
@@ -2799,7 +2799,7 @@
 obj.each(function () {
   t = $(this);
   c = t.is("li") && (t.hasClass("jstree-checked") || (rc && 
t.children(":checked").length)) ? "jstree-checked" : "jstree-unchecked";
-  t.find("li").andSelf().each(function () {
+  t.find("li").addBack().each(function () {
 var $t = $(this), nm;
 $t.children("a" + (_this.data.languages ? "" : ":eq(0)") 
).not(":has(.jstree-checkbox)").prepend("").parent().not(".jstree-checked, 
.jstree-unchecked").addClass( ts ? "jstree-unchecked" : c );
 if(rc) {
@@ -2843,13 +2843,13 @@
 }
 else {
   if(state) {
-coll = obj.find("li").andSelf();
+coll = obj.find("li").addBack();
 

[09/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
deleted file mode 100644
index da41706..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/*! jQuery v1.10.2 | (c) 2005, 2013 jQuery Foundation, Inc. | jquery.org/license
-//@ sourceMappingURL=jquery-1.10.2.min.map
-*/
-[minified jQuery 1.10.2 source elided; garbled by the mail archive's line wrapping and entity stripping]
[03/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
deleted file mode 100644
index bc3fbc8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v1.8.2 jquery.com | jquery.org/license */

[22/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap-editable.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap-editable.min.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap-editable.min.js
deleted file mode 100644
index 539d6c1..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap-editable.min.js
+++ /dev/null
@@ -1,7 +0,0 @@
-/*! X-editable - v1.5.0
-* In-place editing with Twitter Bootstrap, jQuery UI or pure jQuery
-* http://github.com/vitalets/x-editable
-* Copyright (c) 2013 Vitaliy Potapov; Licensed MIT */

[20/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css.map
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css.map
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css.map
new file mode 100644
index 000..d876f60
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"GAIG;ACeH;;EAME,yCAAA;EC2CA,4FAAA;EACQ,oFAAA;CFvDT;ACgBCECsCA,yDAAA;EACQ,iDAAA;CFxCT;ACMC;;ECiCA,yBAAA;EACQ,iBAAA;CFnBT;AC/BD;;EAuBI,kBAAA;CDgBH;ACyBC;;EAEE,uBAAA;CDvBH;AC4BD;EErEI,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;EAuC2C,0BAAA;EAA2B,mBAAA;CDjBvE;ACpBC;;EAEE,0BAAA;EACA,6BAAA;CDsBH;ACnBC;;EAEE,0BAAA;EACA,sBAAA;CDqBH;ACfG;;EAME,0BAAA;EACA,uBAAA;CD6BL;ACbD;EEtEI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;CD8DD;AC5DC;;EAEE,0BAAA;EACA,6BAAA;CD8DH;AC3DC;;EAEE,0BAAA;EACA,sBAAA;CD6DH;ACvDG;;EAME,0BAAA;EACA,uBAAA;CDqEL;ACpDD;EEvEI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;CDsGD;ACpGC;;EAEE,0BAAA;EACA,6B
 
AAA;CDsGH;ACnGC;;EAEE,0BAAA;EACA,sBAAA;CDqGH;AC/FG;;EAME,0BAAA;EACA,uBAAA;CD6GL;AC3FD;EExEI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;CD8ID;AC5IC;;EAEE,0BAAA;EACA,6BAAA;CD8IH;AC3IC;;EAEE,0BAAA;EACA,sBAAA;CD6IH;ACvIG;;EAME,0BAAA;EACA,uBAAA;CDqJL;AClID;EEzEI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;CDsLD;ACpLC;;EAEE,0BAAA;EACA,6BAAA;CDsLH;ACnLC;;EAEE,0BAAA;EACA,sBAAA;CDqLH;AC/KG;;EAME,0BAAA;EACA,uBAAA;CD6LL;ACzKD;EE1EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EAEA,uHAAA;ECnBF,oEAAA;EH4CA,4BAAA;EACA,sBAAA;CD8ND;AC5NC;;EAEE,0BAAA;EACA,6BAAA;CD8NH;AC3NC;;EAEE,0BAAA;EACA,sBAAA;CD6NH;ACvNG;;EAME,0BAAA;EACA,uBAAA;CDqOL;AC1MD;;EClCE,mDAAA;EACQ,2CAAA;CFgPT;ACrMD;;EE3FI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EF0FF,0BAAA;CD2MD;ACzMD;;;EEhGI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EFgGF,0BAAA;CD+MD;ACtMD;EE
 
7GI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;ECnBF,oEAAA;EH+HA,mBAAA;ECjEA,4FAAA;EACQ,oFAAA;CF8QT;ACjND;;EE7GI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;ED2CF,yDAAA;EACQ,iDAAA;CFwRT;AC9MD;;EAEE,+CAAA;CDgND;AC5MD;EEhII,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EACA,4BAAA;EACA,uHAAA;ECnBF,oEAAA;EHkJA,mBAAA;CDkND;ACrND;;EEhII,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;ED2CF,wDAAA;EACQ,gDAAA;CF+ST;AC/ND;;EAYI,0CAAA;CDuNH;AClND;;;EAGE,iBAAA;CDoND;AC/LD;EAfI;;;IAGE,YAAA;IE7JF,yEAAA;IACA,oEAAA;IACA,8FAAA;IAAA,uEAAA;IACA,4BAAA;IACA,uHAAA;GH+WD;CACF;AC3MD;EACE,8CAAA;EC3HA,2FAAA;EACQ,mFAAA;CFyUT;ACnMD;EEtLI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EF8KF,sBAAA;CD+MD;AC1MD;EEvLI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EF8KF,sBAAA;CDuND;ACjND;EExLI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EF8KF,sBAAA;CD+ND;ACxND;EEzLI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EF8K
 
F,sBAAA;CDuOD;ACxND;EEjMI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CH4ZH;ACrND;EE3MI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CHmaH;AC3ND;EE5MI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CH0aH;ACjOD;EE7MI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CHibH;ACvOD;EE9MI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CHwbH;AC7OD;EE/MI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CH+bH;AChPD;EElLI,8MAAA;EACA,yMAAA;EACA,sMAAA;CHqaH;AC5OD;EACE,mBAAA;EC9KA,mDAAA;EACQ,2CAAA;CF6ZT;AC7OD;;;EAGE,8BAAA;EEnOE,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;EFiOF,sBAAA;CDmPD;ACxPD;;;EAQI,kBAAA;CDqPH;AC3OD;ECnME,kDAAA;EACQ,0CAAA;CFibT;ACrOD;EE5PI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CHoeH;AC3OD;EE7PI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CH2eH;ACjPD;EE9PI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,4BAAA;EACA,uHAAA;CHkfH;ACvPD;EE/PI,yEAA
 

[11/25] hadoop git commit: HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan.

2018-06-13 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c7c911/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.js
new file mode 100644
index 000..8a2e99a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.js
@@ -0,0 +1,2377 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under the MIT license
+ */
+
+if (typeof jQuery === 'undefined') {
+  throw new Error('Bootstrap\'s JavaScript requires jQuery')
+}
+
++function ($) {
+  'use strict';
+  var version = $.fn.jquery.split(' ')[0].split('.')
+  if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 
9 && version[2] < 1) || (version[0] > 3)) {
+throw new Error('Bootstrap\'s JavaScript requires jQuery version 1.9.1 or 
higher, but lower than version 4')
+  }
+}(jQuery);
+
+/* 
+ * Bootstrap: transition.js v3.3.7
+ * http://getbootstrap.com/javascript/#transitions
+ * 
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *  */
+
+
++function ($) {
+  'use strict';
+
+  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
+  // 
+
+  function transitionEnd() {
+var el = document.createElement('bootstrap')
+
+var transEndEventNames = {
+  WebkitTransition : 'webkitTransitionEnd',
+  MozTransition: 'transitionend',
+  OTransition  : 'oTransitionEnd otransitionend',
+  transition   : 'transitionend'
+}
+
+for (var name in transEndEventNames) {
+  if (el.style[name] !== undefined) {
+return { end: transEndEventNames[name] }
+  }
+}
+
+return false // explicit for ie8 (  ._.)
+  }
+
+  // http://blog.alexmaccaw.com/css-transitions
+  $.fn.emulateTransitionEnd = function (duration) {
+var called = false
+var $el = this
+$(this).one('bsTransitionEnd', function () { called = true })
+var callback = function () { if (!called) 
$($el).trigger($.support.transition.end) }
+setTimeout(callback, duration)
+return this
+  }
+
+  $(function () {
+$.support.transition = transitionEnd()
+
+if (!$.support.transition) return
+
+$.event.special.bsTransitionEnd = {
+  bindType: $.support.transition.end,
+  delegateType: $.support.transition.end,
+  handle: function (e) {
+if ($(e.target).is(this)) return e.handleObj.handler.apply(this, 
arguments)
+  }
+}
+  })
+
+}(jQuery);
+
+/* 
+ * Bootstrap: alert.js v3.3.7
+ * http://getbootstrap.com/javascript/#alerts
+ * 
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *  */
+
+
++function ($) {
+  'use strict';
+
+  // ALERT CLASS DEFINITION
+  // ==
+
+  var dismiss = '[data-dismiss="alert"]'
+  var Alert   = function (el) {
+$(el).on('click', dismiss, this.close)
+  }
+
+  Alert.VERSION = '3.3.7'
+
+  Alert.TRANSITION_DURATION = 150
+
+  Alert.prototype.close = function (e) {
+var $this= $(this)
+var selector = $this.attr('data-target')
+
+if (!selector) {
+  selector = $this.attr('href')
+  selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip 
for ie7
+}
+
+var $parent = $(selector === '#' ? [] : selector)
+
+if (e) e.preventDefault()
+
+if (!$parent.length) {
+  $parent = $this.closest('.alert')
+}
+
+$parent.trigger(e = $.Event('close.bs.alert'))
+
+if (e.isDefaultPrevented()) return
+
+$parent.removeClass('in')
+
+function removeElement() {
+  // detach from parent, fire event then clean up data
+  $parent.detach().trigger('closed.bs.alert').remove()
+}
+
+$.support.transition && $parent.hasClass('fade') ?
+  $parent
+.one('bsTransitionEnd', removeElement)
+.emulateTransitionEnd(Alert.TRANSITION_DURATION) :
+  removeElement()
+  }
+
+
+  // ALERT PLUGIN DEFINITION
+  // ===
+
+  function Plugin(option) {
+return this.each(function () {
+  var $this = $(this)
+  var data  = $this.data('bs.alert')
+
+  if (!data) $this.data('bs.alert', (data = new Alert(this)))
+  

hadoop git commit: HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh Jain.

2018-06-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b88fe25b -> c42dcc7c4


HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh 
Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c42dcc7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c42dcc7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c42dcc7c

Branch: refs/heads/trunk
Commit: c42dcc7c47340d517563890269c6c112996e8897
Parents: 3b88fe2
Author: Jitendra Pandey 
Authored: Thu Jun 7 23:00:26 2018 -0700
Committer: Jitendra Pandey 
Committed: Thu Jun 7 23:00:26 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c42dcc7c/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8edfd76..8cb5bfc 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -69,7 +69,7 @@
 
 
 1.9.13
-2.9.4
+2.9.5
 
 
 1.7.25


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
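
For anyone verifying which jackson-databind actually lands on the classpath 
after this bump, a minimal runtime check is sketched below. It relies only on 
jackson-databind's own PackageVersion class, which ships in every 2.x jar; the 
class name is real Jackson API, everything else here is illustrative.

    import com.fasterxml.jackson.databind.cfg.PackageVersion;

    public class JacksonVersionCheck {
      public static void main(String[] args) {
        // PackageVersion is generated into each jackson-databind jar, so this
        // reports the version actually resolved, not the one declared.
        System.out.println("jackson-databind: " + PackageVersion.VERSION);
      }
    }

Running this against a Hadoop classpath is a quick way to confirm the managed 
version was not overridden by a transitive dependency.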



hadoop git commit: HADOOP-14067. VersionInfo should load version-info.properties from its own classloader. Contributed by Thejas M Nair.

2018-03-22 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk e196d158a -> 4bea96f9a


HADOOP-14067. VersionInfo should load version-info.properties from its own 
classloader. Contributed by Thejas M Nair.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bea96f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bea96f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bea96f9

Branch: refs/heads/trunk
Commit: 4bea96f9a84cee89d07dfa97b892f6fb3ed1e125
Parents: e196d15
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Mar 22 14:08:18 2018 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Mar 22 14:09:06 2018 -0700

--
 .../java/org/apache/hadoop/util/ThreadUtil.java | 26 ++--
 .../org/apache/hadoop/util/VersionInfo.java |  3 ++-
 2 files changed, 26 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bea96f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 628..2cda8a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -53,8 +53,7 @@ public class ThreadUtil {
* Convenience method that returns a resource as inputstream from the
* classpath.
* 
-   * It first attempts to use the Thread's context classloader and if not
-   * set it uses the class' classloader.
+   * Uses the Thread's context classloader to load resource.
*
* @param resourceName resource to retrieve.
*
@@ -68,6 +67,27 @@ public class ThreadUtil {
   throw new IOException("Can not read resource file '" + resourceName +
   "' because class loader of the current thread is null");
 }
+return getResourceAsStream(cl, resourceName);
+  }
+
+  /**
+   * Convenience method that returns a resource as inputstream from the
+   * classpath using given classloader.
+   * 
+   *
+   * @param cl ClassLoader to be used to retrieve resource.
+   * @param resourceName resource to retrieve.
+   *
+   * @throws IOException thrown if resource cannot be loaded
+   * @return inputstream with the resource.
+   */
+  public static InputStream getResourceAsStream(ClassLoader cl,
+String resourceName)
+throws IOException {
+if (cl == null) {
+  throw new IOException("Can not read resource file '" + resourceName +
+  "' because given class loader is null");
+}
 InputStream is = cl.getResourceAsStream(resourceName);
 if (is == null) {
   throw new IOException("Can not read resource file '" +
@@ -75,4 +95,6 @@ public class ThreadUtil {
 }
 return is;
   }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bea96f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
index ca09050..ea83502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
@@ -43,7 +43,8 @@ public class VersionInfo {
 String versionInfoFile = component + "-version-info.properties";
 InputStream is = null;
 try {
-  is = ThreadUtil.getResourceAsStream(versionInfoFile);
+  is = ThreadUtil.getResourceAsStream(VersionInfo.class.getClassLoader(),
+  versionInfoFile);
   info.load(is);
 } catch (IOException ex) {
   LoggerFactory.getLogger(getClass()).warn("Could not read '" +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
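
The distinction the patch draws is between the thread-context classloader and 
the classloader that defined VersionInfo itself. A minimal standalone sketch of 
the difference (class name below is illustrative; the *-version-info.properties 
naming follows the convention visible in the hunk):

    import java.io.IOException;
    import java.io.InputStream;

    public class ResourceLoadDemo {
      public static void main(String[] args) throws IOException {
        // Whatever the container last installed; may be null or see other jars.
        ClassLoader ctx = Thread.currentThread().getContextClassLoader();
        // The loader that defined this class; stable regardless of the caller.
        ClassLoader own = ResourceLoadDemo.class.getClassLoader();
        System.out.println("context loader: " + ctx);
        try (InputStream is =
                 own.getResourceAsStream("common-version-info.properties")) {
          if (is == null) {
            throw new IOException("resource not visible to " + own);
          }
          System.out.println("loaded via " + own);
        }
      }
    }

In embedded deployments where the Hadoop jars live in a child classloader, the 
two loaders can differ, which is why VersionInfo now pins the lookup to the 
loader that loaded it.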



hadoop git commit: HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. Contributed by Lokesh Jain.

2018-02-07 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 456705a07 -> b061215ec


HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b061215e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b061215e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b061215e

Branch: refs/heads/trunk
Commit: b061215ecfebe476bf58f70788113d1af816f553
Parents: 456705a
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Feb 7 11:21:41 2018 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Feb 7 11:22:36 2018 -0800

--
 .../org/apache/hadoop/hdfs/ClientContext.java   |  3 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  6 ++-
 .../hdfs/client/impl/BlockReaderFactory.java| 40 +++-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  3 +-
 .../client/impl/TestBlockReaderFactory.java | 33 
 5 files changed, 64 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index a31945c..ad1b359 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -238,7 +239,7 @@ public class ClientContext {
 return byteArrayManager;
   }
 
-  public int getNetworkDistance(DatanodeInfo datanodeInfo) {
+  public int getNetworkDistance(DatanodeInfo datanodeInfo) throws IOException {
 // If applications disable the feature or the client machine can't
 // resolve its network location, clientNode will be set to null.
 if (clientNode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 32e5d0f..2edd755 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -550,7 +550,11 @@ public class DFSUtilClient {
   private static final Map<String, Boolean> localAddrMap = Collections
   .synchronizedMap(new HashMap<String, Boolean>());
 
-  public static boolean isLocalAddress(InetSocketAddress targetAddr) {
+  public static boolean isLocalAddress(InetSocketAddress targetAddr)
+  throws IOException {
+if (targetAddr.isUnresolved()) {
+  throw new IOException("Unresolved host: " + targetAddr);
+}
 InetAddress addr = targetAddr.getAddress();
 Boolean cached = localAddrMap.get(addr.getHostAddress());
 if (cached != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 60dde82..e83c8ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -357,28 +357,32 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   return reader;
 }
 final ShortCircuitConf scConf = conf.getShortCircuitConf();
-if (scConf.isShortCircuitLocalR
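
The crux of the fix is visible in the DFSUtilClient hunk above: for an 
unresolved InetSocketAddress, getAddress() returns null, and the old code 
dereferenced it. A standalone illustration using only JDK classes (the host 
name is a deliberately non-resolvable placeholder):

    import java.net.InetSocketAddress;

    public class UnresolvedHostDemo {
      public static void main(String[] args) {
        // createUnresolved skips the DNS lookup, mimicking a host that
        // failed to resolve.
        InetSocketAddress addr =
            InetSocketAddress.createUnresolved("no-such-host.invalid", 9866);
        System.out.println(addr.isUnresolved()); // true
        System.out.println(addr.getAddress());   // null -- the NPE source
      }
    }

Throwing a descriptive IOException at this boundary, as the patch does, is what 
lets the input stream retry instead of failing permanently.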

hadoop git commit: HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.

2018-01-01 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e20173d55 -> cb60c05c3


HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb60c05c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb60c05c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb60c05c

Branch: refs/heads/branch-2
Commit: cb60c05c3aff762c39d7d81df9a7b6e071acb181
Parents: e20173d
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 1 21:51:48 2018 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 1 21:51:48 2018 -0800

--
 .../java/org/apache/hadoop/security/UserGroupInformation.java   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb60c05c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 20a23ad..2655655 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1174,7 +1174,10 @@ public class UserGroupInformation {
 Object cred = iter.next();
 if (cred instanceof KerberosTicket) {
   KerberosTicket ticket = (KerberosTicket) cred;
-  if (!ticket.getServer().getName().startsWith("krbtgt")) {
+  if (ticket.isDestroyed() || ticket.getServer() == null) {
+LOG.warn("Ticket is already destroyed, remove it.");
+iter.remove();
+  } else if (!ticket.getServer().getName().startsWith("krbtgt")) {
 LOG.warn(
 "The first kerberos ticket is not TGT"
 + "(the server principal is {}), remove and destroy it.",


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
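
A distilled sketch of the guard the patch adds (JDK security APIs only; the 
class below is illustrative, not part of the patch): a KerberosTicket that has 
been destroyed can report null from getServer(), so the pre-existing 
startsWith("krbtgt") check could dereference null.

    import java.util.Iterator;
    import javax.security.auth.Subject;
    import javax.security.auth.kerberos.KerberosTicket;

    public final class TicketPruner {
      /** Drop destroyed tickets before inspecting the rest. */
      public static void pruneDestroyed(Subject subject) {
        Iterator<Object> iter = subject.getPrivateCredentials().iterator();
        while (iter.hasNext()) {
          Object cred = iter.next();
          if (cred instanceof KerberosTicket) {
            KerberosTicket ticket = (KerberosTicket) cred;
            // A destroyed ticket may return null from getServer().
            if (ticket.isDestroyed() || ticket.getServer() == null) {
              iter.remove();
            }
          }
        }
      }
    }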



hadoop git commit: HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.

2017-12-27 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 52babbb4a -> d31c9d8c4


HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d31c9d8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d31c9d8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d31c9d8c

Branch: refs/heads/trunk
Commit: d31c9d8c495794a803fb20729b5ed6b374e23eb4
Parents: 52babbb
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Dec 27 23:17:07 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Dec 27 23:17:07 2017 -0800

--
 .../hadoop/security/UserGroupInformation.java   |  5 +-
 .../security/TestFixKerberosTicketOrder.java| 77 
 2 files changed, 81 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index f7aea31..726e811 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1253,7 +1253,10 @@ public class UserGroupInformation {
 Object cred = iter.next();
 if (cred instanceof KerberosTicket) {
   KerberosTicket ticket = (KerberosTicket) cred;
-  if (!ticket.getServer().getName().startsWith("krbtgt")) {
+  if (ticket.isDestroyed() || ticket.getServer() == null) {
+LOG.warn("Ticket is already destroyed, remove it.");
+iter.remove();
+  } else if (!ticket.getServer().getName().startsWith("krbtgt")) {
 LOG.warn(
 "The first kerberos ticket is not TGT"
 + "(the server principal is {}), remove and destroy it.",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
index 4b75a36..cbea393 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
@@ -155,4 +155,81 @@ public class TestFixKerberosTicketOrder extends 
KerberosSecurityTestcase {
 .filter(t -> t.getServer().getName().startsWith(server2Protocol))
 .findAny().isPresent());
   }
+
+  @Test
+  public void testWithDestroyedTGT() throws Exception {
+UserGroupInformation ugi =
+UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal,
+keytabFile.getCanonicalPath());
+ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+  @Override
+  public Void run() throws Exception {
+SaslClient client = Sasl.createSaslClient(
+new String[] {AuthMethod.KERBEROS.getMechanismName()},
+clientPrincipal, server1Protocol, host, props, null);
+client.evaluateChallenge(new byte[0]);
+client.dispose();
+return null;
+  }
+});
+
+Subject subject = ugi.getSubject();
+
+// mark the ticket as destroyed
+for (KerberosTicket ticket : subject
+.getPrivateCredentials(KerberosTicket.class)) {
+  if (ticket.getServer().getName().startsWith("krbtgt")) {
+ticket.destroy();
+break;
+  }
+}
+
+ugi.fixKerberosTicketOrder();
+
+// verify that after fixing, the tgt ticket should be removed
+assertFalse("The first ticket is not tgt",
+subject.getPrivateCredentials().stream()
+.filter(c -> c instanceof KerberosTicket)
+.map(c -> ((KerberosTicket) c).getServer().getName()).findFirst()
+.isPresent());
+
+
+// should fail as we send a service ticket instead of tgt to KDC.
+intercept(SaslException.class,
+() -> ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+  @Override
+  public Void run() throw

hadoop git commit: HDFS-12797. Add Test for NFS mount of not supported filesystems like (file:///). Contributed by Mukul Kumar Singh.

2017-11-09 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3c6adda29 -> 8a1bd9a4f


HDFS-12797. Add Test for NFS mount of not supported filesystems like 
(file:///). Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a1bd9a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a1bd9a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a1bd9a4

Branch: refs/heads/trunk
Commit: 8a1bd9a4f4b8864aa560094a53d43ef732d378e5
Parents: 3c6adda
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Nov 9 23:53:17 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Nov 9 23:53:17 2017 -0800

--
 .../hadoop/hdfs/nfs/nfs3/TestExportsTable.java  | 88 +++-
 1 file changed, 87 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1bd9a4/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
index 211a166..a5c3e7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.nio.file.FileSystemException;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -33,9 +34,14 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 public class TestExportsTable {
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
  
   @Test
   public void testHdfsExportPoint() throws IOException {
@@ -70,7 +76,7 @@ public class TestExportsTable {
   }
 
   @Test
-  public void testViewFsExportPoint() throws IOException {
+  public void testViewFsMultipleExportPoint() throws IOException {
 NfsConfiguration config = new NfsConfiguration();
 MiniDFSCluster cluster = null;
 String clusterName = RandomStringUtils.randomAlphabetic(10);
@@ -183,6 +189,56 @@ public class TestExportsTable {
   }
 
   @Test
+  public void testViewFsRootExportPoint() throws IOException {
+NfsConfiguration config = new NfsConfiguration();
+MiniDFSCluster cluster = null;
+String clusterName = RandomStringUtils.randomAlphabetic(10);
+
+String exportPoint = "/";
+config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
+config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+FsConstants.VIEWFS_SCHEME + "://" + clusterName);
+// Use ephemeral ports in case tests are running in parallel
+config.setInt("nfs3.mountd.port", 0);
+config.setInt("nfs3.server.port", 0);
+config.set("nfs.http.address", "0.0.0.0:0");
+
+try {
+  cluster =
+  new MiniDFSCluster.Builder(config).nnTopology(
+  MiniDFSNNTopology.simpleFederatedTopology(2))
+  .numDataNodes(2)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
+  DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
+  cluster.waitActive();
+  Path base1 = new Path("/user1");
+  Path base2 = new Path("/user2");
+  hdfs1.delete(base1, true);
+  hdfs2.delete(base2, true);
+  hdfs1.mkdirs(base1);
+  hdfs2.mkdirs(base2);
+  ConfigUtil.addLink(config, clusterName, "/hdfs1",
+  hdfs1.makeQualified(base1).toUri());
+  ConfigUtil.addLink(config, clusterName, "/hdfs2",
+  hdfs2.makeQualified(base2).toUri());
+
+  exception.expect(FileSystemException.class);
+  exception.
+  expectMessage("Only HDFS is supported as underlyingFileSystem, "
+  + "fs scheme:viewfs");
+  // Start nfs
+  final Nfs3 nfsServer = new Nfs3(config);
+  nfsServer.startServiceInternal(false);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
+  @Test
   public void testHdfsInternalExportPoint() throws IOE
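
The tests above rely on JUnit 4's ExpectedException rule. One subtlety worth 
restating: expect()/expectMessage() must run before the statement that throws, 
and expectMessage() matches a substring. A stripped-down, self-contained sketch 
(class and method names are illustrative):

    import java.io.IOException;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExpectedException;

    public class ExpectedExceptionSketch {
      @Rule
      public ExpectedException exception = ExpectedException.none();

      @Test
      public void failsWithMessage() throws IOException {
        // Arm the rule first; it verifies whatever the rest of the test throws.
        exception.expect(IOException.class);
        exception.expectMessage("Only HDFS is supported");
        throw new IOException(
            "Only HDFS is supported as underlyingFileSystem, fs scheme:viewfs");
      }
    }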

hadoop git commit: HADOOP-14910. Upgrade netty-all jar to latest 4.0.x.Final. Contributed by Vinayakumar B.

2017-10-10 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk d6602b5f3 -> f29e55bf5


HADOOP-14910. Upgrade netty-all jar to latest 4.0.x.Final. Contributed by 
Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f29e55bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f29e55bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f29e55bf

Branch: refs/heads/trunk
Commit: f29e55bf5e634b3be28c9e0e993f78877528b5c2
Parents: d6602b5
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Oct 10 11:00:07 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Oct 10 11:00:07 2017 -0700

--
 .../hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java  | 3 +--
 hadoop-project/pom.xml| 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29e55bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index bb28110..16380e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -59,8 +59,7 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = 
decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH),
-StandardCharsets.UTF_8);
+this.path = decoder.path().substring(WEBHDFS_PREFIX_LENGTH);
 this.params = decoder.parameters();
 this.conf = conf;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29e55bf/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 66d2eb4..8980f0e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -749,7 +749,7 @@
   
 io.netty
 netty-all
-4.0.23.Final
+4.0.52.Final
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
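
The ParameterParser change above removes an explicit decodeComponent() call; 
the upgraded netty appears to hand back an already-decoded path, so decoding a 
second time would corrupt any path containing a literal percent-escape. The 
hazard itself is easy to reproduce with plain JDK classes (the WebHDFS-style 
path is a made-up example):

    import java.net.URLDecoder;

    public class DoubleDecodeDemo {
      public static void main(String[] args) throws Exception {
        String onWire = "/webhdfs/v1/dir/file%2541"; // client meant "file%41"
        String once  = URLDecoder.decode(onWire, "UTF-8");
        String twice = URLDecoder.decode(once, "UTF-8");
        System.out.println(once);  // /webhdfs/v1/dir/file%41 -- correct
        System.out.println(twice); // /webhdfs/v1/dir/fileA   -- corrupted
      }
    }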



[2/2] hadoop git commit: HDFS-11575. Supporting HDFS NFS gateway with Federated HDFS. Contributed by Mukul Kumar Singh.

2017-10-10 Thread jitendra
HDFS-11575. Supporting HDFS NFS gateway with Federated HDFS. Contributed by 
Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6602b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6602b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6602b5f

Branch: refs/heads/trunk
Commit: d6602b5f39833611b4afa4581552f6c4c37e23a8
Parents: ec8bf9e
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Oct 10 09:49:46 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Oct 10 10:38:05 2017 -0700

--
 .../org/apache/hadoop/nfs/nfs3/FileHandle.java  |  51 ++-
 .../hadoop/nfs/nfs3/request/WRITE3Request.java  |   4 +-
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java |  81 +++--
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java| 174 ++---
 .../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java  |  46 +++
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |  19 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java  |   6 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 362 +++
 .../apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java   |   8 +-
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java  |  24 +-
 .../nfs/nfs3/TestClientAccessPrivilege.java |   3 +-
 .../hdfs/nfs/nfs3/TestDFSClientCache.java   |  13 +-
 .../hadoop/hdfs/nfs/nfs3/TestExportsTable.java  | 161 -
 .../hadoop/hdfs/nfs/nfs3/TestReaddir.java   |  19 +-
 .../hdfs/nfs/nfs3/TestRpcProgramNfs3.java   |  66 ++--
 .../hdfs/nfs/nfs3/TestViewfsWithNfs3.java   | 330 +
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java |   9 +-
 17 files changed, 1071 insertions(+), 305 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6602b5f/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
index 5b32798..910b8f2 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
@@ -38,17 +38,21 @@ public class FileHandle {
   private static final int HANDLE_LEN = 32;
   private byte[] handle; // Opaque handle
   private long fileId = -1;
+  private int namenodeId = -1;
 
   public FileHandle() {
 handle = null;
   }
 
   /**
-   * Handle is a 32 bytes number. For HDFS, the last 8 bytes is fileId.
+   * Handle is a 32-byte number. For HDFS, the first 8 bytes hold the fileId.
+   * For ViewFs, the 4 bytes that follow the fileId hold the namenodeId.
* @param v file id
+   * @param n namenode id
*/
-  public FileHandle(long v) {
+  public FileHandle(long v, int n) {
 fileId = v;
+namenodeId = n;
 handle = new byte[HANDLE_LEN];
 handle[0] = (byte)(v >>> 56);
 handle[1] = (byte)(v >>> 48);
@@ -58,11 +62,20 @@ public class FileHandle {
 handle[5] = (byte)(v >>> 16);
 handle[6] = (byte)(v >>>  8);
 handle[7] = (byte)(v >>>  0);
-for (int i = 8; i < HANDLE_LEN; i++) {
+
+handle[8] = (byte) (n >>> 24);
+handle[9] = (byte) (n >>> 16);
+handle[10] = (byte) (n >>> 8);
+handle[11] = (byte) (n >>> 0);
+for (int i = 12; i < HANDLE_LEN; i++) {
   handle[i] = (byte) 0;
 }
   }
-  
+
+  public FileHandle(long v) {
+this(v, 0);
+  }
+
   public FileHandle(String s) {
 MessageDigest digest;
 try {
@@ -93,22 +106,32 @@ public class FileHandle {
 return true;
   }
 
-  private long bytesToLong(byte[] data) {
+  private long bytesToLong(byte[] data, int offset) {
 ByteBuffer buffer = ByteBuffer.allocate(8);
 for (int i = 0; i < 8; i++) {
-  buffer.put(data[i]);
+  buffer.put(data[i + offset]);
 }
-buffer.flip();// need flip
+buffer.flip(); // need flip
 return buffer.getLong();
   }
-  
+
+  private int bytesToInt(byte[] data, int offset) {
+ByteBuffer buffer = ByteBuffer.allocate(4);
+for (int i = 0; i < 4; i++) {
+  buffer.put(data[i + offset]);
+}
+buffer.flip(); // need flip
+return buffer.getInt();
+  }
+
   public boolean deserialize(XDR xdr) {
 if (!XDR.verifyLength(xdr, 32)) {
   return false;
 }
 int size = xdr.readInt();
 handle = xdr.readFixedOpaque(size);
-fileId = bytesToLong(handle);
+fileId = bytesToLong(handle, 0);
+namenodeId = bytesToInt(handle, 8);
 return true;
   }
   
@@ -122,11 +145,15 @@ public class FileHandle {
   public
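
Reading the constructor in the hunk above: the 32-byte opaque handle now 
carries the fileId big-endian in bytes 0-7 and the namenodeId in bytes 8-11, 
with the remainder zero-filled. The same layout expressed with ByteBuffer, as a 
sketch rather than the patch's hand-rolled shifts:

    import java.nio.ByteBuffer;

    public final class HandleCodec {
      private static final int HANDLE_LEN = 32;

      static byte[] encode(long fileId, int namenodeId) {
        ByteBuffer buf = ByteBuffer.allocate(HANDLE_LEN); // zeroed, big-endian
        buf.putLong(fileId);    // bytes 0-7
        buf.putInt(namenodeId); // bytes 8-11
        return buf.array();
      }

      static long decodeFileId(byte[] handle) {
        return ByteBuffer.wrap(handle, 0, 8).getLong();
      }

      static int decodeNamenodeId(byte[] handle) {
        return ByteBuffer.wrap(handle, 8, 4).getInt();
      }
    }

Keeping the fileId in the same first 8 bytes preserves compatibility for 
plain-HDFS handles, which simply carry namenodeId 0 via the one-argument 
constructor.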

[1/2] hadoop git commit: HDFS-11575. Supporting HDFS NFS gateway with Federated HDFS. Contributed by Mukul Kumar Singh.

2017-10-10 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec8bf9e48 -> d6602b5f3


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6602b5f/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index f308763..30ecc0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -186,7 +186,8 @@ public class TestRpcProgramNfs3 {
   public void testGetattr() throws Exception {
 HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
 long dirId = status.getFileId();
-FileHandle handle = new FileHandle(dirId);
+int namenodeId = Nfs3Utils.getNamenodeId(config);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 XDR xdr_req = new XDR();
 GETATTR3Request req = new GETATTR3Request(handle);
 req.serialize(xdr_req);
@@ -209,8 +210,9 @@ public class TestRpcProgramNfs3 {
   public void testSetattr() throws Exception {
 HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
 long dirId = status.getFileId();
+int namenodeId = Nfs3Utils.getNamenodeId(config);
 XDR xdr_req = new XDR();
-FileHandle handle = new FileHandle(dirId);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
 EnumSet.of(SetAttrField.UID));
 SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
@@ -234,7 +236,8 @@ public class TestRpcProgramNfs3 {
   public void testLookup() throws Exception {
 HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
 long dirId = status.getFileId();
-FileHandle handle = new FileHandle(dirId);
+int namenodeId = Nfs3Utils.getNamenodeId(config);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
 XDR xdr_req = new XDR();
 lookupReq.serialize(xdr_req);
@@ -257,7 +260,8 @@ public class TestRpcProgramNfs3 {
   public void testAccess() throws Exception {
 HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
 long dirId = status.getFileId();
-FileHandle handle = new FileHandle(dirId);
+int namenodeId = Nfs3Utils.getNamenodeId(config);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 XDR xdr_req = new XDR();
 ACCESS3Request req = new ACCESS3Request(handle);
 req.serialize(xdr_req);
@@ -281,8 +285,9 @@ public class TestRpcProgramNfs3 {
 // Create a symlink first.
 HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
 long dirId = status.getFileId();
+int namenodeId = Nfs3Utils.getNamenodeId(config);
 XDR xdr_req = new XDR();
-FileHandle handle = new FileHandle(dirId);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
 "bar");
 req.serialize(xdr_req);
@@ -316,7 +321,8 @@ public class TestRpcProgramNfs3 {
   public void testRead() throws Exception {
 HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
 long dirId = status.getFileId();
-FileHandle handle = new FileHandle(dirId);
+int namenodeId = Nfs3Utils.getNamenodeId(config);
+FileHandle handle = new FileHandle(dirId, namenodeId);
 
 READ3Request readReq = new READ3Request(handle, 0, 5);
 XDR xdr_req = new XDR();
@@ -373,7 +379,8 @@ public class TestRpcProgramNfs3 {
 
 final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
 final long dirId = status.getFileId();
-final FileHandle handle = new FileHandle(dirId);
+final int namenodeId = Nfs3Utils.getNamenodeId(config);
+final FileHandle handle = new FileHandle(dirId, namenodeId);
 
 final WRITE3Request writeReq = new WRITE3Request(handle, 0,
 buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
@@ -390,7 +397,8 @@ public class TestRpcProgramNfs3 {
   throws Exception {
 final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
 final long dirId = status.getFileId();
-final FileHandle handle = new FileHandle(dirId);
+final int namenodeId = Nfs3Utils.getNamenodeId(config);
+final FileHandle handle = new FileHandle(dirId, namenodeId);
 
 final READ3Request readReq = new READ3Request(handle, 0, len);
 final XDR xdr_req = new XDR();
@@ -422,7 +430,8 @@ public class TestRpcProgramNfs3 {
   private void commit(String fileName, int len) throws Exception {
 final HdfsFileStatus status = 

hadoop git commit: HDFS-12486. GetConf to get journalnodeslist. Contributed by Bharat Viswanadham.

2017-09-22 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk e1b32e095 -> cda337865


HDFS-12486. GetConf to get journalnodeslist. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cda33786
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cda33786
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cda33786

Branch: refs/heads/trunk
Commit: cda3378659772f20fd951ae342dc7d9d6db29534
Parents: e1b32e0
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Sep 22 16:52:47 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Sep 22 16:52:47 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  83 +
 .../org/apache/hadoop/hdfs/tools/GetConf.java   |  31 +++-
 .../apache/hadoop/hdfs/tools/TestGetConf.java   | 175 ++-
 4 files changed, 283 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda33786/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index e7cd0d8..3b09f39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -406,7 +406,7 @@ public class DFSUtilClient {
* @param keys list of keys in the order of preference
* @return value of the key or default if a key was not found in 
configuration
*/
-  private static String getConfValue(String defaultValue, String keySuffix,
+  public static String getConfValue(String defaultValue, String keySuffix,
   Configuration conf, String... keys) {
 String value = null;
 for (String key : keys) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda33786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 7776dc2..32a1cae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -31,6 +31,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADD
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
@@ -44,6 +45,8 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.UnknownHostException;
+
 import java.security.SecureRandom;
 import java.util.Arrays;
 import java.util.Collection;
@@ -74,6 +77,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
@@ -454,6 +458,85 @@ public class DFSUtil {
   }
 
   /**
+   * Returns list of Journalnode addresses from the configuration.
+   *
+   * @param conf configuration
+   * @return list of journalnode host names
+   * @throws URISyntaxException
+   * @throws IOException
+   */
+  public static Set<String> getJournalNodeAddresses(
+  Configuration conf) throws URISyntaxException, IOException {
+Set<String> journalNodeList = new HashSet<>();
+String journalsUri = "";
+try {
+  journalsUri = conf.get(DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+  if (journalsUri == null) {
+Collection<String> nameserviceIds = DFSUtilClient.
+   
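
The hunk is cut off above, but its javadoc states the intent: collect 
journalnode hosts from configuration, where dfs.namenode.shared.edits.dir 
holds a qjournal URI whose authority is a semicolon-separated host:port list. 
A hedged sketch of that parsing step (the helper below is illustrative, not 
the patch's code; the URI layout is the standard QJM convention):

    import java.net.URI;
    import java.util.HashSet;
    import java.util.Set;

    public final class JournalNodeHosts {
      /** e.g. qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster */
      static Set<String> parse(String sharedEditsDir) {
        URI uri = URI.create(sharedEditsDir);
        Set<String> hosts = new HashSet<>();
        // QJM convention: the authority is a ';'-separated host:port list.
        for (String hostPort : uri.getAuthority().split(";")) {
          hosts.add(hostPort.split(":")[0]);
        }
        return hosts;
      }
    }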

hadoop git commit: HADOOP-14674. Correct javadoc for getRandomizedTempPath. Contributed by Mukul Kumar Singh.

2017-09-01 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 99a7f5d45 -> 063b6d0c9


HADOOP-14674. Correct javadoc for getRandomizedTempPath. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/063b6d0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/063b6d0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/063b6d0c

Branch: refs/heads/trunk
Commit: 063b6d0c93d700a57a7c6c29fdd1bcdecd0b9dc0
Parents: 99a7f5d
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Aug 31 21:26:31 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Sep 1 10:17:12 2017 -0700

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/063b6d0c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 9291bb0..4cb9f8b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -274,7 +274,6 @@ public abstract class GenericTestUtils {
* Get a temp path. This may or may not be relative; it depends on what the
* {@link #SYSPROP_TEST_DATA_DIR} is set to. If unset, it returns a path
* under the relative path {@link #DEFAULT_TEST_DATA_PATH}
-   * @param subpath sub path, with no leading "/" character
* @return a string to use in paths
*/
   public static String getRandomizedTempPath() {

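For context, the removed @param tag was stale because the method takes no arguments; a call site looks like this (test-scope sketch):

    // Inside a JUnit test; GenericTestUtils is a hadoop-common test utility.
    String tmp = GenericTestUtils.getRandomizedTempPath();
    java.io.File dir = new java.io.File(tmp);
    org.junit.Assert.assertTrue(dir.mkdirs());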




hadoop git commit: HDFS-2319. Add test cases for FSshell -stat. Contributed by XieXianshan and Bharat Viswanadham.

2017-07-27 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5f4808ce7 -> e3c730022


HDFS-2319. Add test cases for FSshell -stat. Contributed by XieXianshan and 
Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3c73002
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3c73002
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3c73002

Branch: refs/heads/trunk
Commit: e3c73002250a21a771689081b51764eca1d862a7
Parents: 5f4808c
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Jul 27 13:23:15 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Jul 27 13:23:15 2017 -0700

--
 .../src/test/resources/testHDFSConf.xml | 125 ++-
 1 file changed, 124 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3c73002/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 9302507..ba90efa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -7203,7 +7203,130 @@
 
   
 
-
+
+    <test>
+      <description>stat: Test for hdfs:// path - user/group name for directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dirtest</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///dirtest</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:///dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///dirtest</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>stat: Test for hdfs:// path - user/group name for file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:data60bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>stat: Test for hdfs:// path - user/group name for multiple files</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///data30bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -chown hdfs:hdfs hdfs:///data30bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:///data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -rm -r hdfs:data30bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hdfs-hdfs</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>stat: Test for Namenode's path - user/group name for directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/</command>
+        <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>stat: Test for Namenode's path - user/group name for file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>stat: Test for Namenode's path - user/group name for multiple files</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -chown hdfs:hdfs NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hdfs-hdfs</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
 
    <test>
      <description>tail: contents of file(absolute path)</description>

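For reference, the "%u-%g" format exercised above prints the owner and group of the path's FileStatus; a programmatic equivalent (path and cluster assumed) is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class StatEquivalent {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus st = fs.getFileStatus(new Path("/dirtest"));
        // The same information "-stat %u-%g" prints, e.g. "hadoop-hadoopgrp".
        System.out.println(st.getOwner() + "-" + st.getGroup());
      }
    }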




hadoop git commit: HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. Contributed by Thomas Marquardt.

2017-07-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 00f4a80e8 -> 554c3cd21


HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. 
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/554c3cd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/554c3cd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/554c3cd2

Branch: refs/heads/branch-2
Commit: 554c3cd21d2cbfc1901a4233a24a7a86d05a6a22
Parents: 00f4a80
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Jul 25 16:26:48 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Jul 25 16:35:40 2017 -0700

--
 .../hadoop/fs/azure/BlockBlobInputStream.java   |  2 +-
 .../fs/azure/TestBlockBlobInputStream.java  | 50 +++-
 2 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/554c3cd2/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
index 2ed0686..5542415 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
@@ -358,7 +358,7 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
  * Gets the current capacity of the stream.
  */
 public synchronized int capacity() {
-  return length - offset;
+  return length;
 }
 
 /**

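A small numeric sketch of the bug (sizes assumed, not from the commit): the old return value shrank as the stream position advanced, so capacity() stopped describing the buffered data.

    public class CapacityBugSketch {
      public static void main(String[] args) {
        int length = 4 * 1024 * 1024; // bytes buffered in the stream
        int offset = 1024;            // bytes already consumed by reads
        int oldCapacity = length - offset; // shrank after every read
        int newCapacity = length;          // constant, what the fix returns
        System.out.println(oldCapacity + " vs " + newCapacity);
      }
    }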
http://git-wip-us.apache.org/repos/asf/hadoop/blob/554c3cd2/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
index 2db063b..2453584 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
@@ -43,8 +43,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
 
-import static org.junit.Assert.*;
-import static org.junit.Assume.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNotNull;
 
 import static org.apache.hadoop.test.LambdaTestUtils.*;
 
@@ -194,6 +197,49 @@ public class TestBlockBlobInputStream extends 
AbstractWasbTestBase {
 createTestFileAndSetLength();
   }
 
+  @Test
+  public void test_0200_BasicReadTestV2() throws Exception {
+assumeHugeFileExists();
+
+try (
+FSDataInputStream inputStreamV1
+= accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+FSDataInputStream inputStreamV2
+= accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+) {
+  byte[] bufferV1 = new byte[3 * MEGABYTE];
+  byte[] bufferV2 = new byte[bufferV1.length];
+
+  // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
+  inputStreamV1.seek(5 * MEGABYTE);
+  int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
+  assertEquals(numBytesReadV1, KILOBYTE);
+
+  // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
+  inputStreamV2.seek(5 * MEGABYTE);
+  int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
+  assertEquals(numBytesReadV2, KILOBYTE);
+
+  assertArrayEquals(bufferV1, bufferV2);
+
+  int len = MEGABYTE;
+  int offset = bufferV1.length - len;
+
+  // v1 reverse seek and read a megabyte into last megabyte of bufferV1
+  inputStreamV1.seek(3 * MEGABYTE);
+  numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
+  assertEquals(numBytesReadV1, len);
+
+  // v2 reverse seek and read a megabyte into last megabyte of bufferV2
+  inputStreamV2.seek(3 * MEGABYTE);
+  numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
+  assertEquals(numBytesReadV2, len);
+
+  assertArrayEquals(bufferV1, bufferV2);
+}
+  }
+
   /**
* Validates the implementation of InputStrea

hadoop git commit: HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. Contributed by Thomas Marquardt.

2017-07-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk f81a4efb8 -> a92bf39e2


HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. 
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92bf39e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92bf39e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92bf39e

Branch: refs/heads/trunk
Commit: a92bf39e2313d4bfccd641ce0ccefe26f4903a69
Parents: f81a4ef
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Jul 25 16:26:48 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Jul 25 16:26:48 2017 -0700

--
 .../hadoop/fs/azure/BlockBlobInputStream.java   |  2 +-
 .../fs/azure/TestBlockBlobInputStream.java  | 50 +++-
 2 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92bf39e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
index 2ed0686..5542415 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
@@ -358,7 +358,7 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
  * Gets the current capacity of the stream.
  */
 public synchronized int capacity() {
-  return length - offset;
+  return length;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92bf39e/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
index 2db063b..2453584 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
@@ -43,8 +43,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
 
-import static org.junit.Assert.*;
-import static org.junit.Assume.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNotNull;
 
 import static org.apache.hadoop.test.LambdaTestUtils.*;
 
@@ -194,6 +197,49 @@ public class TestBlockBlobInputStream extends 
AbstractWasbTestBase {
 createTestFileAndSetLength();
   }
 
+  @Test
+  public void test_0200_BasicReadTestV2() throws Exception {
+assumeHugeFileExists();
+
+try (
+FSDataInputStream inputStreamV1
+= accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+FSDataInputStream inputStreamV2
+= accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+) {
+  byte[] bufferV1 = new byte[3 * MEGABYTE];
+  byte[] bufferV2 = new byte[bufferV1.length];
+
+  // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
+  inputStreamV1.seek(5 * MEGABYTE);
+  int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
+  assertEquals(numBytesReadV1, KILOBYTE);
+
+  // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
+  inputStreamV2.seek(5 * MEGABYTE);
+  int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
+  assertEquals(numBytesReadV2, KILOBYTE);
+
+  assertArrayEquals(bufferV1, bufferV2);
+
+  int len = MEGABYTE;
+  int offset = bufferV1.length - len;
+
+  // v1 reverse seek and read a megabyte into last megabyte of bufferV1
+  inputStreamV1.seek(3 * MEGABYTE);
+  numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
+  assertEquals(numBytesReadV1, len);
+
+  // v2 reverse seek and read a megabyte into last megabyte of bufferV2
+  inputStreamV2.seek(3 * MEGABYTE);
+  numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
+  assertEquals(numBytesReadV2, len);
+
+  assertArrayEquals(bufferV1, bufferV2);
+}
+  }
+
   /**
* Validates the implementation of InputStream.markSupported

hadoop git commit: HADOOP-14518. Customize User-Agent header sent in HTTP/HTTPS requests by WASB. Contributed by Georgi Chalakov.

2017-07-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9fe5dd098 -> ab95086ed


HADOOP-14518. Customize User-Agent header sent in HTTP/HTTPS requests by WASB. 
Contributed by Georgi Chalakov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab95086e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab95086e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab95086e

Branch: refs/heads/branch-2
Commit: ab95086ed1c905eb11b4591227ed616208b07019
Parents: 9fe5dd0
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 24 13:59:27 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Jul 25 08:31:35 2017 -0700

--
 .../src/main/resources/core-default.xml |  10 +-
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../fs/azure/AzureNativeFileSystemStore.java| 144 +++
 .../hadoop-azure/src/site/markdown/index.md |  13 ++
 .../fs/azure/TestWasbUriAndConfiguration.java   |  48 +++
 .../src/test/resources/azure-test.xml   |   5 +
 6 files changed, 162 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab95086e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4d6b19e..f72afc5 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -508,7 +508,15 @@
     name to use for the service when the client wishes to make an RPC call.
   </description>
 </property>
-
+<property>
+  <name>fs.azure.user.agent.prefix</name>
+  <value>unknown</value>
+  <description>
+    WASB passes User-Agent header to the Azure back-end. The default value
+    contains WASB version, Java Runtime version, Azure Client library version,
+    and the value of the configuration option fs.azure.user.agent.prefix.
+  </description>
+</property>
 
 <property>
   <name>hadoop.security.uid.cache.secs</name>

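A minimal sketch of opting into a custom prefix, using the key defined above (the application name is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class WasbUserAgentDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Replaces the default "unknown" component of the outgoing
        // User-Agent header built by the WASB client.
        conf.set("fs.azure.user.agent.prefix", "MyApp/1.2");
      }
    }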
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab95086e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 65e452e..97c6279 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -112,6 +112,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
 xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
+xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
 
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab95086e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 56bac06..f4aff95 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.mortbay.util.ajax.JSON;
+import org.apache.hadoop.util.VersionInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -71,6 +72,10 @@ import 
com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
 import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
 import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.core.BaseRequest;
+import c

hadoop git commit: HADOOP-14518. Customize User-Agent header sent in HTTP/HTTPS requests by WASB. Contributed by Georgi Chalakov.

2017-07-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk c98201b5d -> f2921e51f


HADOOP-14518. Customize User-Agent header sent in HTTP/HTTPS requests by WASB. 
Contributed by Georgi Chalakov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2921e51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2921e51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2921e51

Branch: refs/heads/trunk
Commit: f2921e51f0fe613abce0a9f415a0d8ab6144aa6e
Parents: c98201b
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 24 13:59:27 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 24 23:01:01 2017 -0700

--
 .../src/main/resources/core-default.xml |  10 +-
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../fs/azure/AzureNativeFileSystemStore.java| 144 +++
 .../hadoop-azure/src/site/markdown/index.md |  13 ++
 .../fs/azure/TestWasbUriAndConfiguration.java   |  48 +++
 .../src/test/resources/azure-test.xml   |   5 +
 6 files changed, 162 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 68b0a9d..d5ddc7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -499,7 +499,15 @@
     name to use for the service when the client wishes to make an RPC call.
   </description>
 </property>
-
+<property>
+  <name>fs.azure.user.agent.prefix</name>
+  <value>unknown</value>
+  <description>
+    WASB passes User-Agent header to the Azure back-end. The default value
+    contains WASB version, Java Runtime version, Azure Client library version,
+    and the value of the configuration option fs.azure.user.agent.prefix.
+  </description>
+</property>
 
 <property>
   <name>hadoop.security.uid.cache.secs</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 593254eb..ef74cba 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -116,6 +116,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
 xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
+xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 6b6f07a..7c198af 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,6 +72,10 @@ import 
com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
 import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
 import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.StorageEvent;
+impor

hadoop git commit: HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.

2017-07-19 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e0297ffbc -> 23b920cd7


HADOOP-14642. wasb: add support for caching Authorization and SASKeys. 
Contributed by Sivaguru Sankaridurg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b920cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b920cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b920cd

Branch: refs/heads/branch-2
Commit: 23b920cd7ab23ad71adc75439e8bb6ec5a7924bd
Parents: e0297ff
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Jul 19 00:38:45 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Jul 19 11:37:36 2017 -0700

--
 .../src/main/resources/core-default.xml |   9 +
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../hadoop/fs/azure/CachingAuthorizer.java  | 232 +++
 .../fs/azure/LocalSASKeyGeneratorImpl.java  |  28 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   3 -
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  46 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  38 ++-
 .../hadoop/fs/azure/SASKeyGeneratorImpl.java|   4 +-
 .../hadoop-azure/src/site/markdown/index.md |  38 +++
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   5 +
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFSAuthorizationCaching.java  |  60 +
 .../TestNativeAzureFileSystemAuthorization.java |  86 ++-
 ...veAzureFileSystemAuthorizationWithOwner.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../src/test/resources/azure-test.xml   |   3 +-
 16 files changed, 500 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 0ea607f..4d6b19e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1347,6 +1347,15 @@
   
 
 
+<property>
+  <name>fs.azure.authorization.caching.enable</name>
+  <value>true</value>
+  <description>
+    Config flag to enable caching of authorization results and saskeys in WASB.
+    This flag is relevant only when fs.azure.authorization is enabled.
+  </description>
+</property>
+
   
 
   

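Illustrative usage of the new flag (it only matters when fs.azure.authorization is on, per the description above):

    import org.apache.hadoop.conf.Configuration;

    public class WasbAuthCachingDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("fs.azure.authorization", true);
        // Caching is on by default; turning it off forces every request to
        // consult the remote authorization/SAS-key service again.
        conf.setBoolean("fs.azure.authorization.caching.enable", false);
      }
    }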
http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 30e08d5..65e452e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -111,6 +111,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
+xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
 
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 000..016ae74
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apac

hadoop git commit: HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.

2017-07-19 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 845c4e52b -> 2843c688b


HADOOP-14642. wasb: add support for caching Authorization and SASKeys. 
Contributed by Sivaguru Sankaridurg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2843c688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2843c688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2843c688

Branch: refs/heads/trunk
Commit: 2843c688bcc21c65eb3538ffb3caeaffe440eda8
Parents: 845c4e5
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Jul 19 00:13:06 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Jul 19 00:13:06 2017 -0700

--
 .../src/main/resources/core-default.xml |   9 +-
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../hadoop/fs/azure/CachingAuthorizer.java  | 232 +++
 .../fs/azure/LocalSASKeyGeneratorImpl.java  |  28 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   3 -
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  46 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  38 ++-
 .../hadoop/fs/azure/SASKeyGeneratorImpl.java|   4 +-
 .../hadoop-azure/src/site/markdown/index.md |  38 +++
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   5 +
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFSAuthorizationCaching.java  |  60 +
 .../TestNativeAzureFileSystemAuthorization.java |  86 ++-
 ...veAzureFileSystemAuthorizationWithOwner.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../src/test/resources/azure-test.xml   |   3 +-
 16 files changed, 499 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a705a4e..68b0a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1343,7 +1343,14 @@
     configuration
   </description>
 </property>
-
+<property>
+  <name>fs.azure.authorization.caching.enable</name>
+  <value>true</value>
+  <description>
+    Config flag to enable caching of authorization results and saskeys in WASB.
+    This flag is relevant only when fs.azure.authorization is enabled.
+  </description>
+</property>
 
 <property>
   <name>io.seqfile.compress.blocksize</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 8524973..593254eb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -115,6 +115,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
+xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 000..016ae74
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the 

hadoop git commit: HADOOP-14640. Azure: Support affinity for service running on localhost and reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token generation. Contributed by

2017-07-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 02627f8c5 -> b9f2effaf


HADOOP-14640. Azure: Support affinity for service running on localhost and 
reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token 
generation. Contributed by Santhosh G Nayak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f2effa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f2effa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f2effa

Branch: refs/heads/branch-2
Commit: b9f2effaf6782885fbade07436fcf26aad4713a4
Parents: 02627f8c
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 17 02:27:55 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 17 16:25:05 2017 -0700

--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  8 +-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  8 +-
 .../fs/azure/SecureWasbRemoteCallHelper.java| 86 
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 61 +++---
 .../hadoop/fs/azure/security/Constants.java | 19 +++--
 .../RemoteWasbDelegationTokenManager.java   | 27 +++---
 .../hadoop/fs/azure/security/SpnegoToken.java   | 49 +++
 .../fs/azure/TestWasbRemoteCallHelper.java  | 58 -
 8 files changed, 245 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f2effa/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 2b55d92..cc27905 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -104,10 +104,11 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
*/
   private static final String
   SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls;
 
@@ -126,13 +127,16 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
 
 this.isKerberosSupportEnabled =
 conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+this.isSpnegoTokenCacheEnabled =
+conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
 this.commaSeparatedUrls = conf.getTrimmedStrings(KEY_CRED_SERVICE_URLS);
 if (this.commaSeparatedUrls == null || this.commaSeparatedUrls.length <= 
0) {
   throw new IOException(
   KEY_CRED_SERVICE_URLS + " config not set" + " in configuration.");
 }
 if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
-  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, 
false);
+  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, 
false,
+  isSpnegoTokenCacheEnabled);
 } else {
   this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
 }

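Two notes on the hunk above. The retry spec appears to follow the (sleep-millis, retry-count) pair format of Hadoop's MultipleLinearRandomRetry, so "10,3,100,2" reads as three 10 ms retries followed by two 100 ms retries. And the new cookie cache defaults to on and can be disabled; a hedged sketch using the constant visible in the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.security.Constants;

    public class SpnegoCacheToggle {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Forces a fresh SPNEGO handshake for every remote call instead of
        // reusing the cached hadoop.auth cookie.
        conf.setBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, false);
      }
    }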
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f2effa/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index a0eb39e..f50fc01 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -93,10 +93,11 @@ public class RemoteWasbAuthorizerImpl implements 
WasbAuthorizerInterface {
* Authorization Remote http client retry policy spec default value. {@value}
*/
   private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT 
=
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls = null;
 
@@ -111

hadoop git commit: HADOOP-14640. Azure: Support affinity for service running on localhost and reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token generation. Contributed by

2017-07-17 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb3b5d33f -> b0e78ae08


HADOOP-14640. Azure: Support affinity for service running on localhost and 
reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token 
generation. Contributed by Santhosh G Nayak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e78ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e78ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e78ae0

Branch: refs/heads/trunk
Commit: b0e78ae085928c82ae63a05a29a628c2e289c0fc
Parents: fb3b5d3
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 17 02:27:55 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 17 02:27:55 2017 -0700

--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  8 +-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  8 +-
 .../fs/azure/SecureWasbRemoteCallHelper.java| 86 
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 61 +++---
 .../hadoop/fs/azure/security/Constants.java | 19 +++--
 .../RemoteWasbDelegationTokenManager.java   | 27 +++---
 .../hadoop/fs/azure/security/SpnegoToken.java   | 49 +++
 .../fs/azure/TestWasbRemoteCallHelper.java  | 58 -
 8 files changed, 245 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 87f3b0b..a7cedea 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -105,10 +105,11 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
*/
   private static final String
   SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls;
 
@@ -127,13 +128,16 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
 
 this.isKerberosSupportEnabled =
 conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+this.isSpnegoTokenCacheEnabled =
+conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
 this.commaSeparatedUrls = conf.getTrimmedStrings(KEY_CRED_SERVICE_URLS);
 if (this.commaSeparatedUrls == null || this.commaSeparatedUrls.length <= 
0) {
   throw new IOException(
   KEY_CRED_SERVICE_URLS + " config not set" + " in configuration.");
 }
 if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
-  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, 
false);
+  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, 
false,
+  isSpnegoTokenCacheEnabled);
 } else {
   this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index e2d515c..cd4e0a3 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -93,10 +93,11 @@ public class RemoteWasbAuthorizerImpl implements 
WasbAuthorizerInterface {
* Authorization Remote http client retry policy spec default value. {@value}
*/
   private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT 
=
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls = null;
 
@@ -111,6 +112,8 @@ publ

hadoop git commit: HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is thread-unsafe. Contributed by Benoy Antony and Rakesh R.

2017-07-10 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d94b30cb0 -> b82485d6f


HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is 
thread-unsafe. Contributed by Benoy Antony and Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82485d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82485d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82485d6

Branch: refs/heads/branch-2
Commit: b82485d6fed2194bf0dc2eedfab7e226e30a7cf0
Parents: d94b30c
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 7 12:45:37 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 10 17:55:02 2017 -0700

--
 .../hadoop/security/alias/CredentialProviderFactory.java  | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82485d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
index d1e3eb5..1b2ac41 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 import java.util.ServiceLoader;
 
@@ -49,6 +50,15 @@ public abstract class CredentialProviderFactory {
   ServiceLoader.load(CredentialProviderFactory.class,
   CredentialProviderFactory.class.getClassLoader());
 
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<CredentialProviderFactory> iterServices = serviceLoader.iterator();
+while (iterServices.hasNext()) {
+  iterServices.next();
+}
+  }
+
   public static List<CredentialProvider> getProviders(Configuration conf
       ) throws IOException {
     List<CredentialProvider> result = new ArrayList<CredentialProvider>();

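The pattern generalizes beyond this class; a self-contained sketch (with a placeholder service type) of forcing a ServiceLoader to materialize during class initialization so later reads need no locking:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.ServiceLoader;

    public final class EagerServiceLoading {
      // Materialized once, under the JVM's class-init lock; afterwards the
      // immutable list can be read from any thread without synchronization.
      private static final List<Runnable> SERVICES;
      static {
        List<Runnable> loaded = new ArrayList<>();
        for (Runnable r : ServiceLoader.load(Runnable.class)) {
          loaded.add(r);
        }
        SERVICES = Collections.unmodifiableList(loaded);
      }

      public static List<Runnable> services() {
        return SERVICES;
      }
    }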




hadoop git commit: HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is thread-unsafe. Contributed by Benoy Antony and Rakesh R.

2017-07-10 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5496a34c0 -> f1efa14fc


HADOOP-10829. Iteration on CredentialProviderFactory.serviceLoader is 
thread-unsafe. Contributed by Benoy Antony and Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1efa14f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1efa14f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1efa14f

Branch: refs/heads/trunk
Commit: f1efa14fc676641fa15c11d3147e3ad948b084e9
Parents: 5496a34
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 7 12:45:37 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 10 17:48:27 2017 -0700

--
 .../hadoop/security/alias/CredentialProviderFactory.java  | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1efa14f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
index d1e3eb5..1b2ac41 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 import java.util.ServiceLoader;
 
@@ -49,6 +50,15 @@ public abstract class CredentialProviderFactory {
   ServiceLoader.load(CredentialProviderFactory.class,
   CredentialProviderFactory.class.getClassLoader());
 
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<CredentialProviderFactory> iterServices = serviceLoader.iterator();
+while (iterServices.hasNext()) {
+  iterServices.next();
+}
+  }
+
   public static List<CredentialProvider> getProviders(Configuration conf
       ) throws IOException {
     List<CredentialProvider> result = new ArrayList<CredentialProvider>();





[2/2] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak.

2017-07-10 Thread jitendra
HADOOP-14443. Azure: Support retry and client side failover for authorization, 
SASKey and delegation token generation. Contributed by Santhosh G Nayak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d94b30cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d94b30cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d94b30cb

Branch: refs/heads/branch-2
Commit: d94b30cb03a5278b80cbd9da3b12018af2d734db
Parents: 20a2770
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 10 17:30:31 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 10 17:30:31 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  39 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 274 +++
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 229 ++--
 .../fs/azure/SecureWasbRemoteCallHelper.java| 210 ++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 259 +-
 .../hadoop/fs/azure/security/Constants.java |  20 +-
 .../hadoop/fs/azure/security/JsonUtils.java |  53 
 .../RemoteWasbDelegationTokenManager.java   | 162 +++
 .../hadoop/fs/azure/security/SecurityUtils.java |  86 --
 .../hadoop/fs/azure/security/TokenUtils.java|  60 
 .../security/WasbDelegationTokenManager.java|  54 
 .../fs/azure/security/WasbTokenRenewer.java |  77 +-
 .../hadoop-azure/src/site/markdown/index.md |  44 ++-
 .../TestNativeAzureFileSystemAuthorization.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  | 230 +---
 15 files changed, 1178 insertions(+), 621 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d94b30cb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index fafa85d..392c4c8 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -27,9 +27,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.URL;
 import java.nio.charset.Charset;
-import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -60,15 +58,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
+import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
-import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.codehaus.jackson.JsonNode;
@@ -1175,7 +1172,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   private UserGroupInformation ugi;
 
-  private String delegationToken = null;
+  private WasbDelegationTokenManager wasbDelegationTokenManager;
 
   public NativeAzureFileSystem() {
 // set store in initialize()
@@ -1325,9 +1322,7 @@ public class NativeAzureFileSystem extends FileSystem {
 }
 
 if (UserGroupInformation.isSecurityEnabled() && kerberosSupportEnabled) {
-  DelegationTokenAuthenticator authenticator = new 
KerberosDelegationTokenAuthenticator();
-  authURL = new DelegationTokenAuthenticatedURL(authenticator);
-  credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
+  this.wasbDelegationTokenManager = new 
RemoteWasbDelegationTokenManager(conf);
 }
   }
 
@@ -3010,31 +3005,7 @@ public class NativeAzureFileSystem extends FileSystem {
   @Override
   public synchronized Token getDelegationToken(final String renewer) throws 
IOException {
 if (kerberosSupportE

[1/2] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak.

2017-07-10 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 20a2770d7 -> d94b30cb0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d94b30cb/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a0276cb5..fbd7f62 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -46,7 +46,7 @@ public class TestNativeAzureFileSystemAuthorization
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d94b30cb/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index 059cce3..efdea8b 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -21,34 +21,48 @@ package org.apache.hadoop.fs.azure;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.http.*;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
 import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 
 import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
 
 /**
  * Test class to hold all WasbRemoteCallHelper tests
  */
 public class TestWasbRemoteCallHelper
 extends AbstractWasbTestBase {
+  public static final String EMPTY_STRING = "";
+  private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 
@@ -80,7 +94,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(999));
+Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
 // finished setting up mocks
 
 performop(mockHttpClient);
@@ -99,7 +113,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+
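
Aside on the change above: KEY_REMOTE_AUTH_SERVICE_URL becomes KEY_REMOTE_AUTH_SERVICE_URLS, and the test now feeds the authorizer a comma-separated list of endpoints ("http://localhost1/,http://localhost2/") so the call helper can fail over between them. A minimal, self-contained sketch of parsing such a value; the class and method names here are hypothetical, not the Hadoop implementation:

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper, not the Hadoop code: split a setting such as
// "http://localhost1/,http://localhost2/" into candidate endpoints.
public final class ServiceUrls {
  private ServiceUrls() {
  }

  public static List<String> parse(String commaSeparated) {
    List<String> urls = new ArrayList<>();
    if (commaSeparated == null) {
      return urls;
    }
    for (String part : commaSeparated.split(",")) {
      String url = part.trim();
      if (!url.isEmpty()) {
        urls.add(url);  // callers can iterate these for failover
      }
    }
    return urls;
  }

  public static void main(String[] args) {
    // Prints [http://localhost1/, http://localhost2/]
    System.out.println(parse("http://localhost1/,http://localhost2/"));
  }
}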

hadoop git commit: HDFS-11533. reuseAddress option should be used for child channels in Portmap and SimpleTcpServer. Contributed by Mukul Kumar Singh.

2017-03-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0baabc24 -> d564d1f44


HDFS-11533. reuseAddress option should be used for child channels in Portmap 
and SimpleTcpServer. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d564d1f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d564d1f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d564d1f4

Branch: refs/heads/branch-2
Commit: d564d1f44d0b3679009a601385f1d0f9e01624c6
Parents: b0baabc
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Mar 16 11:40:12 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Mar 16 12:37:22 2017 -0700

--
 .../src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java | 1 +
 .../hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d564d1f4/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index f7ab52e..bd48b15 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -81,6 +81,7 @@ public class SimpleTcpServer {
 });
 server.setOption("child.tcpNoDelay", true);
 server.setOption("child.keepAlive", true);
+server.setOption("child.reuseAddress", true);
 server.setOption("reuseAddress", true);
 
 // Listen to TCP port

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d564d1f4/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 94d76d0..7586fda 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -110,6 +110,7 @@ final class Portmap {
   }
 });
 tcpServer.setOption("reuseAddress", true);
+tcpServer.setOption("child.reuseAddress", true);
 
 udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
 Executors.newCachedThreadPool()));
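
Context for the one-line additions in this commit: in Netty 3.x, options prefixed with "child." apply to each accepted connection, while unprefixed options apply only to the listening socket, so setting reuseAddress on the parent alone leaves accepted channels without SO_REUSEADDR. A minimal sketch of the option split, assuming Netty 3.x (org.jboss.netty) on the classpath; the handler pipeline is omitted:

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;

public class ChildOptionsSketch {
  public static void main(String[] args) {
    ServerBootstrap server = new ServerBootstrap(
        new NioServerSocketChannelFactory(
            Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool()));

    // Applies to the listening (parent) socket only.
    server.setOption("reuseAddress", true);

    // "child.*" options apply to every accepted connection;
    // this is the line the commit above adds.
    server.setOption("child.reuseAddress", true);
    server.setOption("child.tcpNoDelay", true);
    server.setOption("child.keepAlive", true);

    Channel channel = server.bind(new InetSocketAddress(0));
    System.out.println("bound to " + channel.getLocalAddress());
    channel.close().awaitUninterruptibly();
    server.releaseExternalResources();
  }
}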





hadoop git commit: HDFS-11533. reuseAddress option should be used for child channels in Portmap and SimpleTcpServer. Contributed by Mukul Kumar Singh.

2017-03-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79ede403e -> 09ad8effb


HDFS-11533. reuseAddress option should be used for child channels in Portmap 
and SimpleTcpServer. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09ad8eff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09ad8eff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09ad8eff

Branch: refs/heads/trunk
Commit: 09ad8effb825eddbf0ee2ef591a0d16a58468f56
Parents: 79ede40
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Mar 16 11:40:12 2017 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Mar 16 12:20:46 2017 -0700

--
 .../src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java | 1 +
 .../hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ad8eff/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index f7ab52e..bd48b15 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -81,6 +81,7 @@ public class SimpleTcpServer {
 });
 server.setOption("child.tcpNoDelay", true);
 server.setOption("child.keepAlive", true);
+server.setOption("child.reuseAddress", true);
 server.setOption("reuseAddress", true);
 
 // Listen to TCP port

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ad8eff/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 94d76d0..7586fda 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -110,6 +110,7 @@ final class Portmap {
   }
 });
 tcpServer.setOption("reuseAddress", true);
+tcpServer.setOption("child.reuseAddress", true);
 
 udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
 Executors.newCachedThreadPool()));





hadoop git commit: HDFS-11365. Log portnumber in PrivilegedNfsGatewayStarter. Contributed by Mukul Kumar Singh.

2017-01-24 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b4078e1d0 -> 1af0a4d90


HDFS-11365. Log portnumber in PrivilegedNfsGatewayStarter. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1af0a4d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1af0a4d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1af0a4d9

Branch: refs/heads/branch-2
Commit: 1af0a4d901d296fc25242f0ca4578417133773f4
Parents: b4078e1
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Jan 24 21:58:02 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Jan 24 21:59:41 2017 -0800

--
 .../hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java | 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1af0a4d9/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
index 3934d7c..695cbc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
@@ -18,9 +18,12 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.net.DatagramSocket;
 import java.net.InetSocketAddress;
+import java.net.SocketException;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 
@@ -34,7 +37,7 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
  * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880
  */
 public class PrivilegedNfsGatewayStarter implements Daemon {
-   
+  static final Log LOG = LogFactory.getLog(PrivilegedNfsGatewayStarter.class);
   private String[] args = null;
   private DatagramSocket registrationSocket = null;
 
@@ -49,9 +52,15 @@ public class PrivilegedNfsGatewayStarter implements Daemon {
   NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " +
   "privileged port.");
 }
-registrationSocket = new DatagramSocket(
-new InetSocketAddress("localhost", clientPort));
-registrationSocket.setReuseAddress(true);
+
+try {
+  registrationSocket = new DatagramSocket(
+new InetSocketAddress("localhost", clientPort));
+  registrationSocket.setReuseAddress(true);
+} catch (SocketException e) {
+  LOG.error("Init failed for port=" + clientPort, e);
+  throw e;
+}
 args = context.getArguments();
   }
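
The value of this change is diagnostic: a bare SocketException does not record which port was requested, so the catch, log, and rethrow makes a privileged-port misconfiguration visible in the daemon log. A standalone sketch of the same pattern; the class name and helper are illustrative:

import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class SocketInitLogging {
  private static final Log LOG = LogFactory.getLog(SocketInitLogging.class);

  // Opens a registration socket, logging the offending port on failure.
  static DatagramSocket open(int port) throws SocketException {
    try {
      DatagramSocket socket =
          new DatagramSocket(new InetSocketAddress("localhost", port));
      socket.setReuseAddress(true);
      return socket;
    } catch (SocketException e) {
      // Rethrow after recording the port; callers still see the failure.
      LOG.error("Init failed for port=" + port, e);
      throw e;
    }
  }

  public static void main(String[] args) throws SocketException {
    open(0).close(); // port 0 = any free port, so this normally succeeds
  }
}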
 





hadoop git commit: HDFS-11365. Log portnumber in PrivilegedNfsGatewayStarter. Contributed by Mukul Kumar Singh.

2017-01-24 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9c0a4d3e7 -> 5a5652098


HDFS-11365. Log portnumber in PrivilegedNfsGatewayStarter. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a565209
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a565209
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a565209

Branch: refs/heads/trunk
Commit: 5a5652098c0c717fee045e12bcaf7cf5a72635ea
Parents: 9c0a4d3
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Tue Jan 24 21:58:02 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Tue Jan 24 21:58:02 2017 -0800

--
 .../hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java | 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a565209/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
index 3934d7c..695cbc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
@@ -18,9 +18,12 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.net.DatagramSocket;
 import java.net.InetSocketAddress;
+import java.net.SocketException;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 
@@ -34,7 +37,7 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
  * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880
  */
 public class PrivilegedNfsGatewayStarter implements Daemon {
-   
+  static final Log LOG = LogFactory.getLog(PrivilegedNfsGatewayStarter.class);
   private String[] args = null;
   private DatagramSocket registrationSocket = null;
 
@@ -49,9 +52,15 @@ public class PrivilegedNfsGatewayStarter implements Daemon {
   NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " +
   "privileged port.");
 }
-registrationSocket = new DatagramSocket(
-new InetSocketAddress("localhost", clientPort));
-registrationSocket.setReuseAddress(true);
+
+try {
+  registrationSocket = new DatagramSocket(
+new InetSocketAddress("localhost", clientPort));
+  registrationSocket.setReuseAddress(true);
+} catch (SocketException e) {
+  LOG.error("Init failed for port=" + clientPort, e);
+  throw e;
+}
 args = context.getArguments();
   }
 





hadoop git commit: HDFS-11307. The rpc to portmap service for NFS has hardcoded timeout. Contributed by Mukul Kumar Singh.

2017-01-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 954dae26c -> ffb4c22f6


HDFS-11307. The rpc to portmap service for NFS has hardcoded timeout. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffb4c22f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffb4c22f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffb4c22f

Branch: refs/heads/branch-2
Commit: ffb4c22f6edf72577564421fd169bfc85d7a1e54
Parents: 954dae2
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 16 14:33:56 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 16 15:17:29 2017 -0800

--
 .../org/apache/hadoop/oncrpc/RpcProgram.java| 30 
 .../apache/hadoop/oncrpc/SimpleUdpClient.java   | 11 +--
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  6 
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java |  4 ++-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java|  4 ++-
 .../org/apache/hadoop/hdfs/nfs/TestMountd.java  | 13 -
 6 files changed, 58 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffb4c22f/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index cebfcfa..c541cd6 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -22,6 +22,7 @@ import java.net.DatagramSocket;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
@@ -55,7 +56,18 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
* system portmap daemon when registering this RPC server program.
*/
   private final DatagramSocket registrationSocket;
-  
+  /*
+   * Timeout value in millisecond for the rpc connection to portmap
+   */
+  private final int portmapUdpTimeoutMillis;
+
+  protected RpcProgram(String program, String host, int port, int progNumber,
+  int lowProgVersion, int highProgVersion,
+  DatagramSocket registrationSocket, boolean allowInsecurePorts) {
+this(program, host, port, progNumber, lowProgVersion, highProgVersion,
+registrationSocket, allowInsecurePorts, 500);
+  }
+
   /**
* Constructor
* 
@@ -69,10 +81,12 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
*with portmap daemon
* @param allowInsecurePorts true to allow client connections from
*unprivileged ports, false otherwise
+   * @param  portmapUdpTimeoutMillis timeout in milliseconds for RPC connection
*/
   protected RpcProgram(String program, String host, int port, int progNumber,
   int lowProgVersion, int highProgVersion,
-  DatagramSocket registrationSocket, boolean allowInsecurePorts) {
+  DatagramSocket registrationSocket, boolean allowInsecurePorts,
+  int portmapUdpTimeoutMillis) {
 this.program = program;
 this.host = host;
 this.port = port;
@@ -81,6 +95,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
 this.highProgVersion = highProgVersion;
 this.registrationSocket = registrationSocket;
 this.allowInsecurePorts = allowInsecurePorts;
+this.portmapUdpTimeoutMillis = portmapUdpTimeoutMillis;
 LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
 + "connections from unprivileged ports");
   }
@@ -124,14 +139,14 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   }
   
   /**
-   * Register the program with Portmap or Rpcbind
+   * Register the program with Portmap or Rpcbind.
* @param mapEntry port map entries
* @param set specifies registration or not
*/
   protected void register(PortmapMapping mapEntry, boolean set) {
 XDR mappingRequest = PortmapRequest.create(mapEntry, set);
 SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
-mappingRequest, registrationSocket);
+mappingRequest, true, registrationSocket, portmapUdpTimeoutMillis);
 try {
   registrationClient.run();
 } catch (IOException e) {
@@ -238,4 +253,9 @@ public abstract class RpcProgram extends SimpleCh
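
The new constructor overload above keeps source compatibility: existing callers fall through to the old 500 ms default, while new callers thread portmapUdpTimeoutMillis down to the UDP client. A sketch of how such a timeout is typically enforced on a plain java.net.DatagramSocket; the request framing and names are illustrative, not the Hadoop SimpleUdpClient:

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketTimeoutException;

public class UdpTimeoutSketch {
  // Sends a datagram and waits at most timeoutMillis for the reply.
  static byte[] call(InetAddress host, int port, byte[] request,
      int timeoutMillis) throws IOException {
    try (DatagramSocket socket = new DatagramSocket()) {
      socket.setSoTimeout(timeoutMillis); // receive() now fails fast
      socket.send(new DatagramPacket(request, request.length, host, port));
      byte[] buf = new byte[65507]; // max UDP payload
      DatagramPacket reply = new DatagramPacket(buf, buf.length);
      try {
        socket.receive(reply);
      } catch (SocketTimeoutException e) {
        throw new IOException("No reply within " + timeoutMillis + "ms", e);
      }
      byte[] data = new byte[reply.getLength()];
      System.arraycopy(buf, 0, data, 0, reply.getLength());
      return data;
    }
  }
  // Usage: call(InetAddress.getByName("localhost"), 111, requestBytes, 500);
}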

hadoop git commit: HDFS-11307. The rpc to portmap service for NFS has hardcoded timeout. Contributed by Mukul Kumar Singh.

2017-01-16 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79e939d0b -> d1d0b3e1f


HDFS-11307. The rpc to portmap service for NFS has hardcoded timeout. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1d0b3e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1d0b3e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1d0b3e1

Branch: refs/heads/trunk
Commit: d1d0b3e1fd593d590aaf2e3db8f730a296b20aa1
Parents: 79e939d
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 16 14:33:56 2017 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 16 14:53:53 2017 -0800

--
 .../org/apache/hadoop/oncrpc/RpcProgram.java| 30 
 .../apache/hadoop/oncrpc/SimpleUdpClient.java   | 11 +--
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  6 
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java |  4 ++-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java|  4 ++-
 .../org/apache/hadoop/hdfs/nfs/TestMountd.java  | 13 -
 6 files changed, 58 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1d0b3e1/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index cebfcfa..c541cd6 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -22,6 +22,7 @@ import java.net.DatagramSocket;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
@@ -55,7 +56,18 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
* system portmap daemon when registering this RPC server program.
*/
   private final DatagramSocket registrationSocket;
-  
+  /*
+   * Timeout value in millisecond for the rpc connection to portmap
+   */
+  private final int portmapUdpTimeoutMillis;
+
+  protected RpcProgram(String program, String host, int port, int progNumber,
+  int lowProgVersion, int highProgVersion,
+  DatagramSocket registrationSocket, boolean allowInsecurePorts) {
+this(program, host, port, progNumber, lowProgVersion, highProgVersion,
+registrationSocket, allowInsecurePorts, 500);
+  }
+
   /**
* Constructor
* 
@@ -69,10 +81,12 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
*with portmap daemon
* @param allowInsecurePorts true to allow client connections from
*unprivileged ports, false otherwise
+   * @param  portmapUdpTimeoutMillis timeout in milliseconds for RPC connection
*/
   protected RpcProgram(String program, String host, int port, int progNumber,
   int lowProgVersion, int highProgVersion,
-  DatagramSocket registrationSocket, boolean allowInsecurePorts) {
+  DatagramSocket registrationSocket, boolean allowInsecurePorts,
+  int portmapUdpTimeoutMillis) {
 this.program = program;
 this.host = host;
 this.port = port;
@@ -81,6 +95,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
 this.highProgVersion = highProgVersion;
 this.registrationSocket = registrationSocket;
 this.allowInsecurePorts = allowInsecurePorts;
+this.portmapUdpTimeoutMillis = portmapUdpTimeoutMillis;
 LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
 + "connections from unprivileged ports");
   }
@@ -124,14 +139,14 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   }
   
   /**
-   * Register the program with Portmap or Rpcbind
+   * Register the program with Portmap or Rpcbind.
* @param mapEntry port map entries
* @param set specifies registration or not
*/
   protected void register(PortmapMapping mapEntry, boolean set) {
 XDR mappingRequest = PortmapRequest.create(mapEntry, set);
 SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
-mappingRequest, registrationSocket);
+mappingRequest, true, registrationSocket, portmapUdpTimeoutMillis);
 try {
   registrationClient.run();
 } catch (IOException e) {
@@ -238,4 +253,9 @@ public abstract class RpcProgram extends SimpleCh

hadoop git commit: HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang Liu.

2016-08-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6b3114a2a -> f4bf3e22d


HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang 
Liu.

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4bf3e22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4bf3e22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4bf3e22

Branch: refs/heads/branch-2.8
Commit: f4bf3e22de26d8935edb8e56b53cca2ed977e752
Parents: 6b3114a
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Aug 8 11:00:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Aug 8 11:06:05 2016 -0700

--
 .../security/ssl/ReloadingX509TrustManager.java | 12 ++--
 .../ssl/TestReloadingX509TrustManager.java  | 64 ++--
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4bf3e22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index 1b24940..bb90a61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -23,6 +23,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
@@ -44,8 +46,11 @@ import java.util.concurrent.atomic.AtomicReference;
 public final class ReloadingX509TrustManager
   implements X509TrustManager, Runnable {
 
-  private static final Log LOG =
-LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final Log LOG = LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final String RELOAD_ERROR_MESSAGE =
+  "Could not load truststore (keep using existing one) : ";
 
   private String type;
   private File file;
@@ -194,8 +199,7 @@ public final class ReloadingX509TrustManager
 try {
   trustManagerRef.set(loadTrustManager());
 } catch (Exception ex) {
-  LOG.warn("Could not load truststore (keep using existing one) : " +
-   ex.toString(), ex);
+  LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4bf3e22/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 75e5a8e..09e959b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -18,6 +18,11 @@
 package org.apache.hadoop.security.ssl;
 
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import com.google.common.base.Supplier;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -29,11 +34,13 @@ import java.security.KeyPair;
 import java.security.cert.X509Certificate;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 import static org.junit.Assert.assertEquals;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.createTrustStore;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateCertificate;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateKeyPair;
+import static org.junit.Assert.assertFalse;
 
 public class TestReloadingX509TrustManager {
 
@@ -43,6 +50,8 @@ public class TestReloadingX509TrustManager {
 
   private X509Certificat
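
The imports added above (GenericTestUtils, LogCapturer, Supplier, TimeoutException) outline the de-flaking strategy: capture the reloader's log and poll for the expected message within a bounded timeout rather than sleeping a fixed interval. A generic sketch of the polling idiom using only the JDK; Hadoop's GenericTestUtils.waitFor plays this role in the actual test:

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class WaitFor {
  private WaitFor() {
  }

  // Polls check every intervalMillis until it passes or timeoutMillis elapses.
  public static void waitFor(BooleanSupplier check, long intervalMillis,
      long timeoutMillis) throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!check.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(
            "Condition not met in " + timeoutMillis + "ms");
      }
      Thread.sleep(intervalMillis);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Example condition: 200 ms have elapsed since start.
    waitFor(() -> System.currentTimeMillis() - start > 200, 10, 5000);
    System.out.println("condition met");
  }
}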

hadoop git commit: HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang Liu.

2016-08-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ce8df272f -> e9955b180


HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang 
Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9955b18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9955b18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9955b18

Branch: refs/heads/branch-2
Commit: e9955b18060ae0436ee6a967b56e4bc65ef8ec5f
Parents: ce8df27
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Aug 8 11:00:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Aug 8 11:02:07 2016 -0700

--
 .../security/ssl/ReloadingX509TrustManager.java | 12 ++--
 .../ssl/TestReloadingX509TrustManager.java  | 63 ++--
 2 files changed, 53 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9955b18/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index 1b24940..bb90a61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -23,6 +23,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
@@ -44,8 +46,11 @@ import java.util.concurrent.atomic.AtomicReference;
 public final class ReloadingX509TrustManager
   implements X509TrustManager, Runnable {
 
-  private static final Log LOG =
-LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final Log LOG = LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final String RELOAD_ERROR_MESSAGE =
+  "Could not load truststore (keep using existing one) : ";
 
   private String type;
   private File file;
@@ -194,8 +199,7 @@ public final class ReloadingX509TrustManager
 try {
   trustManagerRef.set(loadTrustManager());
 } catch (Exception ex) {
-  LOG.warn("Could not load truststore (keep using existing one) : " +
-   ex.toString(), ex);
+  LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9955b18/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 9375da8..bf058cd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -19,6 +19,10 @@ package org.apache.hadoop.security.ssl;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import com.google.common.base.Supplier;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -30,11 +34,13 @@ import java.security.KeyPair;
 import java.security.cert.X509Certificate;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 import static org.junit.Assert.assertEquals;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.createTrustStore;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateCertificate;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateKeyPair;
+import static org.junit.Assert.assertFalse;
 
 public class TestReloadingX509TrustManager {
 
@@ -43,6 +49,8 @@ public class TestReloadingX509TrustManager {
 
   private X509Certificate cert1;
   private X509Certificate cert2;
+  private final LogCapturer reloaderLog = LogCapturer.captureLogs(
+  ReloadingX509TrustManage

hadoop git commit: HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang Liu.

2016-08-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 37d939a45 -> 625585950


HADOOP-10823. TestReloadingX509TrustManager is flaky. Contributed by Mingliang 
Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62558595
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62558595
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62558595

Branch: refs/heads/trunk
Commit: 625585950a15461eb032e5e7ed8fdf4e1113b2bb
Parents: 37d939a
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Aug 8 11:00:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Aug 8 11:00:19 2016 -0700

--
 .../security/ssl/ReloadingX509TrustManager.java | 12 ++--
 .../ssl/TestReloadingX509TrustManager.java  | 63 ++--
 2 files changed, 53 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62558595/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index 1b24940..bb90a61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -23,6 +23,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
@@ -44,8 +46,11 @@ import java.util.concurrent.atomic.AtomicReference;
 public final class ReloadingX509TrustManager
   implements X509TrustManager, Runnable {
 
-  private static final Log LOG =
-LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final Log LOG = LogFactory.getLog(ReloadingX509TrustManager.class);
+  @VisibleForTesting
+  static final String RELOAD_ERROR_MESSAGE =
+  "Could not load truststore (keep using existing one) : ";
 
   private String type;
   private File file;
@@ -194,8 +199,7 @@ public final class ReloadingX509TrustManager
 try {
   trustManagerRef.set(loadTrustManager());
 } catch (Exception ex) {
-  LOG.warn("Could not load truststore (keep using existing one) : " +
-   ex.toString(), ex);
+  LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62558595/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 9375da8..bf058cd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -19,6 +19,10 @@ package org.apache.hadoop.security.ssl;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import com.google.common.base.Supplier;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -30,11 +34,13 @@ import java.security.KeyPair;
 import java.security.cert.X509Certificate;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 import static org.junit.Assert.assertEquals;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.createTrustStore;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateCertificate;
 import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateKeyPair;
+import static org.junit.Assert.assertFalse;
 
 public class TestReloadingX509TrustManager {
 
@@ -43,6 +49,8 @@ public class TestReloadingX509TrustManager {
 
   private X509Certificate cert1;
   private X509Certificate cert2;
+  private final LogCapturer reloaderLog = LogCapturer.captureLogs(
+  ReloadingX509TrustManager.LOG);

hadoop git commit: HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.

2016-07-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 343633a6e -> a363277be


HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and 
s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a363277b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a363277b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a363277b

Branch: refs/heads/branch-2.8
Commit: a363277be50a69786dbdfd0dd243dc8d3f136366
Parents: 343633a
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 15 14:28:53 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 15 14:32:20 2016 -0700

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 18 ++
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  2 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java | 20 
 3 files changed, 24 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a363277b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index 83d880a..d631dd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -88,6 +89,14 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 
+private static final Map<String, OpType> SYMBOL_MAP =
+new HashMap<>(OpType.values().length);
+static {
+  for (OpType opType : values()) {
+SYMBOL_MAP.put(opType.getSymbol(), opType);
+  }
+}
+
 private final String symbol;
 
 OpType(String symbol) {
@@ -99,14 +108,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 }
 
 public static OpType fromSymbol(String symbol) {
-  if (symbol != null) {
-for (OpType opType : values()) {
-  if (opType.getSymbol().equals(symbol)) {
-return opType;
-  }
-}
-  }
-  return null;
+  return SYMBOL_MAP.get(symbol);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a363277b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
index d63ef10..5ccee3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
@@ -111,12 +111,14 @@ public class TestDFSOpsCountStatistics {
 
   @Test
   public void testGetLong() {
+assertNull(statistics.getLong(null));
 assertNull(statistics.getLong(NO_SUCH_OP));
 verifyStatistics();
   }
 
   @Test
   public void testIsTracked() {
+assertFalse(statistics.isTracked(null));
 assertFalse(statistics.isTracked(NO_SUCH_OP));
 
 final Iterator iter = statistics.getLongStatistics();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a363277b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index 3c205f3..36d163c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.s3a;
 
 import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  *
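
The diff above replaces a linear scan of values() with a HashMap built once in a static initializer, so fromSymbol becomes an O(1) lookup, and a null symbol falls out naturally because HashMap.get(null) returns null. A compact standalone sketch of the idiom with illustrative enum constants:

import java.util.HashMap;
import java.util.Map;

// Illustrative constants only; the real enums map many more operations.
enum Op {
  MKDIRS("op_mkdirs"),
  RENAME("op_rename"),
  DELETE("op_delete");

  // Built once when the enum class is initialized; later lookups are O(1).
  private static final Map<String, Op> SYMBOL_MAP = new HashMap<>();
  static {
    for (Op op : values()) {
      SYMBOL_MAP.put(op.symbol, op);
    }
  }

  private final String symbol;

  Op(String symbol) {
    this.symbol = symbol;
  }

  // Returns null for unknown symbols and for null (HashMap.get(null) is safe).
  public static Op fromSymbol(String symbol) {
    return SYMBOL_MAP.get(symbol);
  }
}

class OpDemo {
  public static void main(String[] args) {
    System.out.println(Op.fromSymbol("op_rename")); // RENAME
    System.out.println(Op.fromSymbol(null));        // null
  }
}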

hadoop git commit: HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.

2016-07-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 04c8294b6 -> 328866905


HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and 
s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32886690
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32886690
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32886690

Branch: refs/heads/branch-2
Commit: 328866905cd747b9ec94ae21ac351a5ca709c550
Parents: 04c8294
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 15 14:28:53 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 15 14:29:59 2016 -0700

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 18 ++
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  2 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java | 20 
 3 files changed, 24 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32886690/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index 83d880a..d631dd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -88,6 +89,14 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 
+private static final Map<String, OpType> SYMBOL_MAP =
+new HashMap<>(OpType.values().length);
+static {
+  for (OpType opType : values()) {
+SYMBOL_MAP.put(opType.getSymbol(), opType);
+  }
+}
+
 private final String symbol;
 
 OpType(String symbol) {
@@ -99,14 +108,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 }
 
 public static OpType fromSymbol(String symbol) {
-  if (symbol != null) {
-for (OpType opType : values()) {
-  if (opType.getSymbol().equals(symbol)) {
-return opType;
-  }
-}
-  }
-  return null;
+  return SYMBOL_MAP.get(symbol);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32886690/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
index d63ef10..5ccee3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
@@ -111,12 +111,14 @@ public class TestDFSOpsCountStatistics {
 
   @Test
   public void testGetLong() {
+assertNull(statistics.getLong(null));
 assertNull(statistics.getLong(NO_SUCH_OP));
 verifyStatistics();
   }
 
   @Test
   public void testIsTracked() {
+assertFalse(statistics.isTracked(null));
 assertFalse(statistics.isTracked(NO_SUCH_OP));
 
 final Iterator iter = statistics.getLongStatistics();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32886690/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index 3c205f3..36d163c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.s3a;
 
 import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  *

hadoop git commit: HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.

2016-07-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 442162048 -> c2f9cd584


HADOOP-13368. DFSOpsCountStatistics$OpType#fromSymbol and 
s3a.Statistic#fromSymbol should be O(1) operation. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2f9cd58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2f9cd58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2f9cd58

Branch: refs/heads/trunk
Commit: c2f9cd584cd7852b4745396494cc0c423eb645c1
Parents: 4421620
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 15 14:28:53 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 15 14:28:53 2016 -0700

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 18 ++
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  2 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java | 20 
 3 files changed, 24 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f9cd58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index 83d880a..d631dd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -88,6 +89,14 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 
+private static final Map<String, OpType> SYMBOL_MAP =
+new HashMap<>(OpType.values().length);
+static {
+  for (OpType opType : values()) {
+SYMBOL_MAP.put(opType.getSymbol(), opType);
+  }
+}
+
 private final String symbol;
 
 OpType(String symbol) {
@@ -99,14 +108,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 }
 
 public static OpType fromSymbol(String symbol) {
-  if (symbol != null) {
-for (OpType opType : values()) {
-  if (opType.getSymbol().equals(symbol)) {
-return opType;
-  }
-}
-  }
-  return null;
+  return SYMBOL_MAP.get(symbol);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f9cd58/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
index d63ef10..5ccee3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
@@ -111,12 +111,14 @@ public class TestDFSOpsCountStatistics {
 
   @Test
   public void testGetLong() {
+assertNull(statistics.getLong(null));
 assertNull(statistics.getLong(NO_SUCH_OP));
 verifyStatistics();
   }
 
   @Test
   public void testIsTracked() {
+assertFalse(statistics.isTracked(null));
 assertFalse(statistics.isTracked(NO_SUCH_OP));
 
 final Iterator iter = statistics.getLongStatistics();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f9cd58/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index 3c205f3..36d163c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.s3a;
 
 import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * Statistic which are c

hadoop git commit: HDFS-10579. HDFS web interfaces lack configs for X-FRAME-OPTIONS protection. Contributed by Anu Engineer.

2016-07-11 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 02b037f62 -> be1a11c9c


HDFS-10579. HDFS web interfaces lack configs for X-FRAME-OPTIONS protection. 
Contributed by Anu Engineer.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be1a11c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be1a11c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be1a11c9

Branch: refs/heads/branch-2
Commit: be1a11c9c8255ab71f3f8dcb1b83c638ba11025a
Parents: 02b037f
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 11 14:55:33 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 11 17:54:18 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  6 ++
 .../server/datanode/web/DatanodeHttpServer.java | 10 ++
 .../server/namenode/NameNodeHttpServer.java | 20 
 .../src/main/resources/hdfs-default.xml | 24 +
 .../datanode/web/TestDatanodeHttpXFrame.java| 90 ++
 .../namenode/TestNameNodeHttpServerXFrame.java  | 97 
 6 files changed, 247 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1a11c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f3a4dcb..f31eb0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -702,6 +702,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Security-related configs
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
+  public static final String DFS_XFRAME_OPTION_ENABLED = "dfs.xframe.enabled";
+  public static final boolean DFS_XFRAME_OPTION_ENABLED_DEFAULT = true;
+
+  public static final String DFS_XFRAME_OPTION_VALUE = "dfs.xframe.value";
+  public static final String DFS_XFRAME_OPTION_VALUE_DEFAULT = "SAMEORIGIN";
+
   @Deprecated
  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1a11c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0477028..caee6cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -107,6 +107,16 @@ public class DatanodeHttpServer implements Closeable {
.addEndpoint(URI.create("http://localhost:0"))
 .setFindPort(true);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 this.infoServer = builder.build();
 
this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1a11c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index a66eb96..fa93089 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-pr
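
The new keys default X-FRAME-OPTIONS to enabled with the value SAMEORIGIN, which tells browsers not to render these web UIs inside frames served from other origins, a standard clickjacking defense. An illustrative servlet filter showing the effect of the header; the real wiring goes through the HttpServer2 builder rather than a hand-rolled filter:

import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;

// Illustrative only: adds X-FRAME-OPTIONS to every HTTP response.
public class XFrameOptionsFilter implements Filter {
  private String option = "SAMEORIGIN"; // matches DFS_XFRAME_OPTION_VALUE_DEFAULT

  @Override
  public void init(FilterConfig conf) {
    String configured = conf.getInitParameter("xframe.option");
    if (configured != null) {
      option = configured; // e.g. DENY or SAMEORIGIN
    }
  }

  @Override
  public void doFilter(ServletRequest req, ServletResponse resp,
      FilterChain chain) throws IOException, ServletException {
    ((HttpServletResponse) resp).setHeader("X-FRAME-OPTIONS", option);
    chain.doFilter(req, resp);
  }

  @Override
  public void destroy() {
  }
}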

hadoop git commit: HDFS-10579. HDFS web interfaces lack configs for X-FRAME-OPTIONS protection. Contributed by Anu Engineer.

2016-07-11 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0fd3980a1 -> c447efebd


HDFS-10579. HDFS web interfaces lack configs for X-FRAME-OPTIONS protection. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c447efeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c447efeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c447efeb

Branch: refs/heads/trunk
Commit: c447efebdb92dcdf3d95e983036f53bfbed2c0b4
Parents: 0fd3980
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jul 11 14:55:33 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jul 11 14:55:33 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  6 ++
 .../server/datanode/web/DatanodeHttpServer.java | 10 ++
 .../server/namenode/NameNodeHttpServer.java | 20 
 .../src/main/resources/hdfs-default.xml | 24 +
 .../datanode/web/TestDatanodeHttpXFrame.java| 90 ++
 .../namenode/TestNameNodeHttpServerXFrame.java  | 97 
 6 files changed, 247 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c447efeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 08365cd..e734055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -741,6 +741,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   // Security-related configs
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = 
"dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
+  public static final String DFS_XFRAME_OPTION_ENABLED = "dfs.xframe.enabled";
+  public static final boolean DFS_XFRAME_OPTION_ENABLED_DEFAULT = true;
+
+  public static final String DFS_XFRAME_OPTION_VALUE = "dfs.xframe.value";
+  public static final String DFS_XFRAME_OPTION_VALUE_DEFAULT = "SAMEORIGIN";
+
   @Deprecated
   public static final String 
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
   HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c447efeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index f9bdbf6..07b779b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -105,6 +105,16 @@ public class DatanodeHttpServer implements Closeable {
 .addEndpoint(URI.create("http://localhost:0"))
 .setFindPort(true);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 this.infoServer = builder.build();
 
 this.infoServer.setAttribute("datanode", datanode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c447efeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 84229e7..a1959e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -29,6 +29,7 @@ import java.util.Map.Entry;
 
 import javax.servlet.ServletContext;
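
A quick end-to-end check of this change is to request any page from a patched daemon and read the response header. A minimal sketch, assuming a patched NameNode or DataNode web UI is reachable; the host and port below are hypothetical:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CheckXFrame {
      public static void main(String[] args) throws Exception {
        // Hypothetical NameNode web UI address; substitute a real host:port.
        URL url = new URL("http://nn.example.com:50070/index.html");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        // With dfs.xframe.enabled=true (the default), this prints SAMEORIGIN.
        System.out.println(conn.getHeaderField("X-FRAME-OPTIONS"));
        conn.disconnect();
      }
    }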

hadoop git commit: HADOOP-13352. Make X-FRAME-OPTIONS configurable in HttpServer2. Contributed by Anu Engineer.

2016-07-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f515678b6 -> e4023f8b1


HADOOP-13352. Make X-FRAME-OPTIONS configurable in HttpServer2. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4023f8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4023f8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4023f8b

Branch: refs/heads/branch-2
Commit: e4023f8b1390cb769555dc52a610b4df03104836
Parents: f515678
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 8 14:17:14 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 8 14:20:00 2016 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 67 ++-
 .../hadoop/http/HttpServerFunctionalTest.java   | 19 ++
 .../org/apache/hadoop/http/TestHttpServer.java  | 68 ++--
 3 files changed, 147 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4023f8b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 5d49229..c179bd0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -137,6 +137,11 @@ public final class HttpServer2 implements FilterContainer {
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
   private final SignerSecretProvider secretProvider;
+  private XFrameOption xFrameOption;
+  private boolean xFrameOptionIsEnabled;
+  private static final String X_FRAME_VALUE = "xFrameOption";
+  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
+
 
   /**
* Class to construct instances of HTTP server with specific options.
@@ -169,6 +174,9 @@ public final class HttpServer2 implements FilterContainer {
 private String authFilterConfigurationPrefix = 
"hadoop.http.authentication.";
 private String excludeCiphers;
 
+private boolean xFrameEnabled;
+private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN;
+
 public Builder setName(String name){
   this.name = name;
   return this;
@@ -277,6 +285,30 @@ public final class HttpServer2 implements FilterContainer {
   return this;
 }
 
+/**
+ * Adds the ability to control X_FRAME_OPTIONS on HttpServer2.
+ * @param xFrameEnabled - True enables X_FRAME_OPTIONS false disables it.
+ * @return Builder.
+ */
+public Builder configureXFrame(boolean xFrameEnabled) {
+  this.xFrameEnabled = xFrameEnabled;
+  return this;
+}
+
+/**
+ * Sets a valid X-Frame-option that can be used by HttpServer2.
+ * @param option - String DENY, SAMEORIGIN or ALLOW-FROM are the only valid
+ *   options. Any other value will throw IllegalArgument
+ *   Exception.
+ * @return  Builder.
+ */
+public Builder setXFrameOption(String option) {
+  this.xFrameOption = XFrameOption.getEnum(option);
+  return this;
+}
+
+
+
 public HttpServer2 build() throws IOException {
   Preconditions.checkNotNull(name, "name is not set");
   Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
@@ -343,6 +375,9 @@ public final class HttpServer2 implements FilterContainer {
 this.webServer = new Server();
 this.adminsAcl = b.adminsAcl;
 this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, 
appDir);
+this.xFrameOptionIsEnabled = b.xFrameEnabled;
+this.xFrameOption = b.xFrameOption;
+
 try {
   this.secretProvider =
   constructSecretProvider(b, webAppContext.getServletContext());
@@ -399,7 +434,11 @@ public final class HttpServer2 implements FilterContainer {
 
 addDefaultApps(contexts, appDir, conf);
 
-addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
+Map<String, String> xFrameParams = new HashMap<>();
+xFrameParams.put(X_FRAME_ENABLED,
+String.valueOf(this.xFrameOptionIsEnabled));
+xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+addGlobalFilter("safety", QuotingInputFilter.class.getName(), 
xFrameParams);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
  conf = new Configuration(conf);
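
For code that constructs the server directly, a minimal sketch of the two new Builder calls shown above; the server name and endpoint are illustrative:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    static HttpServer2 startServerWithXFrame() throws IOException {
      HttpServer2 server = new HttpServer2.Builder()
          .setName("test")                       // illustrative name
          .addEndpoint(URI.create("http://localhost:0"))
          .setFindPort(true)
          .setConf(new Configuration())
          .configureXFrame(true)                 // enable the header globally
          .setXFrameOption("SAMEORIGIN")         // DENY / ALLOW-FROM also valid
          .build();
      server.start();
      // Every response from this server now carries X-FRAME-OPTIONS.
      return server;
    }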

hadoop git commit: HADOOP-13352. Make X-FRAME-OPTIONS configurable in HttpServer2. Contributed by Anu Engineer.

2016-07-08 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk c04c5ec50 -> ef30bf3c3


HADOOP-13352. Make X-FRAME-OPTIONS configurable in HttpServer2. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef30bf3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef30bf3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef30bf3c

Branch: refs/heads/trunk
Commit: ef30bf3c3f2688f803b3e9d16cc7e9f61a1ab0de
Parents: c04c5ec
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 8 14:17:14 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 8 14:17:14 2016 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 67 ++-
 .../hadoop/http/HttpServerFunctionalTest.java   | 19 ++
 .../org/apache/hadoop/http/TestHttpServer.java  | 68 ++--
 3 files changed, 147 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef30bf3c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 9d2fae6..8199c9b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -136,6 +136,11 @@ public final class HttpServer2 implements FilterContainer {
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
   private final SignerSecretProvider secretProvider;
+  private XFrameOption xFrameOption;
+  private boolean xFrameOptionIsEnabled;
+  private static final String X_FRAME_VALUE = "xFrameOption";
+  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
+
 
   /**
* Class to construct instances of HTTP server with specific options.
@@ -168,6 +173,9 @@ public final class HttpServer2 implements FilterContainer {
 private String authFilterConfigurationPrefix = 
"hadoop.http.authentication.";
 private String excludeCiphers;
 
+private boolean xFrameEnabled;
+private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN;
+
 public Builder setName(String name){
   this.name = name;
   return this;
@@ -276,6 +284,30 @@ public final class HttpServer2 implements FilterContainer {
   return this;
 }
 
+/**
+ * Adds the ability to control X_FRAME_OPTIONS on HttpServer2.
+ * @param xFrameEnabled - True enables X_FRAME_OPTIONS false disables it.
+ * @return Builder.
+ */
+public Builder configureXFrame(boolean xFrameEnabled) {
+  this.xFrameEnabled = xFrameEnabled;
+  return this;
+}
+
+/**
+ * Sets a valid X-Frame-option that can be used by HttpServer2.
+ * @param option - String DENY, SAMEORIGIN or ALLOW-FROM are the only valid
+ *   options. Any other value will throw IllegalArgument
+ *   Exception.
+ * @return  Builder.
+ */
+public Builder setXFrameOption(String option) {
+  this.xFrameOption = XFrameOption.getEnum(option);
+  return this;
+}
+
+
+
 public HttpServer2 build() throws IOException {
   Preconditions.checkNotNull(name, "name is not set");
   Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
@@ -342,6 +374,9 @@ public final class HttpServer2 implements FilterContainer {
 this.webServer = new Server();
 this.adminsAcl = b.adminsAcl;
 this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, 
appDir);
+this.xFrameOptionIsEnabled = b.xFrameEnabled;
+this.xFrameOption = b.xFrameOption;
+
 try {
   this.secretProvider =
   constructSecretProvider(b, webAppContext.getServletContext());
@@ -398,7 +433,11 @@ public final class HttpServer2 implements FilterContainer {
 
 addDefaultApps(contexts, appDir, conf);
 
-addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
+Map<String, String> xFrameParams = new HashMap<>();
+xFrameParams.put(X_FRAME_ENABLED,
+String.valueOf(this.xFrameOptionIsEnabled));
+xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+addGlobalFilter("safety", QuotingInputFilter.class.getName(), 
xFrameParams);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
   conf = new Configuration(conf);
@@ -11
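
Per the setXFrameOption javadoc above, anything other than DENY, SAMEORIGIN or ALLOW-FROM is rejected when the option is set, not silently passed through. A sketch of guarding a value read from user input (xFrameValue is a hypothetical variable):

    HttpServer2.Builder builder = new HttpServer2.Builder();
    try {
      builder.setXFrameOption(xFrameValue);    // e.g. "ALLOWALL" throws here
    } catch (IllegalArgumentException e) {
      builder.setXFrameOption("SAMEORIGIN");   // fall back to the safe default
    }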

hadoop git commit: HADOOP-13283. Support reset operation for new global storage statistics and per FS storage stats. Contributed by Mingliang Liu.

2016-07-07 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 097022e17 -> 9c7ce389c


HADOOP-13283. Support reset operation for new global storage statistics and per 
FS storage stats. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c7ce389
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c7ce389
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c7ce389

Branch: refs/heads/branch-2.8
Commit: 9c7ce389c7fee162e3d1e1643b071656a1a5d09e
Parents: 097022e
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Jul 7 14:58:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Jul 7 15:04:47 2016 -0700

--
 .../hadoop/fs/EmptyStorageStatistics.java   |   7 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +-
 .../hadoop/fs/FileSystemStorageStatistics.java  |   6 +
 .../hadoop/fs/GlobalStorageStatistics.java  |  18 ++-
 .../org/apache/hadoop/fs/StorageStatistics.java |   8 +-
 .../hadoop/fs/UnionStorageStatistics.java   |  26 +++-
 .../fs/TestFileSystemStorageStatistics.java |   2 +-
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   7 +
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  | 137 +++
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  39 +-
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   7 +
 11 files changed, 229 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c7ce389/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
index 1bcfe23..1ef30dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
@@ -29,15 +29,22 @@ class EmptyStorageStatistics extends StorageStatistics {
 super(name);
   }
 
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return Collections.emptyIterator();
   }
 
+  @Override
   public Long getLong(String key) {
 return null;
   }
 
+  @Override
   public boolean isTracked(String key) {
 return false;
   }
+
+  @Override
+  public void reset() {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c7ce389/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 6558d98..e876c3a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3500,8 +3500,11 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* Reset all statistics for all file systems
*/
   public static synchronized void clearStatistics() {
-for(Statistics stat: statisticsTable.values()) {
-  stat.reset();
+final Iterator<StorageStatistics> iterator =
+GlobalStorageStatistics.INSTANCE.iterator();
+while (iterator.hasNext()) {
+  final StorageStatistics statistics = iterator.next();
+  statistics.reset();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c7ce389/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index 6b5b72c..27c8405 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -126,6 +126,7 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
*
* @return True only if the statistic is being tracked.
*/
+  @Override
   public boolean isTracked(String key) {
 for (String k: KEYS) {
   if (k.equals(key)) {
@@ -134,4 +135,9 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
 }
 return false;
   }
+
+  @Override
+  public void reset() {
+    stats.reset();
+  }
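
With this patch a caller can reset every registered statistics object in one call; the long form below is what the patched FileSystem.clearStatistics() now does internally:

    import java.util.Iterator;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.GlobalStorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics;

    // Short form: reset all global and per-FS storage statistics.
    FileSystem.clearStatistics();

    // Equivalent long form, mirroring the patch above:
    Iterator<StorageStatistics> it = GlobalStorageStatistics.INSTANCE.iterator();
    while (it.hasNext()) {
      it.next().reset();
    }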

hadoop git commit: HADOOP-13283. Support reset operation for new global storage statistics and per FS storage stats. Contributed by Mingliang Liu.

2016-07-07 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 909fa138f -> 63b64a20c


HADOOP-13283. Support reset operation for new global storage statistics and per 
FS storage stats. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63b64a20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63b64a20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63b64a20

Branch: refs/heads/branch-2
Commit: 63b64a20c2c90db9f43a926d2a3844b752e9dd09
Parents: 909fa13
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Jul 7 14:58:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Jul 7 15:00:34 2016 -0700

--
 .../hadoop/fs/EmptyStorageStatistics.java   |   7 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +-
 .../hadoop/fs/FileSystemStorageStatistics.java  |   6 +
 .../hadoop/fs/GlobalStorageStatistics.java  |  18 ++-
 .../org/apache/hadoop/fs/StorageStatistics.java |   8 +-
 .../hadoop/fs/UnionStorageStatistics.java   |  26 +++-
 .../fs/TestFileSystemStorageStatistics.java |   2 +-
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   7 +
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  | 137 +++
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  39 +-
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   7 +
 11 files changed, 229 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63b64a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
index 1bcfe23..1ef30dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
@@ -29,15 +29,22 @@ class EmptyStorageStatistics extends StorageStatistics {
 super(name);
   }
 
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return Collections.emptyIterator();
   }
 
+  @Override
   public Long getLong(String key) {
 return null;
   }
 
+  @Override
   public boolean isTracked(String key) {
 return false;
   }
+
+  @Override
+  public void reset() {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63b64a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 740072d..007d90c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3605,8 +3605,11 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* Reset all statistics for all file systems
*/
   public static synchronized void clearStatistics() {
-for(Statistics stat: statisticsTable.values()) {
-  stat.reset();
+final Iterator<StorageStatistics> iterator =
+GlobalStorageStatistics.INSTANCE.iterator();
+while (iterator.hasNext()) {
+  final StorageStatistics statistics = iterator.next();
+  statistics.reset();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63b64a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index 8a1eb54..8c633f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -138,6 +138,7 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
*
* @return True only if the statistic is being tracked.
*/
+  @Override
   public boolean isTracked(String key) {
 for (String k: KEYS) {
   if (k.equals(key)) {
@@ -146,4 +147,9 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
 }
 return false;
   }
+
+  @Override
+  public void reset() {
+    stats.reset();
+  }

hadoop git commit: HADOOP-13283. Support reset operation for new global storage statistics and per FS storage stats. Contributed by Mingliang Liu.

2016-07-07 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk a0035661c -> 6e597600f


HADOOP-13283. Support reset operation for new global storage statistics and per 
FS storage stats. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e597600
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e597600
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e597600

Branch: refs/heads/trunk
Commit: 6e597600f7916772187fa1861daee42e6a5a71d8
Parents: a003566
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Thu Jul 7 14:58:19 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Thu Jul 7 14:58:19 2016 -0700

--
 .../hadoop/fs/EmptyStorageStatistics.java   |   7 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +-
 .../hadoop/fs/FileSystemStorageStatistics.java  |   6 +
 .../hadoop/fs/GlobalStorageStatistics.java  |  18 ++-
 .../org/apache/hadoop/fs/StorageStatistics.java |   8 +-
 .../hadoop/fs/UnionStorageStatistics.java   |  26 +++-
 .../fs/TestFileSystemStorageStatistics.java |   2 +-
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   7 +
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  | 137 +++
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  39 +-
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   7 +
 11 files changed, 229 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e597600/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
index 1bcfe23..1ef30dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
@@ -29,15 +29,22 @@ class EmptyStorageStatistics extends StorageStatistics {
 super(name);
   }
 
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return Collections.emptyIterator();
   }
 
+  @Override
   public Long getLong(String key) {
 return null;
   }
 
+  @Override
   public boolean isTracked(String key) {
 return false;
   }
+
+  @Override
+  public void reset() {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e597600/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9e13a7a..146bce5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3619,8 +3619,11 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* Reset all statistics for all file systems
*/
   public static synchronized void clearStatistics() {
-for(Statistics stat: statisticsTable.values()) {
-  stat.reset();
+final Iterator<StorageStatistics> iterator =
+GlobalStorageStatistics.INSTANCE.iterator();
+while (iterator.hasNext()) {
+  final StorageStatistics statistics = iterator.next();
+  statistics.reset();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e597600/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index 8a1eb54..8c633f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -138,6 +138,7 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
*
* @return True only if the statistic is being tracked.
*/
+  @Override
   public boolean isTracked(String key) {
 for (String k: KEYS) {
   if (k.equals(key)) {
@@ -146,4 +147,9 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
 }
 return false;
   }
+
+  @Override
+  public void reset() {
+    stats.reset();
+  }

hadoop git commit: HADOOP-13305. Define common statistics names across schemes. Contributed by Mingliang Liu.

2016-07-01 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4437e6f33 -> bc7fd76a1


HADOOP-13305. Define common statistics names across schemes. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc7fd76a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc7fd76a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc7fd76a

Branch: refs/heads/branch-2.8
Commit: bc7fd76a1fa2a8bf394de2787737a21000b51910
Parents: 4437e6f
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 1 15:34:03 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 1 15:37:32 2016 -0700

--
 .../hadoop/fs/FileSystemStorageStatistics.java  |   5 +
 .../org/apache/hadoop/fs/StorageStatistics.java |  53 ++
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 102 ++-
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  15 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   5 +
 .../org/apache/hadoop/fs/s3a/Statistic.java |  24 +++--
 6 files changed, 145 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7fd76a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index d85cd3f..6b5b72c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -107,6 +107,11 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
   }
 
   @Override
+  public String getScheme() {
+return stats.getScheme();
+  }
+
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return new LongStatisticIterator(stats.getData());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7fd76a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
index 4bdef80..0971f10 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
@@ -27,6 +27,51 @@ import java.util.Iterator;
  */
 @InterfaceAudience.Public
 public abstract class StorageStatistics {
+
+  /**
+   * These are common statistic names.
+   *
+   * The following names are considered general and preserved across different
+   * StorageStatistics classes. When implementing a new StorageStatistics, it 
is
+   * highly recommended to use the common statistic names.
+   *
+   * When adding new common statistic name constants, please make them unique.
+   * By convention, they are implicitly unique:
+   *  - the name of the constants are uppercase, words separated by 
underscores.
+   *  - the value of the constants are lowercase of the constant names.
+   */
+  public interface CommonStatisticNames {
+// The following names are for file system operation invocations
+String OP_APPEND = "op_append";
+String OP_COPY_FROM_LOCAL_FILE = "op_copy_from_local_file";
+String OP_CREATE = "op_create";
+String OP_CREATE_NON_RECURSIVE = "op_create_non_recursive";
+String OP_DELETE = "op_delete";
+String OP_EXISTS = "op_exists";
+String OP_GET_CONTENT_SUMMARY = "op_get_content_summary";
+String OP_GET_FILE_CHECKSUM = "op_get_file_checksum";
+String OP_GET_FILE_STATUS = "op_get_file_status";
+String OP_GET_STATUS = "op_get_status";
+String OP_GLOB_STATUS = "op_glob_status";
+String OP_IS_FILE = "op_is_file";
+String OP_IS_DIRECTORY = "op_is_directory";
+String OP_LIST_FILES = "op_list_files";
+String OP_LIST_LOCATED_STATUS = "op_list_located_status";
+String OP_LIST_STATUS = "op_list_status";
+String OP_MKDIRS = "op_mkdirs";
+String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries";
+String OP_OPEN = "op_open";
+String OP_REMOVE_ACL = "op_remove_acl";
+String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries";
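
Because these names are preserved across StorageStatistics implementations, the same key can be queried against any scheme. A sketch that sums op_create over every registered instance, assuming some FileSystem clients have already run in this JVM:

    import java.util.Iterator;
    import org.apache.hadoop.fs.GlobalStorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames;

    long totalCreates = 0;
    Iterator<StorageStatistics> it = GlobalStorageStatistics.INSTANCE.iterator();
    while (it.hasNext()) {
      // getLong() returns null when this instance does not track the key.
      Long n = it.next().getLong(CommonStatisticNames.OP_CREATE);
      if (n != null) {
        totalCreates += n;
      }
    }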

hadoop git commit: HADOOP-13305. Define common statistics names across schemes. Contributed by Mingliang Liu.

2016-07-01 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d2559ca30 -> 06b89f0d1


HADOOP-13305. Define common statistics names across schemes. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06b89f0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06b89f0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06b89f0d

Branch: refs/heads/branch-2
Commit: 06b89f0d14b45cedd563be29fa7cf66e6482367c
Parents: d2559ca
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 1 15:34:03 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 1 15:35:50 2016 -0700

--
 .../hadoop/fs/FileSystemStorageStatistics.java  |   5 +
 .../org/apache/hadoop/fs/StorageStatistics.java |  53 ++
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 102 ++-
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  15 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   5 +
 .../org/apache/hadoop/fs/s3a/Statistic.java |  24 +++--
 6 files changed, 145 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06b89f0d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index 7c08863..8a1eb54 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -119,6 +119,11 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
   }
 
   @Override
+  public String getScheme() {
+return stats.getScheme();
+  }
+
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return new LongStatisticIterator(stats.getData());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06b89f0d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
index 4bdef80..0971f10 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
@@ -27,6 +27,51 @@ import java.util.Iterator;
  */
 @InterfaceAudience.Public
 public abstract class StorageStatistics {
+
+  /**
+   * These are common statistic names.
+   *
+   * The following names are considered general and preserved across different
+   * StorageStatistics classes. When implementing a new StorageStatistics, it 
is
+   * highly recommended to use the common statistic names.
+   *
+   * When adding new common statistic name constants, please make them unique.
+   * By convention, they are implicitly unique:
+   *  - the name of the constants are uppercase, words separated by 
underscores.
+   *  - the value of the constants are lowercase of the constant names.
+   */
+  public interface CommonStatisticNames {
+// The following names are for file system operation invocations
+String OP_APPEND = "op_append";
+String OP_COPY_FROM_LOCAL_FILE = "op_copy_from_local_file";
+String OP_CREATE = "op_create";
+String OP_CREATE_NON_RECURSIVE = "op_create_non_recursive";
+String OP_DELETE = "op_delete";
+String OP_EXISTS = "op_exists";
+String OP_GET_CONTENT_SUMMARY = "op_get_content_summary";
+String OP_GET_FILE_CHECKSUM = "op_get_file_checksum";
+String OP_GET_FILE_STATUS = "op_get_file_status";
+String OP_GET_STATUS = "op_get_status";
+String OP_GLOB_STATUS = "op_glob_status";
+String OP_IS_FILE = "op_is_file";
+String OP_IS_DIRECTORY = "op_is_directory";
+String OP_LIST_FILES = "op_list_files";
+String OP_LIST_LOCATED_STATUS = "op_list_located_status";
+String OP_LIST_STATUS = "op_list_status";
+String OP_MKDIRS = "op_mkdirs";
+String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries";
+String OP_OPEN = "op_open";
+String OP_REMOVE_ACL = "op_remove_acl";
+String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries";

hadoop git commit: HADOOP-13305. Define common statistics names across schemes. Contributed by Mingliang Liu.

2016-07-01 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk c35a5a7a8 -> aa42c7a6d


HADOOP-13305. Define common statistics names across schemes. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa42c7a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa42c7a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa42c7a6

Branch: refs/heads/trunk
Commit: aa42c7a6dda23f9dd686cc844b31a5aeebe7e088
Parents: c35a5a7
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Jul 1 15:34:03 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Jul 1 15:34:03 2016 -0700

--
 .../hadoop/fs/FileSystemStorageStatistics.java  |   5 +
 .../org/apache/hadoop/fs/StorageStatistics.java |  53 ++
 .../hadoop/hdfs/DFSOpsCountStatistics.java  | 102 ++-
 .../hadoop/hdfs/TestDFSOpsCountStatistics.java  |  15 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   5 +
 .../org/apache/hadoop/fs/s3a/Statistic.java |  24 +++--
 6 files changed, 145 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa42c7a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
index 7c08863..8a1eb54 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
@@ -119,6 +119,11 @@ public class FileSystemStorageStatistics extends 
StorageStatistics {
   }
 
   @Override
+  public String getScheme() {
+return stats.getScheme();
+  }
+
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
 return new LongStatisticIterator(stats.getData());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa42c7a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
index 4bdef80..0971f10 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
@@ -27,6 +27,51 @@ import java.util.Iterator;
  */
 @InterfaceAudience.Public
 public abstract class StorageStatistics {
+
+  /**
+   * These are common statistic names.
+   *
+   * The following names are considered general and preserved across different
+   * StorageStatistics classes. When implementing a new StorageStatistics, it 
is
+   * highly recommended to use the common statistic names.
+   *
+   * When adding new common statistic name constants, please make them unique.
+   * By convention, they are implicitly unique:
+   *  - the name of the constants are uppercase, words separated by 
underscores.
+   *  - the value of the constants are lowercase of the constant names.
+   */
+  public interface CommonStatisticNames {
+// The following names are for file system operation invocations
+String OP_APPEND = "op_append";
+String OP_COPY_FROM_LOCAL_FILE = "op_copy_from_local_file";
+String OP_CREATE = "op_create";
+String OP_CREATE_NON_RECURSIVE = "op_create_non_recursive";
+String OP_DELETE = "op_delete";
+String OP_EXISTS = "op_exists";
+String OP_GET_CONTENT_SUMMARY = "op_get_content_summary";
+String OP_GET_FILE_CHECKSUM = "op_get_file_checksum";
+String OP_GET_FILE_STATUS = "op_get_file_status";
+String OP_GET_STATUS = "op_get_status";
+String OP_GLOB_STATUS = "op_glob_status";
+String OP_IS_FILE = "op_is_file";
+String OP_IS_DIRECTORY = "op_is_directory";
+String OP_LIST_FILES = "op_list_files";
+String OP_LIST_LOCATED_STATUS = "op_list_located_status";
+String OP_LIST_STATUS = "op_list_status";
+String OP_MKDIRS = "op_mkdirs";
+String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries";
+String OP_OPEN = "op_open";
+String OP_REMOVE_ACL = "op_remove_acl";
+String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries";
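
For implementors the convention reduces to: pick constants from CommonStatisticNames, report them through getLongStatistics()/getLong(), and (after HADOOP-13283 above) support reset(). A minimal hypothetical subclass, not part of this patch:

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.fs.StorageStatistics;

    class CreateCountStatistics extends StorageStatistics {
      private final AtomicLong creates = new AtomicLong();

      CreateCountStatistics() {
        super("CreateCountStatistics");  // hypothetical provider name
      }

      void incrementCreate() {
        creates.incrementAndGet();
      }

      @Override
      public Iterator<LongStatistic> getLongStatistics() {
        return Collections.singletonList(
            new LongStatistic(CommonStatisticNames.OP_CREATE, creates.get()))
            .iterator();
      }

      @Override
      public Long getLong(String key) {
        return isTracked(key) ? creates.get() : null;
      }

      @Override
      public boolean isTracked(String key) {
        return CommonStatisticNames.OP_CREATE.equals(key);
      }

      @Override
      public void reset() {
        creates.set(0);
      }
    }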

hadoop git commit: HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and Jitendra Pandey.

2016-06-20 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2f95d22b1 -> 29c3ee974


HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and 
Jitendra Pandey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29c3ee97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29c3ee97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29c3ee97

Branch: refs/heads/branch-2.8
Commit: 29c3ee9744ca0cc3b9221f00cbb9412f7895247f
Parents: 2f95d22
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jun 20 17:22:55 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jun 20 18:05:32 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 213 -
 .../hadoop/hdfs/DistributedFileSystem.java  |  10 -
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 454 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 187 
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 182 
 5 files changed, 1046 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c3ee97/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
deleted file mode 100644
index 472b1d4..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
-import org.apache.hadoop.io.retry.AsyncCallHandler;
-import org.apache.hadoop.util.concurrent.AsyncGetFuture;
-import org.apache.hadoop.ipc.Client;
-
-/**
- * Implementation of the asynchronous distributed file system.
- * This instance of this class is the way end-user code interacts
- * with a Hadoop DistributedFileSystem in an asynchronous manner.
- *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
- */
-@Unstable
-public class AsyncDistributedFileSystem {
-
-  private final DistributedFileSystem dfs;
-
-  AsyncDistributedFileSystem(final DistributedFileSystem dfs) {
-this.dfs = dfs;
-  }
-
-  private static <T> Future<T> getReturnValue() {
-return (Future<T>) new AsyncGetFuture<>(AsyncCallHandler.getAsyncReturn());
-  }
-
-  /**
-   * Renames Path src to Path dst
-   * 
-   * Fails if src is a file and dst is a directory.
-   * Fails if src is a directory and dst is a file.
-   * Fails if the parent of dst does not exist or is a file.
-   * 
-   * 
-   * If OVERWRITE option is not passed as an argument, rename fails if the dst
-   * already exists.
-   * 
-   * If OVERWRITE option is passed as an argument, rename overwrites the dst if
-   * it is a file or an empty directory. Rename fails if dst is a non-empty
-   * directory.
-   * 
-   * Note that atomicity of rename is dependent on the file system
-   * implementation. Please refer to the file system documentation for details.
-   * This default implementation is non atomic.
-   *
-   * @param src
-   *  path to be renamed
-   *
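
With the asynchronous API removed, callers fall back to the synchronous DistributedFileSystem.rename with the same OVERWRITE semantics described in the javadoc above. A minimal sketch; the NameNode URI and paths are illustrative, and both calls may throw IOException:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Overwrites dst if it is a file or an empty directory, per the javadoc.
    dfs.rename(new Path("/tmp/src"), new Path("/tmp/dst"), Options.Rename.OVERWRITE);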

hadoop git commit: HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and Jitendra Pandey.

2016-06-20 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8f93bbcb1 -> f2ef1145c


HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and 
Jitendra Pandey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2ef1145
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2ef1145
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2ef1145

Branch: refs/heads/branch-2
Commit: f2ef1145c5b4815670b08e1b29623a85501efeb0
Parents: 8f93bbc
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jun 20 17:22:55 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jun 20 18:04:05 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 213 -
 .../hadoop/hdfs/DistributedFileSystem.java  |  10 -
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 454 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 187 
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 182 
 5 files changed, 1046 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ef1145/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
deleted file mode 100644
index 472b1d4..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
-import org.apache.hadoop.io.retry.AsyncCallHandler;
-import org.apache.hadoop.util.concurrent.AsyncGetFuture;
-import org.apache.hadoop.ipc.Client;
-
-/**
- * Implementation of the asynchronous distributed file system.
- * This instance of this class is the way end-user code interacts
- * with a Hadoop DistributedFileSystem in an asynchronous manner.
- *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
- */
-@Unstable
-public class AsyncDistributedFileSystem {
-
-  private final DistributedFileSystem dfs;
-
-  AsyncDistributedFileSystem(final DistributedFileSystem dfs) {
-this.dfs = dfs;
-  }
-
-  private static <T> Future<T> getReturnValue() {
-return (Future<T>) new AsyncGetFuture<>(AsyncCallHandler.getAsyncReturn());
-  }
-
-  /**
-   * Renames Path src to Path dst
-   * 
-   * Fails if src is a file and dst is a directory.
-   * Fails if src is a directory and dst is a file.
-   * Fails if the parent of dst does not exist or is a file.
-   * 
-   * 
-   * If OVERWRITE option is not passed as an argument, rename fails if the dst
-   * already exists.
-   * 
-   * If OVERWRITE option is passed as an argument, rename overwrites the dst if
-   * it is a file or an empty directory. Rename fails if dst is a non-empty
-   * directory.
-   * 
-   * Note that atomicity of rename is dependent on the file system
-   * implementation. Please refer to the file system documentation for details.
-   * This default implementation is non atomic.
-   *
-   * @param src
-   *  path to be renamed
-   *

hadoop git commit: HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and Jitendra Pandey.

2016-06-20 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk b7c4cf712 -> 7adc4d176


HDFS-10538. Remove AsyncDistributedFileSystem. Contributed by Xiaobing Zhou and 
Jitendra Pandey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7adc4d17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7adc4d17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7adc4d17

Branch: refs/heads/trunk
Commit: 7adc4d17691816ad32d8d71974a62b9f920cb4c2
Parents: b7c4cf7
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jun 20 17:22:55 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jun 20 17:22:55 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 213 -
 .../hadoop/hdfs/DistributedFileSystem.java  |  10 -
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 454 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 187 
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 
 5 files changed, 1045 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7adc4d17/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
deleted file mode 100644
index 824336a..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
-import org.apache.hadoop.io.retry.AsyncCallHandler;
-import org.apache.hadoop.util.concurrent.AsyncGetFuture;
-import org.apache.hadoop.ipc.Client;
-
-/**
- * Implementation of the asynchronous distributed file system.
- * This instance of this class is the way end-user code interacts
- * with a Hadoop DistributedFileSystem in an asynchronous manner.
- *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
- */
-@Unstable
-public class AsyncDistributedFileSystem {
-
-  private final DistributedFileSystem dfs;
-
-  AsyncDistributedFileSystem(final DistributedFileSystem dfs) {
-this.dfs = dfs;
-  }
-
-  private static <T> Future<T> getReturnValue() {
-return new AsyncGetFuture<>(AsyncCallHandler.getAsyncReturn());
-  }
-
-  /**
-   * Renames Path src to Path dst
-   * 
-   * Fails if src is a file and dst is a directory.
-   * Fails if src is a directory and dst is a file.
-   * Fails if the parent of dst does not exist or is a file.
-   * 
-   * 
-   * If OVERWRITE option is not passed as an argument, rename fails if the dst
-   * already exists.
-   * 
-   * If OVERWRITE option is passed as an argument, rename overwrites the dst if
-   * it is a file or an empty directory. Rename fails if dst is a non-empty
-   * directory.
-   * 
-   * Note that atomicity of rename is dependent on the file system
-   * implementation. Please refer to the file system documentation for details.
-   * This default implementation is non atomic.
-   *
-   * @param src
-   *  path to be renamed
-   * @param

hadoop git commit: HADOOP-12291. Add support for nested groups in LdapGroupsMapping. Contributed by Esther Kundin.

2016-06-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 75235149a -> 14b849489


HADOOP-12291. Add support for nested groups in LdapGroupsMapping. Contributed 
by Esther Kundin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b84948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b84948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b84948

Branch: refs/heads/branch-2
Commit: 14b849489ad60ef146d213bd350bb912b0aa5e84
Parents: 7523514
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Jun 15 11:41:49 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Jun 15 13:29:29 2016 -0700

--
 .../hadoop/security/LdapGroupsMapping.java  | 114 ---
 .../src/main/resources/core-default.xml |  13 +++
 .../hadoop/security/TestLdapGroupsMapping.java  |  62 --
 .../security/TestLdapGroupsMappingBase.java |  33 +-
 .../TestLdapGroupsMappingWithPosixGroup.java|   2 +-
 5 files changed, 198 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b84948/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index da87369..5a0b1d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Hashtable;
 import java.util.List;
+import java.util.HashSet;
+import java.util.Collection;
+import java.util.Set;
 
 import javax.naming.Context;
 import javax.naming.NamingEnumeration;
@@ -66,9 +69,11 @@ import org.apache.hadoop.conf.Configuration;
  * is used for searching users or groups which returns more results than are
  * allowed by the server, an exception will be thrown.
  * 
- * The implementation also does not attempt to resolve group hierarchies. In
- * order to be considered a member of a group, the user must be an explicit
- * member in LDAP.
+ * The implementation attempts to resolve group hierarchies,
+ * to a configurable limit.
+ * If the limit is 0, in order to be considered a member of a group,
+ * the user must be an explicit member in LDAP.  Otherwise, it will traverse 
the
+ * group hierarchy n levels up.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
@@ -157,6 +162,13 @@ public class LdapGroupsMapping
   public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
 
   /*
+   * How many levels to traverse when checking for groups in the org hierarchy
+   */
+  public static final String GROUP_HIERARCHY_LEVELS_KEY =
+LDAP_CONFIG_PREFIX + ".search.group.hierarchy.levels";
+  public static final int GROUP_HIERARCHY_LEVELS_DEFAULT = 0;
+
+  /*
* LDAP attribute names to use when doing posix-like lookups
*/
   public static final String POSIX_UID_ATTR_KEY = LDAP_CONFIG_PREFIX + 
".posix.attr.uid.name";
@@ -208,6 +220,7 @@ public class LdapGroupsMapping
   private String memberOfAttr;
   private String groupMemberAttr;
   private String groupNameAttr;
+  private int    groupHierarchyLevels;
   private String posixUidAttr;
   private String posixGidAttr;
   private boolean isPosix;
@@ -234,7 +247,7 @@ public class LdapGroupsMapping
  */
 for(int retry = 0; retry < RECONNECT_RETRY_COUNT; retry++) {
   try {
-return doGetGroups(user);
+return doGetGroups(user, groupHierarchyLevels);
   } catch (NamingException e) {
 LOG.warn("Failed to get groups for user " + user + " (retry=" + retry
 + ") by " + e);
@@ -324,9 +337,11 @@ public class LdapGroupsMapping
* @return a list of strings representing group names of the user.
* @throws NamingException if unable to find group names
*/
-  private List<String> lookupGroup(SearchResult result, DirContext c)
+  private List<String> lookupGroup(SearchResult result, DirContext c,
+      int goUpHierarchy)
       throws NamingException {
     List<String> groups = new ArrayList<String>();
+    Set<String> groupDNs = new HashSet<String>();
 
    NamingEnumeration<SearchResult> groupResults = null;
 // perform the second LDAP query
@@ -345,12 +360,14 @@ public class LdapGroupsMapping
 if (groupResults != null) {
   while (groupResults.hasMoreElements()) {
 SearchResult groupResult = groupR
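
A quick note on using the new knob: the full key is LDAP_CONFIG_PREFIX plus
".search.group.hierarchy.levels", i.e.
"hadoop.security.group.mapping.ldap.search.group.hierarchy.levels". A minimal,
illustrative sketch of setting it programmatically (class and key names as in
the hunk above; treat this as a sketch, not the committed code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.LdapGroupsMapping;

public class NestedGroupsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Walk up to two levels of parent groups when resolving membership.
    // With the default of 0, only direct LDAP membership counts.
    conf.setInt(
        "hadoop.security.group.mapping.ldap.search.group.hierarchy.levels", 2);
    LdapGroupsMapping mapping = new LdapGroupsMapping();
    mapping.setConf(conf); // picks up the hierarchy limit with the other keys
  }
}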

hadoop git commit: HADOOP-12291. Add support for nested groups in LdapGroupsMapping. Contributed by Esther Kundin.

2016-06-15 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25064fb2f -> 6f0aa7512


HADOOP-12291. Add support for nested groups in LdapGroupsMapping. Contributed 
by Esther Kundin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f0aa751
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f0aa751
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f0aa751

Branch: refs/heads/trunk
Commit: 6f0aa75121224589fe1e20630c597f851ef3bed2
Parents: 25064fb
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Wed Jun 15 11:41:49 2016 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Wed Jun 15 11:41:49 2016 -0700

--
 .../hadoop/security/LdapGroupsMapping.java  | 114 ---
 .../src/main/resources/core-default.xml |  13 +++
 .../hadoop/security/TestLdapGroupsMapping.java  |  62 --
 .../security/TestLdapGroupsMappingBase.java |  33 +-
 .../TestLdapGroupsMappingWithPosixGroup.java|   2 +-
 5 files changed, 198 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f0aa751/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index da87369..5a0b1d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Hashtable;
 import java.util.List;
+import java.util.HashSet;
+import java.util.Collection;
+import java.util.Set;
 
 import javax.naming.Context;
 import javax.naming.NamingEnumeration;
@@ -66,9 +69,11 @@ import org.apache.hadoop.conf.Configuration;
  * is used for searching users or groups which returns more results than are
  * allowed by the server, an exception will be thrown.
  * 
- * The implementation also does not attempt to resolve group hierarchies. In
- * order to be considered a member of a group, the user must be an explicit
- * member in LDAP.
+ * The implementation attempts to resolve group hierarchies,
+ * to a configurable limit.
+ * If the limit is 0, in order to be considered a member of a group,
+ * the user must be an explicit member in LDAP.  Otherwise, it will traverse the
+ * group hierarchy n levels up.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
@@ -157,6 +162,13 @@ public class LdapGroupsMapping
   public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
 
   /*
+   * How many levels to traverse when checking for groups in the org hierarchy
+   */
+  public static final String GROUP_HIERARCHY_LEVELS_KEY =
+LDAP_CONFIG_PREFIX + ".search.group.hierarchy.levels";
+  public static final int GROUP_HIERARCHY_LEVELS_DEFAULT = 0;
+
+  /*
* LDAP attribute names to use when doing posix-like lookups
*/
   public static final String POSIX_UID_ATTR_KEY = LDAP_CONFIG_PREFIX + 
".posix.attr.uid.name";
@@ -208,6 +220,7 @@ public class LdapGroupsMapping
   private String memberOfAttr;
   private String groupMemberAttr;
   private String groupNameAttr;
+  private int    groupHierarchyLevels;
   private String posixUidAttr;
   private String posixGidAttr;
   private boolean isPosix;
@@ -234,7 +247,7 @@ public class LdapGroupsMapping
  */
 for(int retry = 0; retry < RECONNECT_RETRY_COUNT; retry++) {
   try {
-return doGetGroups(user);
+return doGetGroups(user, groupHierarchyLevels);
   } catch (NamingException e) {
 LOG.warn("Failed to get groups for user " + user + " (retry=" + retry
 + ") by " + e);
@@ -324,9 +337,11 @@ public class LdapGroupsMapping
* @return a list of strings representing group names of the user.
* @throws NamingException if unable to find group names
*/
-  private List<String> lookupGroup(SearchResult result, DirContext c)
+  private List<String> lookupGroup(SearchResult result, DirContext c,
+      int goUpHierarchy)
       throws NamingException {
     List<String> groups = new ArrayList<String>();
+    Set<String> groupDNs = new HashSet<String>();
 
    NamingEnumeration<SearchResult> groupResults = null;
 // perform the second LDAP query
@@ -345,12 +360,14 @@ public class LdapGroupsMapping
 if (groupResults != null) {
   while (groupResults.hasMoreElements()) {
 SearchResult groupResult = groupR
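
The email is truncated by the archive before the traversal logic; as a rough
sketch of the approach the new javadoc describes (a bounded upward walk with a
visited set to guard against cyclic group graphs), using a hypothetical
GroupDirectory lookup in place of the two LDAP queries:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class HierarchyWalkSketch {

  /** Hypothetical stand-in for the LDAP queries in lookupGroup. */
  interface GroupDirectory {
    List<String> directGroupsOf(String dn); // DNs of groups containing dn
    String nameOf(String dn);               // group name attribute, e.g. cn
  }

  /** Illustrative only: collect groups up to 'levels' steps up the tree. */
  static List<String> resolveGroups(String userDn, int levels,
      GroupDirectory dir) {
    List<String> groups = new ArrayList<String>();
    Set<String> visited = new HashSet<String>(); // like the groupDNs set above
    List<String> frontier = dir.directGroupsOf(userDn);
    for (int level = 0; level <= levels && !frontier.isEmpty(); level++) {
      List<String> next = new ArrayList<String>();
      for (String dn : frontier) {
        if (visited.add(dn)) {                 // skip DNs already seen (cycles)
          groups.add(dir.nameOf(dn));
          next.addAll(dir.directGroupsOf(dn)); // one level up
        }
      }
      frontier = next;
    }
    return groups;
  }
}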

hadoop git commit: HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by Mingliang Liu.

2016-01-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3efc72ccd -> f1f61eac1


HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1f61eac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1f61eac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1f61eac

Branch: refs/heads/branch-2.8
Commit: f1f61eac15f9a4dcf80131d4bb54aabc42ea254b
Parents: 3efc72c
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 25 15:42:25 2016 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 25 17:06:23 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 48 ++--
 2 files changed, 37 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1f61eac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 94eed3e..e66ec7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1733,6 +1733,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
+    HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang Liu
+    via jitendra)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1f61eac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 13e8644..e8cd476 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -21,11 +21,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,10 +53,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestLeaseRecovery2 {
   
@@ -85,12 +90,15 @@ public class TestLeaseRecovery2 {
* 
* @throws IOException
*/
-  @BeforeClass
-  public static void startUp() throws IOException {
+  @Before
+  public void startUp() throws IOException {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.checkExitOnShutdown(false)
+.build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
   }
@@ -99,8 +107,8 @@ public class TestLeaseRecovery2 {
* stop the cluster
* @throws IOException
*/
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 if (cluster != null) {
   IOUtils.closeStream(dfs);
   cluster.shutdown();
@@ -419,17 +427,17 @@ public class TestLeaseRecovery2 {
* 
* @throws Exception
*/
-  @Test
+  @Test(timeout = 3)
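
Moving from @BeforeClass/@AfterClass to @Before/@After is the core of the fix:
each test now gets a freshly built mini-cluster, so a test that kills datanodes
or aborts leases can no longer poison its neighbors. The pattern, sketched with
a hypothetical stand-in for MiniDFSCluster so it compiles on its own:

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class PerTestClusterSketch {

  /** Hypothetical stand-in for MiniDFSCluster. */
  static class FakeCluster {
    void shutdown() { /* release resources */ }
  }

  private FakeCluster cluster;

  @Before
  public void startUp() {
    cluster = new FakeCluster(); // rebuilt before every test method
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();        // no state leaks into the next test
    }
  }

  @Test(timeout = 30000)         // a per-test cap; the exact value is a choice
  public void testSomething() {
    // runs against a clean cluster
  }
}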

hadoop git commit: HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by Mingliang Liu.

2016-01-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec4d2d9f4 -> e8650fea1


HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8650fea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8650fea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8650fea

Branch: refs/heads/trunk
Commit: e8650fea1f0837026cbb36ae8bf51c6133259809
Parents: ec4d2d9
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 25 15:42:25 2016 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 25 16:08:46 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 48 ++--
 2 files changed, 37 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f35ae3d..68d5de6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2690,6 +2690,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
+    HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang Liu
+    via jitendra)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 13e8644..e8cd476 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -21,11 +21,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,10 +53,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestLeaseRecovery2 {
   
@@ -85,12 +90,15 @@ public class TestLeaseRecovery2 {
* 
* @throws IOException
*/
-  @BeforeClass
-  public static void startUp() throws IOException {
+  @Before
+  public void startUp() throws IOException {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.checkExitOnShutdown(false)
+.build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
   }
@@ -99,8 +107,8 @@ public class TestLeaseRecovery2 {
* stop the cluster
* @throws IOException
*/
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 if (cluster != null) {
   IOUtils.closeStream(dfs);
   cluster.shutdown();
@@ -419,17 +427,17 @@ public class TestLeaseRecovery2 {
* 
* @throws Exception
*/
-  @Test
+  @Test(timeout = 3)
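
The new com.google.common.base.Supplier and GenericTestUtils imports suggest
the usual remedy for intermittent timing failures: poll for the condition
instead of sleeping a fixed interval. A sketch of that idiom (the condition
itself is illustrative; waitFor takes a check, a poll interval, and a timeout,
both in milliseconds):

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  private static volatile boolean recovered; // illustrative condition

  public static void main(String[] args) throws Exception {
    new Thread(new Runnable() {      // simulate the asynchronous event
      @Override
      public void run() {
        try {
          Thread.sleep(500);
        } catch (InterruptedException ignored) {
        }
        recovered = true;
      }
    }).start();
    // Re-check every 100 ms; fail with TimeoutException after 30 s.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return recovered;
      }
    }, 100, 30000);
  }
}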

hadoop git commit: HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by Mingliang Liu.

2016-01-25 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 47b20d0c7 -> 433497618


HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43349761
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43349761
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43349761

Branch: refs/heads/branch-2
Commit: 4334976187100afe3be499d63ead8f17f09f8a14
Parents: 47b20d0
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Mon Jan 25 15:42:25 2016 -0800
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Mon Jan 25 17:06:36 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 48 ++--
 2 files changed, 37 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43349761/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index de7b91d..3e82571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1803,6 +1803,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
+    HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang Liu
+    via jitendra)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43349761/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 13e8644..e8cd476 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -21,11 +21,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,10 +53,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestLeaseRecovery2 {
   
@@ -85,12 +90,15 @@ public class TestLeaseRecovery2 {
* 
* @throws IOException
*/
-  @BeforeClass
-  public static void startUp() throws IOException {
+  @Before
+  public void startUp() throws IOException {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.checkExitOnShutdown(false)
+.build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
   }
@@ -99,8 +107,8 @@ public class TestLeaseRecovery2 {
* stop the cluster
* @throws IOException
*/
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 if (cluster != null) {
   IOUtils.closeStream(dfs);
   cluster.shutdown();
@@ -419,17 +427,17 @@ public class TestLeaseRecovery2 {
* 
* @throws Exception
*/
-  @Test
+  @Test(timeout = 3)
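
The Mockito spy/doNothing imports point at partial stubbing: wrap a real
object, silence one method, leave the rest intact. Shown here on a
hypothetical collaborator rather than the FSEditLog plumbing in the
(truncated) diff:

import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;

public class SpySketch {

  /** Hypothetical collaborator; not an HDFS class. */
  static class EditLog {
    void logSync() { System.out.println("real logSync"); }
    void append(String op) { System.out.println("append " + op); }
  }

  public static void main(String[] args) {
    EditLog log = spy(new EditLog());
    doNothing().when(log).logSync(); // stub out just this method
    log.append("op1");               // still runs the real implementation
    log.logSync();                   // now a no-op
  }
}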

hadoop git commit: HDFS-9184. Logging HDFS operation's caller context into audit logs. Contributed by Mingliang Liu.

2015-10-23 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk eb6379ca2 -> 600ad7bf4


HDFS-9184. Logging HDFS operation's caller context into audit logs. Contributed 
by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/600ad7bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/600ad7bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/600ad7bf

Branch: refs/heads/trunk
Commit: 600ad7bf4104bcaeec00a4089d59bb1fdf423299
Parents: eb6379c
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Oct 23 12:15:01 2015 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Oct 23 12:15:01 2015 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |  11 ++
 .../org/apache/hadoop/ipc/CallerContext.java| 147 
 .../main/java/org/apache/hadoop/ipc/Server.java |  22 ++-
 .../java/org/apache/hadoop/util/ProtoUtil.java  |  13 ++
 .../src/main/proto/RpcHeader.proto  |   9 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  42 -
 .../hdfs/server/namenode/HdfsAuditLogger.java   |   7 +-
 .../server/namenode/TestAuditLogAtDebug.java|   2 +-
 .../hdfs/server/namenode/TestAuditLogger.java   | 176 +++
 10 files changed, 421 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/600ad7bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 9fff33e..f75edd5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -183,6 +183,17 @@ public class CommonConfigurationKeysPublic {
   /** Default value for TFILE_FS_OUTPUT_BUFFER_SIZE_KEY */
   public static final int TFILE_FS_OUTPUT_BUFFER_SIZE_DEFAULT = 256*1024;
 
+  public static final String  HADOOP_CALLER_CONTEXT_ENABLED_KEY =
+  "hadoop.caller.context.enabled";
+  public static final boolean HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT = false;
+  public static final String  HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY =
+  "hadoop.caller.context.max.size";
+  public static final int HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT = 128;
+  public static final String  HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY =
+  "hadoop.caller.context.signature.max.size";
+  public static final int HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_DEFAULT =
+      40;
+
   /** See core-default.xml */
   public static final String  IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY =
 "ipc.client.connection.maxidletime";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600ad7bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
new file mode 100644
index 000..8be7e35
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.nio.charset.C
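
The email cuts off inside CallerContext, but the shape of the API is visible:
build an immutable context, install it for the current thread, and subsequent
RPCs carry it into the NameNode audit log. A hedged usage sketch (method names
per the class added above; details may differ from the truncated source):

import org.apache.hadoop.ipc.CallerContext;

public class CallerContextSketch {
  public static void main(String[] args) {
    // Tag this thread's HDFS RPCs, e.g. with an upstream job or query id.
    CallerContext context =
        new CallerContext.Builder("hive_query_id:q_42").build();
    CallerContext.setCurrent(context);
    // ... file system operations issued here are audited with the tag ...
    System.out.println("current: " + CallerContext.getCurrent());
  }
}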

hadoop git commit: HDFS-9184. Logging HDFS operation's caller context into audit logs. Contributed by Mingliang Liu.

2015-10-23 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 801ce47e2 -> 63cebf57d


HDFS-9184. Logging HDFS operation's caller context into audit logs. Contributed 
by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63cebf57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63cebf57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63cebf57

Branch: refs/heads/branch-2
Commit: 63cebf57d0235e2fa7f42b25b27c244c568d742b
Parents: 801ce47
Author: Jitendra Pandey <jiten...@apache.org>
Authored: Fri Oct 23 12:15:01 2015 -0700
Committer: Jitendra Pandey <jiten...@apache.org>
Committed: Fri Oct 23 12:17:20 2015 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |  11 ++
 .../org/apache/hadoop/ipc/CallerContext.java| 147 
 .../main/java/org/apache/hadoop/ipc/Server.java |  22 ++-
 .../java/org/apache/hadoop/util/ProtoUtil.java  |  13 ++
 .../src/main/proto/RpcHeader.proto  |   9 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  41 -
 .../hdfs/server/namenode/HdfsAuditLogger.java   |   7 +-
 .../server/namenode/TestAuditLogAtDebug.java|   2 +-
 .../hdfs/server/namenode/TestAuditLogger.java   | 176 +++
 10 files changed, 421 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63cebf57/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index d682f33..05a1dcd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -188,6 +188,17 @@ public class CommonConfigurationKeysPublic {
   /** Default value for TFILE_FS_OUTPUT_BUFFER_SIZE_KEY */
   public static final int TFILE_FS_OUTPUT_BUFFER_SIZE_DEFAULT = 256*1024;
 
+  public static final String  HADOOP_CALLER_CONTEXT_ENABLED_KEY =
+  "hadoop.caller.context.enabled";
+  public static final boolean HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT = false;
+  public static final String  HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY =
+  "hadoop.caller.context.max.size";
+  public static final int HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT = 128;
+  public static final String  HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY =
+  "hadoop.caller.context.signature.max.size";
+  public static final int HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_DEFAULT =
+      40;
+
   /** See core-default.xml */
   public static final String  IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY =
 "ipc.client.connection.maxidletime";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63cebf57/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
new file mode 100644
index 000..8be7e35
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.nio.charset.C
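
The feature is off by default (HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT = false
above), so nothing changes for existing clusters until it is switched on. A
minimal sketch using the key strings from the hunk:

import org.apache.hadoop.conf.Configuration;

public class EnableCallerContextSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hadoop.caller.context.enabled", true);
    // Size limits for the context and signature fields in the RPC header;
    // the values below are just the defaults from the hunk, made explicit.
    conf.setInt("hadoop.caller.context.max.size", 128);
    conf.setInt("hadoop.caller.context.signature.max.size", 40);
  }
}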
