[06/50] [abbrv] hadoop git commit: HDDS-518. Implement PutObject Rest endpoint. Contributed by chencan.

2018-10-16 Thread shv
HDDS-518. Implement PutObject Rest endpoint. Contributed by chencan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae268b48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae268b48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae268b48

Branch: refs/heads/HDFS-12943
Commit: ae268b48943dcce1adacd4bf01231fcb21d76f3a
Parents: cdad91c
Author: Márton Elek 
Authored: Sat Oct 13 15:55:57 2018 +0200
Committer: Márton Elek 
Committed: Sat Oct 13 15:55:57 2018 +0200

--
 .../hadoop/ozone/s3/object/PutObject.java   | 53 +---
 .../hadoop/ozone/s3/object/TestPutObject.java   | 89 
 2 files changed, 131 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae268b48/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/object/PutObject.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/object/PutObject.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/object/PutObject.java
index 53e63d8..84c25eb 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/object/PutObject.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/object/PutObject.java
@@ -17,45 +17,76 @@
  */
 package org.apache.hadoop.ozone.s3.object;
 
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.HeaderParam;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.s3.EndpointBase;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.ozone.s3.EndpointBase;
+import org.apache.http.HttpStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * File upload.
  */
-@Path("/{volume}/{bucket}/{path:.+}")
+@Path("/{bucket}/{path:.+}")
 public class PutObject extends EndpointBase {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(PutObject.class);
+
   @PUT
   @Produces(MediaType.APPLICATION_XML)
-  public void put(
-  @PathParam("volume") String volumeName,
+  public Response put(
+  @Context HttpHeaders headers,
   @PathParam("bucket") String bucketName,
   @PathParam("path") String keyPath,
+  @DefaultValue("STAND_ALONE" ) @QueryParam("replicationType")
+  ReplicationType replicationType,
+  @DefaultValue("ONE") @QueryParam("replicationFactor")
+  ReplicationFactor replicationFactor,
+  @DefaultValue("32 * 1024 * 1024") @QueryParam("chunkSize")
+  String chunkSize,
   @HeaderParam("Content-Length") long length,
   InputStream body) throws IOException {
 
-OzoneBucket bucket = getBucket(volumeName, bucketName);
+try {
+  Configuration config = new OzoneConfiguration();
+  config.set(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
+
+  OzoneBucket bucket = getVolume(getOzoneVolumeName(bucketName))
+  .getBucket(bucketName);
+  OzoneOutputStream output = bucket
+  .createKey(keyPath, length, replicationType, replicationFactor);
 
-OzoneOutputStream output = bucket
-.createKey(keyPath, length, ReplicationType.STAND_ALONE,
-ReplicationFactor.ONE);
+  IOUtils.copy(body, output);
+  output.close();
 
-IOUtils.copy(body, output);
-output.close();
+  return Response.ok().status(HttpStatus.SC_OK)
+  .header("Content-Length", length)
+  .build();
+} catch (IOException ex) {
+  LOG.error("Exception occurred in PutObject", ex);
+  throw ex;
+}
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae268b48/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
new file mode 100644
index 000..a8f0648
--- /dev/null
+++ 
b/hadoop-ozone/s3gateway/src/te
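
A usage note on the route change above: the path template moves from /{volume}/{bucket}/{path:.+} to /{bucket}/{path:.+}, with the volume resolved internally (getOzoneVolumeName). A minimal client-side sketch of driving such an endpoint over plain HTTP; the gateway address, bucket and key below are illustrative assumptions, not part of the commit:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class PutObjectClientSketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "hello ozone".getBytes(StandardCharsets.UTF_8);
    // Assumed gateway address; the /{bucket}/{path:.+} shape follows this commit.
    URL url = new URL("http://localhost:9878/mybucket/dir1/file1");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    conn.setFixedLengthStreamingMode(payload.length); // sent as Content-Length
    try (OutputStream out = conn.getOutputStream()) {
      out.write(payload);
    }
    System.out.println("HTTP status: " + conn.getResponseCode());
  }
}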

[15/50] [abbrv] hadoop git commit: YARN-8836. Add tags and attributes in resource definition. Contributed by Weiwei Yang.

2018-10-16 Thread shv
YARN-8836. Add tags and attributes in resource definition. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e5365e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e5365e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e5365e2

Branch: refs/heads/HDFS-12943
Commit: 8e5365e277a184ff65f2f6bca2bf037d1a9f3fd0
Parents: 5033deb
Author: Sunil G 
Authored: Mon Oct 15 15:38:42 2018 +0530
Committer: Sunil G 
Committed: Mon Oct 15 15:38:42 2018 +0530

--
 .../yarn/api/records/ResourceInformation.java   |  68 +-
 .../yarn/util/resource/ResourceUtils.java   |   9 +-
 .../src/main/proto/yarn_protos.proto|   2 +
 .../yarn/conf/TestResourceInformation.java  |  44 +++
 .../yarn/api/records/impl/pb/ProtoUtils.java|  28 
 .../api/records/impl/pb/ResourcePBImpl.java |  23 
 .../hadoop/yarn/api/TestResourcePBImpl.java | 128 ++-
 .../resource-types/resource-types-5.xml |  53 
 8 files changed, 350 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e5365e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index c83c3a2..057e94e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -19,11 +19,15 @@
 package org.apache.hadoop.yarn.api.records;
 
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Class to encapsulate information about a Resource - the name of the 
resource,
@@ -37,6 +41,8 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
   private long value;
   private long minimumAllocation;
   private long maximumAllocation;
+  private Set<String> tags = new HashSet<>();
+  private Map<String, String> attributes = new HashMap<>();
 
   // Known resource types
   public static final String MEMORY_URI = "memory-mb";
@@ -185,6 +191,42 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
   }
 
   /**
+   * Get the attributes of the resource.
+   * @return resource attributes
+   */
+  public Map<String, String> getAttributes() {
+return attributes;
+  }
+
+  /**
+   * Set a map of attributes to the resource.
+   * @param attributes resource attributes
+   */
+  public void setAttributes(Map<String, String> attributes) {
+if (attributes != null) {
+  this.attributes = attributes;
+}
+  }
+
+  /**
+   * Get resource tags.
+   * @return resource tags
+   */
+  public Set<String> getTags() {
+return this.tags;
+  }
+
+  /**
+   * Add tags to the resource.
+   * @param tags resource tags
+   */
+  public void setTags(Set<String> tags) {
+if (tags != null) {
+  this.tags = tags;
+}
+  }
+
+  /**
* Create a new instance of ResourceInformation from another object.
*
* @param other the object from which the new object should be created
@@ -199,6 +241,15 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
   public static ResourceInformation newInstance(String name, String units,
   long value, ResourceTypes type, long minimumAllocation,
   long maximumAllocation) {
+return ResourceInformation.newInstance(name, units, value, type,
+minimumAllocation, maximumAllocation,
+ImmutableSet.of(), ImmutableMap.of());
+  }
+
+  public static ResourceInformation newInstance(String name, String units,
+  long value, ResourceTypes type, long minimumAllocation,
+  long maximumAllocation,
+  Set<String> tags, Map<String, String> attributes) {
 ResourceInformation ret = new ResourceInformation();
 ret.setName(name);
 ret.setResourceType(type);
@@ -206,6 +257,8 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
 ret.setValue(value);
 ret.setMinimumAllocation(minimumAllocation);
 ret.setMaximumAllocation(maximumAllocation);
+ret.setTags(tags);
+ret.setAttributes(attributes);
 return ret;
   }
 
@@ -258,13 +311,16 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
 dst.
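
A sketch of the new 8-argument newInstance overload in use (the resource name, tag and attribute values are illustrative only):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
import org.apache.hadoop.yarn.api.records.ResourceInformation;

public class ResourceTagsSketch {
  public static void main(String[] args) {
    ResourceInformation gpu = ResourceInformation.newInstance(
        "yarn.io/gpu", "", 4, ResourceTypes.COUNTABLE,
        0L, 8L,
        ImmutableSet.of("ecc", "nvlink"),    // tags
        ImmutableMap.of("arch", "volta"));   // attributes
    System.out.println(gpu.getTags() + " " + gpu.getAttributes());
  }
}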

[01/50] [abbrv] hadoop git commit: HDDS-587. Add new classes for pipeline management. Contributed by Lokesh Jain.

2018-10-16 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 bba8aa3d9 -> 3a5b78ac4


HDDS-587. Add new classes for pipeline management. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c8e023b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c8e023b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c8e023b

Branch: refs/heads/HDFS-12943
Commit: 5c8e023ba32da3e65193f6ced354efe830dba75d
Parents: c07b95b
Author: Nandakumar 
Authored: Sat Oct 13 03:22:20 2018 +0530
Committer: Nandakumar 
Committed: Sat Oct 13 03:22:20 2018 +0530

--
 .../hadoop/hdds/scm/pipeline/Pipeline.java  | 211 
 .../hadoop/hdds/scm/pipeline/PipelineID.java|  80 ++
 .../hadoop/hdds/scm/pipeline/package-info.java  |  24 ++
 .../hdds/scm/pipeline/PipelineFactory.java  |  56 +
 .../hdds/scm/pipeline/PipelineManager.java  |  58 +
 .../hdds/scm/pipeline/PipelineProvider.java |  35 +++
 .../hdds/scm/pipeline/PipelineStateManager.java | 179 ++
 .../hdds/scm/pipeline/PipelineStateMap.java | 212 
 .../scm/pipeline/RatisPipelineProvider.java | 135 ++
 .../hdds/scm/pipeline/SCMPipelineManager.java   | 226 +
 .../scm/pipeline/SimplePipelineProvider.java|  80 ++
 .../hadoop/hdds/scm/pipeline/package-info.java  |  24 ++
 .../scm/pipeline/TestPipelineStateManager.java  | 246 +++
 .../scm/pipeline/TestRatisPipelineProvider.java | 104 
 .../pipeline/TestSimplePipelineProvider.java| 102 
 15 files changed, 1772 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c8e023b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
new file mode 100644
index 000..b58a001
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Represents a group of datanodes which store a container.
+ */
+public final class Pipeline {
+
+  private final PipelineID id;
+  private final ReplicationType type;
+  private final ReplicationFactor factor;
+
+  private LifeCycleState state;
+  private List<DatanodeDetails> nodes;
+
+  private Pipeline(PipelineID id, ReplicationType type,
+  ReplicationFactor factor, LifeCycleState state,
+  List<DatanodeDetails> nodes) {
+this.id = id;
+this.type = type;
+this.factor = factor;
+this.state = state;
+this.nodes = nodes;
+  }
+
+  /**
+   * Returns the ID of this pipeline.
+   *
+   * @return PipelineID
+   */
+  public PipelineID getID() {
+return id;
+  }
+
+  /**
+   * Returns the type.
+   *
+   * @return type - Simple or Ratis.
+   */
+  public ReplicationType getType() {
+return type;
+  }
+
+  /**
+   * Returns the factor.
+   *
+   * @return type - Simple or Ratis.
+   */
+  public ReplicationFactor getFactor() {
+return factor;
+  }
+
+  /**
+   * Returns the State of the pipeline.
+   *
+   * @return - LifeCycleStates.
+   */
+  public LifeCycleState getLifeCycleState() {
+return state;
+  }
+
+  /**
+   * Retu
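
A sketch of the read side of this API, given a Pipeline obtained from the pipeline manager (instance creation is not shown in this excerpt):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class PipelineCheckSketch {
  // True when the pipeline is an open Ratis/THREE group, i.e. a candidate
  // for hosting a new replicated container.
  static boolean isOpenRatisThree(Pipeline pipeline) {
    return pipeline.getType() == ReplicationType.RATIS
        && pipeline.getFactor() == ReplicationFactor.THREE
        && pipeline.getLifeCycleState() == LifeCycleState.OPEN;
  }
}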

[09/50] [abbrv] hadoop git commit: HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.

2018-10-16 Thread shv
HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0473b680
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0473b680
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0473b680

Branch: refs/heads/HDFS-12943
Commit: 0473b6817cfe4f03acdcb0eedc78b509244f
Parents: 9227f3d
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:12:08 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:12:08 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 ---
 .../common/helpers/KeyValueContainerReport.java | 117 ---
 .../container/common/interfaces/Container.java  |   5 +
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 ++
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 +
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 +
 12 files changed, 208 insertions(+), 328 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
 
   private MetadataKeyFilters() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0473b680/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific langu
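
For intuition, the block commit sequence id (BCSID) is what turns a replayed Ratis log entry into a no-op. A minimal, self-contained sketch of that rule (illustrative only; the real check lives in BlockManagerImpl, which this excerpt truncates):

public class BcsIdSketch {
  private long storedBcsId; // stands in for the "#BCSID" entry in container metadata

  // Apply a block commit only if it is newer than what was already applied,
  // so a replayed applyTransaction call becomes a harmless no-op.
  boolean applyBlockCommit(long incomingBcsId) {
    if (incomingBcsId <= storedBcsId) {
      return false;
    }
    storedBcsId = incomingBcsId;
    return true;
  }
}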

[13/50] [abbrv] hadoop git commit: HADOOP-15849. Upgrade netty version to 3.10.6.

2018-10-16 Thread shv
HADOOP-15849. Upgrade netty version to 3.10.6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8853fc8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8853fc8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8853fc8a

Branch: refs/heads/HDFS-12943
Commit: 8853fc8a55b07ecdc5ce8d85278b822e5675d97a
Parents: 603649d
Author: Xiao Chen 
Authored: Sat Oct 13 20:21:36 2018 -0700
Committer: Xiao Chen 
Committed: Sat Oct 13 20:22:02 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8853fc8a/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b850c7c..4cdbcfb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -961,7 +961,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty</artifactId>
-        <version>3.10.5.Final</version>
+        <version>3.10.6.Final</version>
       </dependency>
 
   





[38/50] [abbrv] hadoop git commit: YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed by Robert Kanter)

2018-10-16 Thread shv
YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed 
by Robert Kanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2288ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2288ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2288ac4

Branch: refs/heads/HDFS-12943
Commit: c2288ac45b748b4119442c46147ccc324926c340
Parents: d59ca43
Author: Haibo Chen 
Authored: Tue Oct 16 13:36:26 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 13:36:26 2018 -0700

--
 .../org/apache/hadoop/security/Credentials.java |   1 +
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  21 +-
 .../hadoop/yarn/api/ApplicationConstants.java   |  22 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  20 +
 .../src/main/resources/yarn-default.xml |  18 +
 .../yarn/server/security/AMSecretKeys.java  |  45 ++
 .../nodemanager/DefaultContainerExecutor.java   |  14 +
 .../nodemanager/LinuxContainerExecutor.java |   4 +
 .../launcher/ContainerLaunch.java   |  50 ++
 .../launcher/ContainerRelaunch.java |  23 +
 .../runtime/DefaultLinuxContainerRuntime.java   |  14 +-
 .../runtime/DockerLinuxContainerRuntime.java|  36 +-
 .../runtime/LinuxContainerRuntimeConstants.java |   4 +
 .../executor/ContainerStartContext.java |  24 +
 .../impl/container-executor.c   | 121 -
 .../impl/container-executor.h   |  17 +-
 .../main/native/container-executor/impl/main.c  |  35 +-
 .../main/native/container-executor/impl/util.h  |   6 +-
 .../test/test-container-executor.c  | 228 +---
 .../TestDefaultContainerExecutor.java   | 157 ++
 .../TestLinuxContainerExecutorWithMocks.java| 115 ++--
 .../launcher/TestContainerLaunch.java   | 149 ++
 .../launcher/TestContainerRelaunch.java |  32 +-
 .../runtime/TestDockerContainerRuntime.java | 225 
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  11 +
 .../server/resourcemanager/ResourceManager.java |   9 +
 .../resourcemanager/amlauncher/AMLauncher.java  |  29 ++
 .../security/ProxyCAManager.java|  68 +++
 .../TestApplicationMasterLauncher.java  |  86 ++-
 .../security/TestProxyCAManager.java|  51 ++
 .../hadoop/yarn/server/webproxy/ProxyCA.java| 408 +++
 .../yarn/server/webproxy/WebAppProxy.java   |   1 +
 .../server/webproxy/WebAppProxyServlet.java |  89 +++-
 .../yarn/server/webproxy/TestProxyCA.java   | 518 +++
 .../server/webproxy/TestWebAppProxyServlet.java |  58 ++-
 37 files changed, 2406 insertions(+), 323 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index 6a9527a..4fafa4a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Credentials implements Writable {
+
   public enum SerializedFormat {
 WRITABLE((byte) 0x00),
 PROTOBUF((byte) 0x01);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
index 898c94e..1870b22 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
@@ -25,6 +25,7 @@ import 
org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -50,6 +51,7 @@ import java.security.NoSuchProviderException;
 import java.se

[11/50] [abbrv] hadoop git commit: HDDS-603. Add BlockCommitSequenceId field per Container and expose it in Container Reports. Contributed by Shashikant Banerjee.

2018-10-16 Thread shv
HDDS-603. Add BlockCommitSequenceId field per Container and expose it in 
Container Reports. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5209c750
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5209c750
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5209c750

Branch: refs/heads/HDFS-12943
Commit: 5209c7503bee7849d134c16213133b4fa0c330f6
Parents: 22f37af
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:15:42 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:15:42 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 ---
 .../common/helpers/KeyValueContainerReport.java | 117 ---
 .../container/common/interfaces/Container.java  |   5 +
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 ++
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 +
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 +
 12 files changed, 208 insertions(+), 328 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
 
   private MetadataKeyFilters() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5209c750/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for t

[45/50] [abbrv] hadoop git commit: YARN-8842. Expose metrics for custom resource types in QueueMetrics. (Contributed by Szilard Nemeth)

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
new file mode 100644
index 000..76a9849
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
@@ -0,0 +1,645 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.QueueMetricsForCustomResources.QueueMetricsCustomResource;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
+import static org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper
+.extractCustomResourcesAsStrings;
+import static 
org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper.newResource;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_ALLOCATED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_RELEASED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+
.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_MEMORY_MB_SECONDS_PREEMPTED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+
.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_VCORE_SECONDS_PREEMPTED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_CONTAINERS;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_MB;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_V_CORES;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AVAILABLE_MB;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AVAILABLE_V_CORES;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.PENDING_CONTAINERS;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.PENDING_MB;
+import static org.apache.hadoop

[17/50] [abbrv] hadoop git commit: YARN-8830. SLS tool fix node addition. Contributed by Bibin A Chundatt.

2018-10-16 Thread shv
YARN-8830. SLS tool fix node addition. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4a38e7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4a38e7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4a38e7b

Branch: refs/heads/HDFS-12943
Commit: b4a38e7b3e530756fb79d23dd4e218beeb5e3190
Parents: b60ca37
Author: bibinchundatt 
Authored: Mon Oct 15 16:10:25 2018 +0530
Committer: bibinchundatt 
Committed: Mon Oct 15 16:10:25 2018 +0530

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 29 
 1 file changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4a38e7b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 1e83e40..1fadd42 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -45,6 +45,8 @@ import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.commons.collections.SetUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -1029,5 +1031,32 @@ public class SLSRunner extends Configured implements 
Tool {
 public void setLabels(Set<NodeLabel> labels) {
   this.labels = labels;
 }
+
+@Override
+public boolean equals(Object o) {
+  if (this == o) {
+return true;
+  }
+  if (!(o instanceof NodeDetails)) {
+return false;
+  }
+
+  NodeDetails that = (NodeDetails) o;
+
+  return StringUtils.equals(hostname, that.hostname) && (
+  nodeResource == null ?
+  that.nodeResource == null :
+  nodeResource.equals(that.nodeResource)) && SetUtils
+  .isEqualSet(labels, that.labels);
+}
+
+@Override
+public int hashCode() {
+  int result = hostname == null ? 0 : hostname.hashCode();
+  result =
+  31 * result + (nodeResource == null ? 0 : nodeResource.hashCode());
+  result = 31 * result + (labels == null ? 0 : labels.hashCode());
+  return result;
+}
   }
 }
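
The new equals/hashCode pair is what lets hash-based collections collapse duplicate node entries during SLS node addition. A simplified stand-in class showing the effect:

import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

class NodeSketch {
  final String hostname;
  NodeSketch(String hostname) { this.hostname = hostname; }

  @Override public boolean equals(Object o) {
    return o instanceof NodeSketch
        && Objects.equals(hostname, ((NodeSketch) o).hostname);
  }

  @Override public int hashCode() { return Objects.hashCode(hostname); }

  public static void main(String[] args) {
    Set<NodeSketch> nodes = new HashSet<>();
    nodes.add(new NodeSketch("host1"));
    nodes.add(new NodeSketch("host1")); // equal entry collapses into one
    System.out.println(nodes.size());   // prints 1
  }
}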





[23/50] [abbrv] hadoop git commit: HADOOP-15851. Disable wildfly logs to the console. Contributed by Vishwajeet Dusane.

2018-10-16 Thread shv
HADOOP-15851. Disable wildfly logs to the console.
Contributed by Vishwajeet Dusane.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef9dc6c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef9dc6c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef9dc6c4

Branch: refs/heads/HDFS-12943
Commit: ef9dc6c44c686e836bb25e31ff355cff80572d23
Parents: e13a38f
Author: Steve Loughran 
Authored: Mon Oct 15 22:53:55 2018 +0100
Committer: Steve Loughran 
Committed: Mon Oct 15 22:53:55 2018 +0100

--
 .../org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9dc6c4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
index 00e7786..6d3e4ea 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
@@ -25,6 +25,7 @@ import java.net.SocketException;
 import java.security.KeyManagementException;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
+import java.util.logging.Level;
 
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLSocket;
@@ -33,6 +34,7 @@ import javax.net.ssl.SSLSocketFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.wildfly.openssl.OpenSSLProvider;
+import org.wildfly.openssl.SSL;
 
 
 /**
@@ -118,6 +120,7 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
 switch (preferredChannelMode) {
   case Default:
 try {
+  java.util.logging.Logger.getLogger(SSL.class.getName()).setLevel(Level.WARNING);
   ctx = SSLContext.getInstance("openssl.TLS");
   ctx.init(null, null, null);
   channelMode = SSLChannelMode.OpenSSL;
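
Note the two logging frameworks in play: SSLSocketFactoryEx logs through slf4j, but the wildfly-openssl library logs through java.util.logging, so its console noise has to be silenced at the JUL level. The same one-liner quiets any chatty JUL logger:

import java.util.logging.Level;
import java.util.logging.Logger;

public class QuietJulSketch {
  public static void main(String[] args) {
    // Raise the threshold so INFO messages from this logger are dropped.
    Logger.getLogger("org.wildfly.openssl.SSL").setLevel(Level.WARNING);
  }
}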





[16/50] [abbrv] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-16 Thread shv
Fix potential FSImage corruption. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b60ca379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b60ca379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b60ca379

Branch: refs/heads/HDFS-12943
Commit: b60ca37914b22550e3630fa02742d40697decb31
Parents: 8e5365e
Author: Vinayakumar B 
Authored: Mon Oct 15 15:48:26 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 15:48:26 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 ++---
 .../server/namenode/FSImageFormatPBINode.java   | 101 
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++--
 .../namenode/INodeWithAdditionalFields.java |  36 -
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../server/namenode/SerialNumberManager.java| 152 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +++-
 .../hdfs/server/namenode/XAttrStorage.java  |  11 --
 .../tools/offlineImageViewer/FSImageLoader.java |  18 ++-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 15 files changed, 383 insertions(+), 251 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60ca379/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
 return MIN;
   }
+
+  public int getLength() {
+return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60ca379/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -31,25 +31,23 @@ import com.google.common.collect.ImmutableList;
 /**
  * Class to pack an AclEntry into an integer. 
  * An ACL entry is represented by a 32-bit integer in Big Endian format. 
- * The bits can be divided in four segments: 
- * [0:1) || [1:3) || [3:6) || [6:7) || [7:32) 
- * 
- * [0:1) -- the scope of the entry (AclEntryScope) 
- * [1:3) -- the type of the entry (AclEntryType) 
- * [3:6) -- the permission of the entry (FsAction) 
- * [6:7) -- A flag to indicate whether Named entry or not 
- * [7:8) -- Reserved 
- * [8:32) -- the name of the entry, which is an ID that points to a 
- * string in the StringTableSection. 
+ *
+ * Note:  this format is used both in-memory and on-disk.  Changes will be
+ * incompatible.
+ *
  */
-public enum AclEntryStatusFormat {
+public enum AclEntryStatusFormat implements LongBitFormat.Enum {
+
+  PERMISSION(null, 3),
+  TYPE(PERMISSION.BITS, 2),
+  SCOPE(TYPE.BITS, 1),
+  NAME(SCOPE.BITS, 24);
 
-  SCOPE(null, 1),
-  TYPE(SCOPE.BITS, 2),
-  PERMISSION(TYPE.BITS, 3),
-  NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
-  RESERVED(NAMED_ENTRY_CHECK.BITS, 1),
-  NAME(RESERVED.BITS, 24);
+  private static final FsAction[] FSACTION_VALUES = FsAction.values();
+  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
+  AclEntryScope.values();
+  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
+  AclEntryType.values();
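
The new enum packs an ACL entry into fixed-width bit fields (PERMISSION 3 bits, TYPE 2, SCOPE 1, NAME 24). A self-contained sketch of this style of packing, with each offset chained off the previous field the way LongBitFormat does (illustrative, not the HDFS code):

public class BitPackSketch {
  static final int PERM_LEN = 3, TYPE_LEN = 2, SCOPE_LEN = 1, NAME_LEN = 24;
  static final int PERM_OFF = 0;
  static final int TYPE_OFF = PERM_OFF + PERM_LEN;   // 3
  static final int SCOPE_OFF = TYPE_OFF + TYPE_LEN;  // 5
  static final int NAME_OFF = SCOPE_OFF + SCOPE_LEN; // 6

  static long pack(long perm, long type, long scope, long nameId) {
    return (perm << PERM_OFF) | (type << TYPE_OFF)
        | (scope << SCOPE_OFF) | (nameId << NAME_OFF);
  }

  static long field(long packed, int off, int len) {
    return (packed >>> off) & ((1L << len) - 1);
  }

  public static void main(String[] args) {
    long entry = pack(5, 1, 0, 42);          // permission, type, scope, name id
    System.out.println(field(entry, NAME_OFF, NAME_LEN)); // prints 42
  }
}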
 
 

[37/50] [abbrv] hadoop git commit: YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed by Robert Kanter)

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index a110f10..2304501c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -37,6 +37,8 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.LineNumberReader;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -88,12 +90,11 @@ public class TestLinuxContainerExecutorWithMocks {
   private static final Logger LOG =
LoggerFactory.getLogger(TestLinuxContainerExecutorWithMocks.class);
 
-  private static final String MOCK_EXECUTOR =
-  "./src/test/resources/mock-container-executor";
+  private static final String MOCK_EXECUTOR = "mock-container-executor";
   private static final String MOCK_EXECUTOR_WITH_ERROR =
-  "./src/test/resources/mock-container-executer-with-error";
+  "mock-container-executer-with-error";
   private static final String MOCK_EXECUTOR_WITH_CONFIG_ERROR =
-  "./src/test/resources/mock-container-executer-with-configuration-error";
+  "mock-container-executer-with-configuration-error";
 
   private String tmpMockExecutor;
   private LinuxContainerExecutor mockExec = null;
@@ -121,11 +122,13 @@ public class TestLinuxContainerExecutorWithMocks {
 return ret;
   }
 
-  private void setupMockExecutor(String executorPath, Configuration conf)
-  throws IOException {
+  private void setupMockExecutor(String executorName, Configuration conf)
+  throws IOException, URISyntaxException {
 //we'll always use the tmpMockExecutor - since
 // PrivilegedOperationExecutor can only be initialized once.
 
+URI executorPath = getClass().getClassLoader().getResource(executorName)
+.toURI();
 Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor),
 REPLACE_EXISTING);
 
@@ -140,7 +143,8 @@ public class TestLinuxContainerExecutorWithMocks {
   }
 
   @Before
-  public void setup() throws IOException, ContainerExecutionException {
+  public void setup() throws IOException, ContainerExecutionException,
+  URISyntaxException {
 assumeNotWindows();
 
 tmpMockExecutor = System.getProperty("test.build.data") +
@@ -172,7 +176,18 @@ public class TestLinuxContainerExecutorWithMocks {
   }
 
   @Test
-  public void testContainerLaunch()
+  public void testContainerLaunchWithoutHTTPS()
+  throws IOException, ConfigurationException {
+testContainerLaunch(false);
+  }
+
+  @Test
+  public void testContainerLaunchWithHTTPS()
+  throws IOException, ConfigurationException {
+testContainerLaunch(true);
+  }
+
+  private void testContainerLaunch(boolean https)
   throws IOException, ConfigurationException {
 String appSubmitter = "nobody";
 String cmd = String.valueOf(
@@ -193,41 +208,64 @@ public class TestLinuxContainerExecutorWithMocks {
 
 Path scriptPath = new Path("file:///bin/echo");
 Path tokensPath = new Path("file:///dev/null");
+Path keystorePath = new Path("file:///dev/null");
+Path truststorePath = new Path("file:///dev/null");
 Path workDir = new Path("/tmp");
 Path pidFile = new Path(workDir, "pid.txt");
 
 mockExec.activateContainer(cId, pidFile);
-int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
-.setContainer(container)
-.setNmPrivateContainerScriptPath(scriptPath)
-.setNmPrivateTokensPath(tokensPath)
-.setUser(appSubmitter)
-.setAppId(appId)
-.setContainerWorkDir(workDir)
-.setLocalDirs(dirsHandler.getLocalDirs())
-.setLogDirs(dirsHandler.getLogDirs())
-.setFilecacheDirs(new ArrayList<>())
-.setUserLocalDirs(new ArrayList<>())
-.setContainerLocalDirs(new ArrayList<>())
-.setContainerLogDirs(new ArrayList<>())
-.setUserFilecacheDirs(new ArrayList<>())
-.setApplicationLocalDirs(new ArrayList<>())
-.build());
+ContainerStartContext.Builder ctxBuilder =
+new Contai

[30/50] [abbrv] hadoop git commit: YARN-8879. Kerberos principal is needed when submitting a submarine job. Contributed by Zac Zhou.

2018-10-16 Thread shv
YARN-8879. Kerberos principal is needed when submitting a submarine job. 
Contributed by Zac Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/753f149f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/753f149f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/753f149f

Branch: refs/heads/HDFS-12943
Commit: 753f149fd3f5acf9a98cfc780d7899e307c19002
Parents: 0c2914e
Author: Sunil G 
Authored: Tue Oct 16 22:17:51 2018 +0530
Committer: Sunil G 
Committed: Tue Oct 16 22:17:51 2018 +0530

--
 .../org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java | 5 +
 .../apache/hadoop/yarn/service/utils/TestServiceApiUtil.java | 8 
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/753f149f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index d201c7d..f055981 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -245,15 +245,12 @@ public class ServiceApiUtil {
 
   public static void validateKerberosPrincipal(
   KerberosPrincipal kerberosPrincipal) throws IOException {
-try {
+if (!StringUtils.isEmpty(kerberosPrincipal.getPrincipalName())) {
   if (!kerberosPrincipal.getPrincipalName().contains("/")) {
 throw new IllegalArgumentException(String.format(
 RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT,
 kerberosPrincipal.getPrincipalName()));
   }
-} catch (NullPointerException e) {
-  throw new IllegalArgumentException(
-  RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_MISSING);
 }
 if (!StringUtils.isEmpty(kerberosPrincipal.getKeytab())) {
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/753f149f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
index 1e3c180..4940f8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
@@ -652,6 +652,14 @@ public class TestServiceApiUtil extends ServiceTestUtils {
 } catch (IllegalArgumentException e) {
   Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
 }
+
+kp.setPrincipalName(null);
+kp.setKeytab(null);
+try {
+  ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
+} catch (NullPointerException e) {
+Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+}
   }
 
   @Test
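
The essence of the fix: replace flow control via NullPointerException with a null-safe emptiness guard, so a missing principal simply skips validation. A sketch of the pattern (the message text is illustrative):

import org.apache.commons.lang3.StringUtils;

public class PrincipalCheckSketch {
  static void validate(String principalName) {
    // StringUtils.isEmpty is null-safe: null and "" both skip validation.
    if (!StringUtils.isEmpty(principalName) && !principalName.contains("/")) {
      throw new IllegalArgumentException(
          "Principal name should be in the form user/host: " + principalName);
    }
  }

  public static void main(String[] args) {
    validate(null);          // no exception: nothing to validate
    validate("hdfs/_HOST");  // no exception: contains '/'
  }
}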





[20/50] [abbrv] hadoop git commit: HDFS-13987. RBF: Review of RandomResolver Class. Contributed by BELUGA BEHR.

2018-10-16 Thread shv
HDFS-13987. RBF: Review of RandomResolver Class. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee1c80ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee1c80ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee1c80ea

Branch: refs/heads/HDFS-12943
Commit: ee1c80ea324459852e8c1c73b2545a261cb02b6f
Parents: f880ff4
Author: Inigo Goiri 
Authored: Mon Oct 15 09:51:26 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Oct 15 09:51:26 2018 -0700

--
 .../resolver/order/RandomResolver.java  | 22 +++-
 1 file changed, 7 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1c80ea/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java
index 022aa48..13643e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java
@@ -17,15 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver.order;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Iterables;
 
 /**
  * Order the destinations randomly.
@@ -35,10 +35,6 @@ public class RandomResolver implements OrderedResolver {
   private static final Logger LOG =
   LoggerFactory.getLogger(RandomResolver.class);
 
-
-  /** Random number generator. */
-  private static final Random RANDOM = new Random();
-
   /**
* Get a random name space from the path.
*
@@ -47,16 +43,12 @@ public class RandomResolver implements OrderedResolver {
* @return Random name space.
*/
   public String getFirstNamespace(final String path, final PathLocation loc) {
-if (loc == null) {
-  return null;
-}
-Set<String> namespaces = loc.getNamespaces();
-if (namespaces == null || namespaces.isEmpty()) {
+final Set<String> namespaces = (loc == null) ? null : loc.getNamespaces();
+if (CollectionUtils.isEmpty(namespaces)) {
   LOG.error("Cannot get namespaces for {}", loc);
   return null;
 }
-List<String> nssList = new ArrayList<>(namespaces);
-int index = RANDOM.nextInt(nssList.size());
-return nssList.get(index);
+final int index = ThreadLocalRandom.current().nextInt(namespaces.size());
+return Iterables.get(namespaces, index);
   }
 }
\ No newline at end of file
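
The rewrite avoids copying the namespace set into an ArrayList just to index it: ThreadLocalRandom supplies a contention-free random index and Guava's Iterables.get walks to that element. The pattern in isolation:

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

public class RandomPickSketch {
  // Pick a uniformly random element from a non-empty set without copying it.
  static <T> T pick(Set<T> items) {
    return Iterables.get(items, ThreadLocalRandom.current().nextInt(items.size()));
  }

  public static void main(String[] args) {
    Set<String> ns = new LinkedHashSet<>(Arrays.asList("ns0", "ns1", "ns2"));
    System.out.println(pick(ns));
  }
}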





[31/50] [abbrv] hadoop git commit: MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10, 4) is used. Contributed by Peter Bacsko.

2018-10-16 Thread shv
MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10,4) is 
used. Contributed by Peter Bacsko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f8fcb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f8fcb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f8fcb0

Branch: refs/heads/HDFS-12943
Commit: 25f8fcb06476938826cdc92858a61124b18cd98d
Parents: 753f149
Author: Xiao Chen 
Authored: Tue Oct 16 10:22:47 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 10:23:31 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MRConfig.java   |   2 +-
 .../src/main/resources/mapred-default.xml   |   2 +-
 .../split/TestJobSplitWriterWithEC.java | 128 +++
 3 files changed, 130 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index e85c893..b4d9149 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -78,7 +78,7 @@ public interface MRConfig {
 "mapreduce.task.max.status.length";
   public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
 
-  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 15;
   public static final String MAX_BLOCK_LOCATIONS_KEY =
 "mapreduce.job.max.split.locations";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 9f33d65..e5da41f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -149,7 +149,7 @@
 
   <property>
     <name>mapreduce.job.max.split.locations</name>
-    <value>10</value>
+    <value>15</value>
     <description>The max number of block locations to store for each split for
     locality calculation.
     </description>
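The new default tracks erasure-coding geometry: an RS(10,4) block group stripes across 10 data plus 4 parity blocks, so a split over such a file can legitimately report 14 block locations, which exceeded the old limit of 10 and triggered the warnings. A small sketch of raising the limit programmatically, using the constant from MRConfig above (any value of at least 14 silences the warning for RS(10,4)):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class SplitLocationLimit {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep the per-split location limit at or above the EC stripe width:
    // RS(10,4) = 10 data + 4 parity = 14 locations per block group.
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 15);
    System.out.println(conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 0));
  }
}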

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
new file mode 100644
index 000..23f8a40
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.split;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IO

[28/50] [abbrv] hadoop git commit: HDDS-657. Remove {volume} path segments from all the remaining rest endpoints. Contributed by Elek, Marton.

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2914e5/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
--
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
new file mode 100644
index 000..d32bc9f
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.HEAD;
+import javax.ws.rs.HeaderParam;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.Response.Status;
+import javax.ws.rs.core.StreamingOutput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.http.HttpStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Key level rest endpoints.
+ */
+@Path("/{bucket}/{path:.+}")
+public class ObjectEndpoint extends EndpointBase {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ObjectEndpoint.class);
+
+  private List<String> customizableGetHeaders = new ArrayList<>();
+
+  public ObjectEndpoint() {
+customizableGetHeaders.add("Content-Type");
+customizableGetHeaders.add("Content-Language");
+customizableGetHeaders.add("Expires");
+customizableGetHeaders.add("Cache-Control");
+customizableGetHeaders.add("Content-Disposition");
+customizableGetHeaders.add("Content-Encoding");
+  }
+
+  /**
+   * Rest endpoint to upload object to a bucket.
+   * 
+   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
+   * more details.
+   */
+  @PUT
+  public Response put(
+  @Context HttpHeaders headers,
+  @PathParam("bucket") String bucketName,
+  @PathParam("path") String keyPath,
+  @DefaultValue("STAND_ALONE") @QueryParam("replicationType")
+  ReplicationType replicationType,
+  @DefaultValue("ONE") @QueryParam("replicationFactor")
+  ReplicationFactor replicationFactor,
+  @DefaultValue("32 * 1024 * 1024") @QueryParam("chunkSize")
+  String chunkSize,
+  @HeaderParam("Content-Length") long length,
+  InputStream body) throws IOException, OS3Exception {
+
+try {
+  Configuration config = new OzoneConfiguration();
+  config.set(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
+
+  OzoneBucket bucket = getBucket(bucketName);
+  OzoneOutputStream output = bucket
+  .createKey(keyPath, length, replicationType, replicationFactor);
+
+  IOUtils.copy(body, output);
+  output.close();
+
+  return Response.ok().status(HttpStatus.SC_OK)
+  .build();
+} catch (IOException ex) {
+  LOG.error("Exception occurred in PutObject", ex);
+  throw ex;
+}
+  }
+
+  /**
+   * Rest endpoint to download object from a bucket.
+   * 
+   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html for
+   * more details.
+   */
+  @GET
+  public Response get(
+  @
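For a quick manual check of the PUT path above, an object can be pushed through the gateway with curl. This is a sketch, not part of the commit: the address follows the smoketest configuration (http://s3g:9878), the bucket and key names are illustrative, and it assumes the gateway accepts the request without AWS signature headers, as the smoketests' OZONE_S3_SET_CREDENTIALS:false setting suggests.

# curl -T issues a PUT and derives Content-Length from the local file.
curl -T ./file.txt http://s3g:9878/bucket1/key1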

[08/50] [abbrv] hadoop git commit: HDDS-653. TestMetadataStore#testIterator fails on Windows. Contributed by Yiqun Lin.

2018-10-16 Thread shv
HDDS-653. TestMetadataStore#testIterator fails on Windows. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9227f3d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9227f3d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9227f3d2

Branch: refs/heads/HDFS-12943
Commit: 9227f3d22412f94d22f4cae6a4fb3a52ead9b011
Parents: addb846
Author: Anu Engineer 
Authored: Sat Oct 13 10:41:06 2018 -0700
Committer: Anu Engineer 
Committed: Sat Oct 13 10:41:06 2018 -0700

--
 .../src/test/java/org/apache/hadoop/utils/TestMetadataStore.java   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9227f3d2/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
--
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
index 30fc7f3..a91bc80 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
@@ -163,6 +163,8 @@ public class TestMetadataStore {
   GenericTestUtils.assertExceptionContains("Store has no more elements",
   ex);
 }
+    dbStore.close();
+    dbStore.destroy();
 FileUtils.deleteDirectory(dbDir);
 
   }
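The two added lines are about resource lifetime rather than iterator logic: Windows refuses to delete files that still have open handles, and the open metadata store keeps its backing files open, so the final deleteDirectory failed there. Restated as an ordering sketch (method names exactly as in the diff):

dbStore.close();                   // release open file handles first
dbStore.destroy();                 // then drop the store's on-disk state
FileUtils.deleteDirectory(dbDir);  // directory removal now succeeds on Windows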





[04/50] [abbrv] hadoop git commit: HDDS-613. Update HeadBucket, DeleteBucket to not have volume in path. Contributed by Bharat Viswanadham.

2018-10-16 Thread shv
HDDS-613. Update HeadBucket, DeleteBucket to not have volume in path. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28ca5c9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28ca5c9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28ca5c9d

Branch: refs/heads/HDFS-12943
Commit: 28ca5c9d1647837a1b2480d8935deffc6f68d807
Parents: 3c1fe07
Author: Bharat Viswanadham 
Authored: Fri Oct 12 16:59:11 2018 -0700
Committer: Bharat Viswanadham 
Committed: Fri Oct 12 16:59:11 2018 -0700

--
 .../dist/src/main/smoketest/commonlib.robot |  7 
 .../dist/src/main/smoketest/s3/bucketv2.robot   | 11 ++
 .../dist/src/main/smoketest/s3/bucketv4.robot   | 11 ++
 .../src/main/smoketest/s3/commonawslib.robot|  5 +++
 .../hadoop/ozone/s3/bucket/DeleteBucket.java|  7 ++--
 .../hadoop/ozone/s3/bucket/HeadBucket.java  |  9 ++---
 .../hadoop/ozone/client/ObjectStoreStub.java| 36 ++--
 .../hadoop/ozone/client/OzoneVolumeStub.java| 12 +--
 .../ozone/s3/bucket/TestDeleteBucket.java   | 21 
 .../hadoop/ozone/s3/bucket/TestHeadBucket.java  | 15 +++-
 10 files changed, 86 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ca5c9d/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
index e2620fa..eb3a8bb 100644
--- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
@@ -22,3 +22,10 @@ Execute
 Log ${output}
 Should Be Equal As Integers ${rc}   0
 [return]${output}
+
+Execute and checkrc
+    [arguments]                     ${command}      ${expected_error_code}
+    ${rc}   ${output} =             Run And Return Rc And Output    ${command}
+    Log                             ${output}
+    Should Be Equal As Integers     ${rc}           ${expected_error_code}
+    [return]                        ${output}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ca5c9d/hadoop-ozone/dist/src/main/smoketest/s3/bucketv2.robot
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketv2.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketv2.robot
index eb7fa45..f17189b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketv2.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketv2.robot
@@ -23,6 +23,7 @@ Resourcecommonawslib.robot
 ${ENDPOINT_URL}   http://s3g:9878
 ${OZONE_TEST} true
 ${BUCKET} generated
+${NONEXIST-BUCKET}    generated1
 *** Keywords ***
 
 Install aws s3 cli
@@ -53,3 +54,13 @@ Create Bucket
 Should contain  ${result} Location
 
 Run Keyword if '${OZONE_TEST}' == 'true'Check Volume
+
+Head Bucket
+    ${result} =     Execute AWSS3APICli                 head-bucket --bucket ${BUCKET}
+    ${result} =     Execute AWSS3APICli and checkrc     head-bucket --bucket ${NONEXIST-BUCKET}     255
+    Should contain      ${result}       Not Found
+    Should contain      ${result}       404
+Delete Bucket
+    ${result} =     Execute AWSS3APICli                 head-bucket --bucket ${BUCKET}
+    ${result} =     Execute AWSS3APICli and checkrc     delete-bucket --bucket ${NONEXIST-BUCKET}     255
+    Should contain      ${result}       NoSuchBucket
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ca5c9d/hadoop-ozone/dist/src/main/smoketest/s3/bucketv4.robot
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketv4.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketv4.robot
index 5d306aa..1a93690 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketv4.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketv4.robot
@@ -23,6 +23,7 @@ Resourcecommonawslib.robot
 ${ENDPOINT_URL}   http://s3g:9878
 ${OZONE_TEST} true
 ${BUCKET} generated
+${NONEXIST-BUCKET}    generated1
 
 *** Keywords ***
 
@@ -58,3 +59,13 @@ Create Bucket
 Should contain  ${result} Location
 
 Run Keyword if '${OZONE_TEST}' == 'true'Check Volume
+
+Head Bucket
+${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
+  

[32/50] [abbrv] hadoop git commit: HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.

2018-10-16 Thread shv
HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e5173b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e5173b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e5173b

Branch: refs/heads/HDFS-12943
Commit: 53e5173bd1d970ec1c714568cbdb1c0dfd0fc6fb
Parents: 25f8fcb
Author: Jitendra Pandey 
Authored: Tue Oct 16 10:34:16 2018 -0700
Committer: Jitendra Pandey 
Committed: Tue Oct 16 10:34:51 2018 -0700

--
 .../hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java   | 4 ++--
 .../test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 7fa0cfb..67cda9f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -53,6 +53,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 public class BlockManagerImpl implements BlockManager {
 
   static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+  private static byte[] blockCommitSequenceIdKey =
+  DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 
   private Configuration config;
 
@@ -89,8 +91,6 @@ public class BlockManagerImpl implements BlockManager {
 Preconditions.checkNotNull(db, "DB cannot be null here");
 
 long blockCommitSequenceId = data.getBlockCommitSequenceId();
-byte[] blockCommitSequenceIdKey =
-DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
 
 // default blockCommitSequenceId for any block is 0. It the putBlock

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 71a4bef..ae52451 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
@@ -283,6 +284,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   scm.getClientProtocolServer().getScmInfo().getClusterId()));
   stop();
   FileUtils.deleteDirectory(baseDir);
+  ContainerCache.getInstance(conf).shutdownCache();
 } catch (IOException e) {
   LOG.error("Exception while shutting down the cluster.", e);
 }





[42/50] [abbrv] hadoop git commit: YARN-8892. YARN UI2 doc changes to update security status (verified under security environment). (Sunil G via wangda)

2018-10-16 Thread shv
YARN-8892. YARN UI2 doc changes to update security status (verified under security environment). (Sunil G via wangda)

Change-Id: I8bc8622936861b8d6de3e42a0b75af86ad8a3961


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/538250db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/538250db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/538250db

Branch: refs/heads/HDFS-12943
Commit: 538250db26ce0b261bb74053348cddfc2d65cf52
Parents: 143d747
Author: Wangda Tan 
Authored: Tue Oct 16 13:41:17 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/538250db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 609ebe1..4c9daed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -52,4 +52,4 @@ Open your browser, go to `rm-address:8088/ui2` and try it!
 Notes
 -
 
-- This UI framework is not verified under security environment, please use with caution under security environment.
+This UI framework is verified under security environment as well.





[26/50] [abbrv] hadoop git commit: HDDS-439. 'ozone oz volume create' should default to current logged in user. Contributed by Dinesh Chitlangia.

2018-10-16 Thread shv
HDDS-439. 'ozone oz volume create' should default to current logged in user. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bf8a110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bf8a110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bf8a110

Branch: refs/heads/HDFS-12943
Commit: 0bf8a110a56539ee85de0fc24575dd9a4c66d01b
Parents: 2614078
Author: Arpit Agarwal 
Authored: Mon Oct 15 19:30:40 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Oct 15 19:30:40 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 22 
 .../web/ozShell/volume/CreateVolumeHandler.java |  7 +--
 2 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf8a110/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index d5f2554..03efa1c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Random;
 import java.util.UUID;
 import java.util.stream.Collectors;
+
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.cli.MissingSubcommandException;
@@ -60,6 +61,7 @@ import org.apache.hadoop.ozone.web.response.BucketInfo;
 import org.apache.hadoop.ozone.web.response.KeyInfo;
 import org.apache.hadoop.ozone.web.response.VolumeInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -258,6 +260,26 @@ public class TestOzoneShell {
 exceptionHandler, args);
   }
 
+  /**
+   * Test to create volume without specifying --user or -u.
+   * @throws Exception
+   */
+  @Test
+  public void testCreateVolumeWithoutUser() throws Exception {
+String volumeName = "volume" + RandomStringUtils.randomNumeric(1);
+String[] args = new String[] {"volume", "create", url + "/" + volumeName,
+"--root"};
+
+execute(shell, args);
+
+String truncatedVolumeName =
+volumeName.substring(volumeName.lastIndexOf('/') + 1);
+OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName);
+assertEquals(truncatedVolumeName, volumeInfo.getName());
+assertEquals(UserGroupInformation.getCurrentUser().getUserName(),
+volumeInfo.getOwner());
+  }
+
   @Test
   public void testDeleteVolume() throws Exception {
 LOG.info("Running testDeleteVolume");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf8a110/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
--
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
index de0fbaa..84922a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Option;
 import picocli.CommandLine.Parameters;
@@ -45,8 +46,7 @@ public class CreateVolumeHandler extends Handler {
   private String uri;
 
   @Option(names = {"--user", "-u"},
-  description = "Owner of of the volume", required =
-  true)
+  description = "Owner of of the volume")
   private String userName;
 
   @Option(names = {"--quota", "-q"},
@@ -64,6 +64,9 @@ public class CreateVolumeHandler extends Handler {
*/
   @Override
   public Void call() throws Exception {
+    if(userName == null) {
+      userName = UserGroupInformation.getCurrentUser().getUserName();
+    }
 
 URI ozoneURI = verifyURI(uri);
 Path path = Paths.get(ozoneURI.getPath());
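The shell-level effect, sketched with the command form from the commit summary (volume name and explicit owner are illustrative):

# Previously --user/-u was mandatory:
ozone oz volume create /vol1 --user hadoop

# Now the owner defaults to the user running the command:
ozone oz volume create /vol1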



[40/50] [abbrv] hadoop git commit: YARN-8875. [Submarine] Add documentation for submarine installation script details. (Xun Liu via wangda)

2018-10-16 Thread shv
YARN-8875. [Submarine] Add documentation for submarine installation script details. (Xun Liu via wangda)

Change-Id: I1c8d39c394e5a30f967ea514919835b951f2c124


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed08dd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed08dd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed08dd3b

Branch: refs/heads/HDFS-12943
Commit: ed08dd3b0c9cec20373e8ca4e34d6526bd759943
Parents: babd144
Author: Wangda Tan 
Authored: Tue Oct 16 13:36:09 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:01 2018 -0700

--
 .../src/site/markdown/HowToInstall.md   |  36 +++
 .../src/site/markdown/Index.md  |   4 +-
 .../src/site/markdown/InstallationGuide.md  | 205 +++
 .../src/site/markdown/InstallationScriptCN.md   | 242 ++
 .../src/site/markdown/InstallationScriptEN.md   | 250 +++
 .../src/site/markdown/TestAndTroubleshooting.md | 165 
 .../resources/images/submarine-installer.gif| Bin 0 -> 546547 bytes
 7 files changed, 724 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
new file mode 100644
index 000..05d87c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
@@ -0,0 +1,36 @@
+
+
+# How to Install Dependencies
+
+The Submarine project uses YARN Service, Docker containers, and GPUs (when GPU hardware is available and properly configured).
+
+That means as an admin, you have to properly set up the YARN Service related dependencies, including:
+- YARN Registry DNS
+
+Docker related dependencies, including:
+- Docker binaries with the expected versions.
+- A Docker network which allows Docker containers to talk to each other across different nodes.
+
+And when GPUs are to be used:
+- GPU driver.
+- Nvidia-docker.
+
+For your convenience, we provide installation documents to help you set up your environment. You can always choose to have them installed in your own way.
+
+Use the Submarine installer to install the dependencies: [EN](InstallationScriptEN.html) [CN](InstallationScriptCN.html)
+
+Alternatively, you can follow these guides to install the dependencies manually: [EN](InstallationGuide.html) [CN](InstallationGuideChineseVersion.html)
+
+Once you have installed the dependencies, please follow this guide for [TestAndTroubleshooting](TestAndTroubleshooting.html).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
index 0006f6c..baeaa15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
@@ -41,6 +41,4 @@ Click below contents if you want to understand more.
 
 - [Developer guide](DeveloperGuide.html)
 
-- [Installation guide](InstallationGuide.html)
-
-- [Installation guide Chinese version](InstallationGuideChineseVersion.html)
+- [Installation guides](HowToInstall.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
index d4f4269..4ef2bda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
@@ -16,9 +16,11 @@
 
 ## Prerequisites
 
+(Please note that all following prerequisites are jus

[29/50] [abbrv] hadoop git commit: HDDS-657. Remove {volume} path segments from all the remaining rest endpoints. Contributed by Elek, Marton.

2018-10-16 Thread shv
HDDS-657. Remove {volume} path segments from all the remaining rest endpoints. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c2914e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c2914e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c2914e5

Branch: refs/heads/HDFS-12943
Commit: 0c2914e582587c066db1b2c1530e57d5c078577a
Parents: 0bf8a11
Author: Márton Elek 
Authored: Tue Oct 16 15:14:05 2018 +0200
Committer: Márton Elek 
Committed: Tue Oct 16 17:30:53 2018 +0200

--
 .../dist/src/main/smoketest/s3/README.md|  27 +++
 .../dist/src/main/smoketest/s3/__init__.robot   |  21 ++
 .../dist/src/main/smoketest/s3/awscli.robot |  63 --
 .../dist/src/main/smoketest/s3/awss3.robot  |  47 
 .../dist/src/main/smoketest/s3/buckethead.robot |  34 +++
 .../dist/src/main/smoketest/s3/bucketlist.robot |  32 +++
 .../dist/src/main/smoketest/s3/bucketv2.robot   |  66 --
 .../dist/src/main/smoketest/s3/bucketv4.robot   |  71 --
 .../src/main/smoketest/s3/commonawslib.robot|  43 +++-
 .../src/main/smoketest/s3/objectdelete.robot|  72 ++
 .../src/main/smoketest/s3/objectputget.robot|  42 
 hadoop-ozone/dist/src/main/smoketest/test.sh|   5 +-
 .../apache/hadoop/ozone/s3/EndpointBase.java| 158 -
 .../hadoop/ozone/s3/bucket/DeleteBucket.java|  67 --
 .../hadoop/ozone/s3/bucket/HeadBucket.java  |  61 -
 .../hadoop/ozone/s3/bucket/ListBucket.java  |  74 ---
 .../ozone/s3/bucket/ListBucketResponse.java |  55 -
 .../hadoop/ozone/s3/bucket/PutBucket.java   |  79 ---
 .../hadoop/ozone/s3/bucket/package-info.java|  30 ---
 .../ozone/s3/commontypes/IsoDateAdapter.java|   2 +-
 .../ozone/s3/endpoint/BucketEndpoint.java   | 199 +
 .../hadoop/ozone/s3/endpoint/EndpointBase.java  | 213 ++
 .../ozone/s3/endpoint/ListBucketResponse.java   |  55 +
 .../ozone/s3/endpoint/ListObjectResponse.java   | 158 +
 .../ozone/s3/endpoint/ObjectEndpoint.java   | 222 +++
 .../hadoop/ozone/s3/endpoint/RootEndpoint.java  |  82 +++
 .../hadoop/ozone/s3/endpoint/package-info.java  |  30 +++
 .../hadoop/ozone/s3/object/DeleteObject.java|  51 -
 .../hadoop/ozone/s3/object/HeadObject.java  |  74 ---
 .../hadoop/ozone/s3/object/ListObject.java  | 119 --
 .../ozone/s3/object/ListObjectResponse.java | 147 
 .../hadoop/ozone/s3/object/PutObject.java   |  92 
 .../hadoop/ozone/s3/object/package-info.java|  29 ---
 .../hadoop/ozone/client/OzoneVolumeStub.java|   3 +-
 .../ozone/s3/bucket/TestBucketResponse.java |  40 
 .../ozone/s3/bucket/TestDeleteBucket.java   |  99 -
 .../hadoop/ozone/s3/bucket/TestGetBucket.java   | 114 --
 .../hadoop/ozone/s3/bucket/TestHeadBucket.java  |  85 ---
 .../hadoop/ozone/s3/bucket/TestListBucket.java  |  97 
 .../hadoop/ozone/s3/bucket/package-info.java|  21 --
 .../ozone/s3/endpoint/TestBucketDelete.java | 100 +
 .../hadoop/ozone/s3/endpoint/TestBucketGet.java | 115 ++
 .../ozone/s3/endpoint/TestBucketHead.java   |  71 ++
 .../ozone/s3/endpoint/TestBucketResponse.java   |  38 
 .../ozone/s3/endpoint/TestObjectDelete.java |  60 +
 .../hadoop/ozone/s3/endpoint/TestObjectGet.java |  80 +++
 .../ozone/s3/endpoint/TestObjectHead.java   |  96 
 .../hadoop/ozone/s3/endpoint/TestPutObject.java |  91 
 .../hadoop/ozone/s3/endpoint/TestRootList.java  |  79 +++
 .../hadoop/ozone/s3/endpoint/package-info.java  |  21 ++
 .../ozone/s3/object/TestDeleteObject.java   |  56 -
 .../hadoop/ozone/s3/object/TestHeadObject.java  |  95 
 .../hadoop/ozone/s3/object/TestPutObject.java   |  89 
 .../hadoop/ozone/s3/object/package-info.java|  21 --
 54 files changed, 2032 insertions(+), 1959 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2914e5/hadoop-ozone/dist/src/main/smoketest/s3/README.md
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/README.md b/hadoop-ozone/dist/src/main/smoketest/s3/README.md
new file mode 100644
index 000..884ba2e
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/README.md
@@ -0,0 +1,27 @@
+
+
+## Ozone S3 Gateway Acceptance Tests
+
+Note: the aws cli based acceptance tests can be cross-checked with the original AWS s3 endpoint.
+
+You need to
+
+  1. Create a bucket
+  2. Configure your local aws cli
+  3. Set bucket/endpointurl during the robot test execution
+
+```
+robot -v bucket:ozonetest -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazona

[27/50] [abbrv] hadoop git commit: HDDS-657. Remove {volume} path segments from all the remaining rest endpoints. Contributed by Elek, Marton.

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2914e5/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestHeadObject.java
--
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestHeadObject.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestHeadObject.java
deleted file mode 100644
index 5b65dac..000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestHeadObject.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.hadoop.ozone.s3.object;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import java.io.IOException;
-
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-
-/**
- * Test head object.
- */
-public class TestHeadObject {
-  private String volName = "vol1";
-  private String bucketName = "b1";
-  private OzoneClientStub clientStub;
-  private ObjectStore objectStoreStub;
-  private HeadObject headObject;
-  private OzoneBucket bucket;
-
-  @Before
-  public void setup() throws IOException {
-//Create client stub and object store stub.
-clientStub = new OzoneClientStub();
-objectStoreStub = clientStub.getObjectStore();
-
-// Create volume and bucket
-objectStoreStub.createVolume(volName);
-OzoneVolume volumeStub = objectStoreStub.getVolume(volName);
-volumeStub.createBucket(bucketName);
-bucket = objectStoreStub.getVolume(volName).getBucket(bucketName);
-
-// Create HeadBucket and setClient to OzoneClientStub
-headObject = new HeadObject();
-headObject.setClient(clientStub);
-  }
-
-  @Test
-  public void testHeadObject() throws Exception {
-//GIVEN
-String value = RandomStringUtils.randomAlphanumeric(32);
-OzoneOutputStream out = bucket.createKey("key1",
-value.getBytes().length, ReplicationType.STAND_ALONE,
-ReplicationFactor.ONE);
-out.write(value.getBytes());
-out.close();
-
-//WHEN
-Response response = headObject.head(volName, bucketName, "key1");
-
-//THEN
-Assert.assertEquals(200, response.getStatus());
-Assert.assertEquals(value.getBytes().length,
-Long.parseLong(response.getHeaderString("Content-Length")));
-  }
-
-  @Test
-  public void testHeadFailByBadName() throws Exception {
-//Head an object that doesn't exist.
-try {
-  headObject.head(volName, bucketName, "badKeyName");
-} catch (OS3Exception ex) {
-  Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
-  Assert.assertTrue(ex.getErrorMessage().contains("object does not 
exist"));
-  Assert.assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
-}
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2914e5/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
--
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
deleted file mode 100644
index a8f0648..000
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/object/TestPutObject.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org

[46/50] [abbrv] hadoop git commit: YARN-8842. Expose metrics for custom resource types in QueueMetrics. (Contributed by Szilard Nemeth)

2018-10-16 Thread shv
YARN-8842. Expose metrics for custom resource types in QueueMetrics. (Contributed by Szilard Nemeth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84e22a6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84e22a6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84e22a6a

Branch: refs/heads/HDFS-12943
Commit: 84e22a6af46db2859d7d2caf192861cae9b6a1a8
Parents: 538250d
Author: Haibo Chen 
Authored: Tue Oct 16 14:12:02 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 14:14:30 2018 -0700

--
 .../resourcetypes/ResourceTypesTestHelper.java  |  22 +
 .../resourcemanager/scheduler/QueueMetrics.java | 130 +++-
 .../QueueMetricsForCustomResources.java | 158 +
 .../scheduler/capacity/CapacityScheduler.java   |   5 +-
 .../resourcemanager/scheduler/QueueInfo.java|  90 +++
 .../scheduler/QueueMetricsTestData.java | 105 +++
 .../scheduler/ResourceMetricsChecker.java   |  88 ++-
 .../scheduler/TestQueueMetrics.java | 250 +++
 .../TestQueueMetricsForCustomResources.java | 645 +++
 9 files changed, 1325 insertions(+), 168 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
index 98a8a00..3c3c2cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
@@ -16,6 +16,7 @@
 
 package org.apache.hadoop.yarn.resourcetypes;
 
+import com.google.common.collect.Maps;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -24,6 +25,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 /**
  * Contains helper methods to create Resource and ResourceInformation objects.
@@ -90,4 +92,24 @@ public final class ResourceTypesTestHelper {
 return new ResourceValueAndUnit(value, matcher.group(2));
   }
 
+  public static Map<String, Long> extractCustomResources(Resource res) {
+    Map<String, Long> customResources = Maps.newHashMap();
+    for (int i = 0; i < res.getResources().length; i++) {
+      ResourceInformation ri = res.getResourceInformation(i);
+      if (!ri.getName().equals(ResourceInformation.MEMORY_URI)
+          && !ri.getName().equals(ResourceInformation.VCORES_URI)) {
+        customResources.put(ri.getName(), ri.getValue());
+      }
+    }
+    return customResources;
+  }
+
+  public static Map<String, String> extractCustomResourcesAsStrings(
+      Resource res) {
+    Map<String, Long> resValues = extractCustomResources(res);
+    return resValues.entrySet().stream()
+        .collect(Collectors.toMap(
+            Map.Entry::getKey, e -> String.valueOf(e.getValue())));
+  }
+
 }
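A hypothetical usage sketch for the new helpers: given a Resource that carries a custom resource type such as "gpu" (a type registered by the cluster, not defined in this patch) alongside memory and vcores, the helpers return only the custom entries:

import java.util.Map;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;

class CustomResourceReport {
  // "res" and the "gpu" type are assumptions for illustration only.
  static void print(Resource res) {
    Map<String, Long> custom =
        ResourceTypesTestHelper.extractCustomResources(res);
    long gpus = custom.getOrDefault("gpu", 0L);  // memory/vcores never appear
    Map<String, String> asStrings =
        ResourceTypesTestHelper.extractCustomResourcesAsStrings(res);
    System.out.println(gpus + " -> " + asStrings);
  }
}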

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 20a5a1f..1315c2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudien

[41/50] [abbrv] hadoop git commit: YARN-8798. [Submarine] Job should not be submitted if --input_path option is missing. (Zhankun Tang via wangda)

2018-10-16 Thread shv
YARN-8798. [Submarine] Job should not be submitted if --input_path option is missing. (Zhankun Tang via wangda)

Change-Id: I7ae0e44eb5179b04a6ac861ec1c65f3b18c38f0f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143d7477
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143d7477
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143d7477

Branch: refs/heads/HDFS-12943
Commit: 143d74775b2b62884090fdd88874134b9eab2888
Parents: 46d6e00
Author: Wangda Tan 
Authored: Tue Oct 16 13:39:34 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../client/cli/param/RunJobParameters.java  |  6 +++
 .../client/cli/TestRunJobCliParsing.java| 39 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143d7477/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
index 92a1883..d923e0f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
@@ -62,6 +62,12 @@ public class RunJobParameters extends RunParameters {
 if (parsedCommandLine.getOptionValue(CliConstants.N_WORKERS) != null) {
   nWorkers = Integer.parseInt(
   parsedCommandLine.getOptionValue(CliConstants.N_WORKERS));
+      // Only check null value.
+      // Training job shouldn't ignore INPUT_PATH option
+      // But if nWorkers is 0, INPUT_PATH can be ignored because user can only run Tensorboard
+      if (null == input && 0 != nWorkers) {
+        throw new ParseException("\"--" + CliConstants.INPUT_PATH + "\" is absent");
+      }
 }
 
 int nPS = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143d7477/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
index 295d6a8..240de06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.yarn.submarine.client.cli;
 
+import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -136,6 +137,44 @@ public class TestRunJobCliParsing {
   }
 
   @Test
+  public void testNoInputPathOptionSpecified() throws Exception {
+RunJobCli runJobCli = new RunJobCli(getMockClientContext());
+    String expectedErrorMessage = "\"--" + CliConstants.INPUT_PATH + "\" is absent";
+String actualMessage = "";
+try {
+  runJobCli.run(
+  new String[]{"--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+  "--checkpoint_path", "hdfs://output",
+  "--num_workers", "1", "--worker_launch_cmd", "python run-job.py",
+  "--worker_resources", "memory=4g,vcores=2", "--tensorboard",
+  "true", "--verbose", "--wait_job_finish"});
+} catch (ParseException e) {
+  actualMessage = e.getMessage();
+  e.printStackTrace();
+}
+Assert.assertEquals(expectedErrorMessage, actualMessage);
+  }
+
+  /**
+   * when only run tensorboard, input_path is not needed
+   * */
+  @Test
+  public void testNoInputPathOptionButOnlyRunTensorboard() throws Exception {
+RunJobCli runJobCli = new RunJobCli(getMoc

[10/50] [abbrv] hadoop git commit: Revert "HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee."

2018-10-16 Thread shv
Revert "HDDS-629. Make ApplyTransaction calls in ContainerStateMachine 
idempotent. Contributed by Shashikant Banerjee."

This reverts commit 0473b6817cfe4f03acdcb0eedc78b509244f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22f37af9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22f37af9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22f37af9

Branch: refs/heads/HDFS-12943
Commit: 22f37af93583e9c63e03f0781ffb903e35544559
Parents: 0473b68
Author: Jitendra Pandey 
Authored: Sat Oct 13 12:14:39 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 12:14:39 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 -
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../common/helpers/ContainerReport.java | 205 +++
 .../common/helpers/KeyValueContainerReport.java | 117 +++
 .../container/common/interfaces/Container.java  |   5 -
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 --
 .../keyvalue/impl/BlockManagerImpl.java |  21 +-
 .../container/ozoneimpl/ContainerReader.java|   7 -
 .../StorageContainerDatanodeProtocol.proto  |   1 -
 .../ozone/om/helpers/OmKeyLocationInfo.java |   4 -
 .../hadoop/ozone/client/rpc/TestBCSID.java  | 147 -
 12 files changed, 328 insertions(+), 208 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ccc648..923271c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,7 +113,6 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
 
   /**
* OM LevelDB prefixes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index 04c87ae..a3430f8 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,8 +42,7 @@ public final class MetadataKeyFilters {
   new MetadataKeyFilters.KeyPrefixFilter()
   .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
   .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
-  .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
+  .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
 
   private MetadataKeyFilters() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f37af9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
new file mode 100644
index 000..a4c1f2f
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, ei

[03/50] [abbrv] hadoop git commit: HDDS-445. Create a logger to print out all of the incoming requests. Contributed by Bharat Viswanadham.

2018-10-16 Thread shv
HDDS-445. Create a logger to print out all of the incoming requests. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c1fe073
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c1fe073
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c1fe073

Branch: refs/heads/HDFS-12943
Commit: 3c1fe073d2fef76676660144e7dce2050761ae64
Parents: ddc9649
Author: Anu Engineer 
Authored: Fri Oct 12 16:27:54 2018 -0700
Committer: Anu Engineer 
Committed: Fri Oct 12 16:27:54 2018 -0700

--
 .../hadoop-common/src/main/conf/log4j.properties   | 6 ++
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config   | 4 
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c1fe073/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index aeae2b8..0214da3 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -271,6 +271,12 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
 #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+#Http Server request logs for Ozone S3Gateway
+log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log
+log4j.appender.s3gatewayrequestlog.RetainDays=3
+
 
 # WebHdfs request log on datanodes
 # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c1fe073/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
index 2b22874..dd53d9d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
@@ -31,6 +31,10 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.Filename=/tmp/jetty-s3gateway-yyyy_mm_dd.log
+LOG4J.PROPERTIES_log4j.appender.s3gatewayrequestlog.RetainDays=3
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm





[43/50] [abbrv] hadoop git commit: YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
new file mode 100644
index 000..7b3c2a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## @description  check install user
+## @audience     public
+## @stability    stable
+function check_install_user()
+{
+  if [[ $(id -u) -ne 0 ]]; then
+    echo "This script must be run with a ROOT user!"
+    exit # don't call exit_install()
+  fi
+}
+
+## @description  exit install
+## @audience     public
+## @stability    stable
+function exit_install()
+{
+  echo "Exit the installation!" | tee -a $LOG
+  exit $1
+}
+
+## @description  Check if the IP address format is correct
+## @audience     public
+## @stability    stable
+function valid_ip()
+{
+  local ip=$1
+  local stat=1
+
+  if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+    OIFS=$IFS
+    IFS='.'
+    ip=($ip)
+    IFS=$OIFS
+
+    if [[ ${ip[0]} -le 255 && ${ip[1]} -le 255 && ${ip[2]} -le 255 && ${ip[3]} -le 255 ]]; then
+      stat=$?
+    fi
+  fi
+
+  return $stat
+}
+
+## @description  Check if the configuration file configuration is correct
+## @audience     public
+## @stability    stable
+function check_install_conf()
+{
+  echo "Check if the configuration file configuration is correct ..." | tee -a $LOG
+
+  # check etcd conf
+  hostCount=${#ETCD_HOSTS[@]}
+  if [[ $hostCount -lt 3 && hostCount -ne 0 ]]; then # <>2
+    echo "Number of nodes = [$hostCount], must be configured to be greater than or equal to 3 servers! " | tee -a $LOG
+    exit_install
+  fi
+  for ip in ${ETCD_HOSTS[@]}
+  do
+    if ! valid_ip $ip; then
+      echo "ETCD_HOSTS=[$ip], IP address format is incorrect! " | tee -a $LOG
+      exit_install
+    fi
+  done
+  echo "Check if the configuration file configuration is correct [ Done ]" | tee -a $LOG
+}
+
+## @description  index by EtcdHosts list
+## @audience     public
+## @stability    stable
+function indexByEtcdHosts() {
+  index=0
+  while [ "$index" -lt "${#ETCD_HOSTS[@]}" ]; do
+    if [ "${ETCD_HOSTS[$index]}" = "$1" ]; then
+      echo $index
+      return
+    fi
+    let "index++"
+  done
+  echo ""
+}
+
+## @description  get local IP
+## @audience     public
+## @stability    stable
+function getLocalIP()
+{
+  local _ip _myip _line _nl=$'\n'
+  while IFS=$': \t' read -a _line; do
+    [ -z "${_line%inet}" ] &&
+      _ip=${_line[${#_line[1]}>4?1:2]} &&
+      [ "${_ip#127.0.0.1}" ] && _myip=$_ip
+  done < <(LANG=C /sbin/ifconfig)
+  printf ${1+-v} $1 "%s${_nl:0:$[${#1}>0?0:1]}" $_myip
+}
+
+## @description  get ip list
+## @audience     public
+## @stability    stable
+function get_ip_list()
+{
+  array=$(ifconfig | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ \t]*//g' | cut -d ' ' -f2)
+
+  for ip in ${array[@]}
+  do
+    LOCAL_HOST_IP_LIST+=(${ip})
+  done
+}
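
A minimal Java rendition of the valid_ip check above (illustrative only, not
part of the patch): the regular expression accepts the dotted-quad shape, and
each octet is then range-checked, mirroring the bash function's two stages.

import java.util.regex.Pattern;

final class ValidIpSketch {
  private static final Pattern DOTTED_QUAD =
      Pattern.compile("^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$");

  static boolean validIp(String ip) {
    // Stage 1: shape check, same regex as the bash [[ =~ ]] test.
    if (!DOTTED_QUAD.matcher(ip).matches()) {
      return false;
    }
    // Stage 2: every octet must be <= 255, like the ${ip[n]} -le 255 checks.
    for (String octet : ip.split("\\.")) {
      if (Integer.parseInt(octet) > 255) {
        return false;
      }
    }
    return true;
  }
}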





[33/50] [abbrv] hadoop git commit: HDDS-378. Remove dependencies between hdds/ozone and hdfs proto files. Contributed by Elek, Marton.

2018-10-16 Thread shv
HDDS-378. Remove dependencies between hdds/ozone and hdfs proto files. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f0b43fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f0b43fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f0b43fa

Branch: refs/heads/HDFS-12943
Commit: 5f0b43fa93d02c90956d48eb5c6c0b60deeac91c
Parents: 53e5173
Author: Arpit Agarwal 
Authored: Tue Oct 16 10:40:00 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 16 10:40:00 2018 -0700

--
 hadoop-hdds/common/pom.xml  |  6 --
 .../main/proto/ScmBlockLocationProtocol.proto   |  1 -
 .../StorageContainerLocationProtocol.proto  |  1 -
 hadoop-hdds/container-service/pom.xml   | 11 +---
 .../apache/hadoop/ozone/client/BucketArgs.java  |  2 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java |  2 +-
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../hadoop/ozone/client/rest/RestClient.java|  2 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  2 +-
 hadoop-ozone/common/pom.xml |  6 --
 .../hadoop/hdds/protocol/StorageType.java   | 64 
 .../ozone/client/rest/response/BucketInfo.java  |  2 +-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  7 +--
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  9 ++-
 .../hadoop/ozone/web/handlers/BucketArgs.java   |  2 +-
 .../hadoop/ozone/web/response/BucketInfo.java   |  2 +-
 .../src/main/proto/OzoneManagerProtocol.proto   | 12 +++-
 .../apache/hadoop/ozone/web/TestBucketInfo.java |  2 +-
 .../TestStorageContainerManagerHelper.java  |  2 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |  2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|  2 +-
 .../om/TestMultipleContainerReadWrite.java  |  2 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  2 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  2 +-
 .../web/handlers/BucketProcessTemplate.java |  2 +-
 .../hadoop/ozone/om/BucketManagerImpl.java  |  2 +-
 .../hadoop/ozone/om/S3BucketManagerImpl.java|  2 +-
 .../hadoop/ozone/om/TestBucketManagerImpl.java  |  2 +-
 .../hadoop/ozone/client/OzoneBucketStub.java|  2 +-
 .../hadoop/ozone/client/OzoneVolumeStub.java|  2 +-
 30 files changed, 102 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index bf2a6b9..65cd1d1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -218,12 +218,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <param>
                   ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
                 </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
-                </param>
                 <param>${basedir}/src/main/proto</param>
               </imports>
               <source>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto 
b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 9b4e0ac..01a0dde 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -28,7 +28,6 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdds;
 
-import "hdfs.proto";
 import "hdds.proto";
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
--
diff --git 
a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto 
b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index fb01d6a..49d1975 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -28,7 +28,6 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdds;
 
-import "hdfs.proto";
 import "hdds.proto";
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/po

[34/50] [abbrv] hadoop git commit: HDFS-13993. TestDataNodeVolumeFailure#testTolerateVolumeFailuresAfterAddingMoreVolumes is flaky. Contributed by Ayush Saxena.

2018-10-16 Thread shv
HDFS-13993. 
TestDataNodeVolumeFailure#testTolerateVolumeFailuresAfterAddingMoreVolumes is 
flaky. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f90c64e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f90c64e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f90c64e6

Branch: refs/heads/HDFS-12943
Commit: f90c64e6242facf38c2baedeeda42e4a8293e642
Parents: 5f0b43f
Author: Inigo Goiri 
Authored: Tue Oct 16 11:22:57 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 16 11:22:57 2018 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f90c64e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 7d04942..b70a356 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -389,6 +389,7 @@ public class TestDataNodeVolumeFailure {
 DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
 DataNodeTestUtils.waitForDiskError(dn0,
 DataNodeTestUtils.getVolume(dn0, dn0Vol2));
+dn0.checkDiskError();
 assertFalse(dn0.shouldRun());
   }
 





[14/50] [abbrv] hadoop git commit: HDDS-519. Implement ListBucket REST endpoint. Contributed by LiXin Ge.

2018-10-16 Thread shv
HDDS-519. Implement ListBucket REST endpoint. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5033deb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5033deb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5033deb1

Branch: refs/heads/HDFS-12943
Commit: 5033deb13b7f393d165e282b0c3b9e1ee1390bb2
Parents: 8853fc8
Author: Márton Elek 
Authored: Sun Oct 14 11:29:50 2018 +0200
Committer: Márton Elek 
Committed: Sun Oct 14 11:30:18 2018 +0200

--
 .../hadoop/ozone/s3/bucket/ListBucket.java  | 74 +++
 .../ozone/s3/bucket/ListBucketResponse.java | 55 +++
 .../ozone/s3/commontypes/BucketMetadata.java| 53 +++
 .../hadoop/ozone/s3/exception/S3ErrorTable.java |  6 +-
 .../hadoop/ozone/client/OzoneVolumeStub.java|  7 +-
 .../hadoop/ozone/s3/bucket/TestListBucket.java  | 97 
 6 files changed, 290 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5033deb1/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucket.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucket.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucket.java
new file mode 100644
index 000..962b8a6
--- /dev/null
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucket.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.s3.bucket;
+
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.s3.EndpointBase;
+import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.*;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Iterator;
+
+/**
+ * List Object Rest endpoint.
+ */
+@Path("/{volume}")
+public class ListBucket extends EndpointBase {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ListBucket.class);
+
+  @GET
+  @Produces(MediaType.APPLICATION_XML)
+  public ListBucketResponse get(@PathParam("volume") String volumeName)
+  throws OS3Exception, IOException {
+OzoneVolume volume;
+try {
+  volume = getVolume(volumeName);
+} catch (NotFoundException ex) {
+  LOG.error("Exception occurred in ListBucket: volume {} not found.",
+  volumeName, ex);
+  OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
+  .NO_SUCH_VOLUME, S3ErrorTable.Resource.VOLUME);
+  throw os3Exception;
+} catch (IOException e) {
+  throw e;
+}
+
+Iterator<? extends OzoneBucket> volABucketIter = volume.listBuckets(null);
+ListBucketResponse response = new ListBucketResponse();
+
+while(volABucketIter.hasNext()) {
+  OzoneBucket next = volABucketIter.next();
+  BucketMetadata bucketMetadata = new BucketMetadata();
+  bucketMetadata.setName(next.getName());
+  bucketMetadata.setCreationDate(
+  Instant.ofEpochMilli(next.getCreationTime()));
+  response.addBucket(bucketMetadata);
+}
+
+return response;
+  }
+}
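
A short sketch of how a client is expected to drive this endpoint, using the
standard JAX-RS 2.x client API (the gateway address and volume name below are
assumptions for illustration, not values from the patch):

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;

public class ListBucketClientSketch {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    // GET /{volume} returns the XML-serialized ListBucketResponse.
    String xml = client.target("http://localhost:9878") // assumed gateway URL
        .path("s3v")                                    // assumed volume name
        .request(MediaType.APPLICATION_XML)
        .get(String.class);
    System.out.println(xml);
    client.close();
  }
}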

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5033deb1/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucketResponse.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucketResponse.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBucketResponse.java
new file mode 100644
index 000..1f117dd
--- /dev/null
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/bucket/ListBu

[21/50] [abbrv] hadoop git commit: HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances.

2018-10-16 Thread shv
HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation 
tokens that can authenticate to all KMS instances.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6fc72a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6fc72a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6fc72a0

Branch: refs/heads/HDFS-12943
Commit: b6fc72a0250ac3f2341ebe8a14d19b073e6224c8
Parents: ee1c80e
Author: Xiao Chen 
Authored: Mon Oct 15 10:50:27 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 15 10:51:55 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  2 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |  2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../security/token/DelegationTokenIssuer.java   |  3 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 11 ++-
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 32 +++-
 9 files changed, 34 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 29c5bcd..05d99ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
index 81caff4..187bee6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -22,7 +22,7 @@ import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
  * File systems that support Encryption Zones have to implement this interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 3d40b6a..fe4159b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/org/a

[44/50] [abbrv] hadoop git commit: YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

2018-10-16 Thread shv
YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

Change-Id: I46e8d9fd32c7745c313030da62da41486a77b3ea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46d6e001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46d6e001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46d6e001

Branch: refs/heads/HDFS-12943
Commit: 46d6e0016610ced51a76189daeb3ad0e3dbbf94c
Parents: ed08dd3
Author: Wangda Tan 
Authored: Tue Oct 16 13:36:59 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |   8 +
 .../installation/install.conf   |  74 
 .../installation/install.sh | 116 +
 .../package/calico/calico-node.service  |  50 +++
 .../installation/package/calico/calicoctl.cfg   |  22 +
 .../installation/package/docker/daemon.json |  23 +
 .../installation/package/docker/docker.service  |  35 ++
 .../installation/package/etcd/etcd.service  |  40 ++
 .../package/hadoop/container-executor.cfg   |  41 ++
 .../installation/package/submarine/submarine.sh |  25 ++
 .../installation/scripts/calico.sh  | 224 ++
 .../installation/scripts/docker.sh  | 166 +++
 .../installation/scripts/download-server.sh |  42 ++
 .../installation/scripts/environment.sh | 213 +
 .../installation/scripts/etcd.sh| 152 +++
 .../installation/scripts/hadoop.sh  | 117 +
 .../installation/scripts/menu.sh| 444 +++
 .../installation/scripts/nvidia-docker.sh   |  99 +
 .../installation/scripts/nvidia.sh  | 120 +
 .../installation/scripts/submarine.sh   |  38 ++
 .../installation/scripts/utils.sh   | 123 +
 21 files changed, 2172 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index a2ea08c..9c401e8 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -66,6 +66,14 @@
     <fileMode>0755</fileMode>
   </fileSet>
   <fileSet>
+    <directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation</directory>
+    <outputDirectory>/share/hadoop/yarn/submarine-installer</outputDirectory>
+    <includes>
+      <include>**/*</include>
+    </includes>
+    <fileMode>0755</fileMode>
+  </fileSet>
+  <fileSet>
     <directory>hadoop-yarn/conf</directory>
     <outputDirectory>etc/hadoop</outputDirectory>
     <includes>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
new file mode 100644
index 000..82dcf61
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# install config #
+
+# DNS
+LOCAL_DNS_HOST="172.17.0.9"   # /etc/resolv.conf
+YARN_DNS_HOST="10.196.69.173" # yarn dns server ip address
+
+# etcd hosts list
+ETCD_HOSTS=(10.196.69.173 10.196.69.174 10.196.69.175)
+
+# docker registry ip:port
+DOCKER_REGISTRY="10.120.196.232:5000"
+
+# Start the HTTP download service on the specified server:
+# all of the dependencies are downloaded into that HTTP server once,
+# and the install script run on the other servers fetches them from it
+# automatically, so that every server does not have to download them
+# slowly from the internet on its own.
+# At the same time, you can also manually download the dependenc

[22/50] [abbrv] hadoop git commit: HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.

2018-10-16 Thread shv
HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e13a38f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e13a38f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e13a38f4

Branch: refs/heads/HDFS-12943
Commit: e13a38f4bc358666e64687636cf7b025bce83b46
Parents: b6fc72a
Author: Jitendra Pandey 
Authored: Mon Oct 15 11:52:38 2018 -0700
Committer: Jitendra Pandey 
Committed: Mon Oct 15 11:52:38 2018 -0700

--
 .../container/keyvalue/KeyValueHandler.java |  10 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |  11 +-
 .../keyvalue/impl/BlockManagerImpl.java |  24 +++-
 .../keyvalue/impl/ChunkManagerImpl.java |  43 +++
 .../TestContainerStateMachineIdempotency.java   | 121 +++
 .../common/impl/TestContainerPersistence.java   |  17 +--
 6 files changed, 203 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e13a38f4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4c87b19..da77f1c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -241,9 +241,12 @@ public class KeyValueHandler extends Handler {
 newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
 containerSet.addContainer(newContainer);
   } else {
-throw new StorageContainerException("Container already exists with " +
-"container Id " + containerID, ContainerProtos.Result
-.CONTAINER_EXISTS);
+
+// The create container request for an already existing container can
+// arrive in case the ContainerStateMachine reapplies the transaction
+// on datanode restart. Just log a warning msg here.
+LOG.warn("Container already exists." +
+"container Id " + containerID);
   }
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -370,6 +373,7 @@ public class KeyValueHandler extends Handler {
 
   /**
* Handles Close Container Request. An open container is closed.
+   * Close Container call is idempotent.
*/
   ContainerCommandResponseProto handleCloseContainer(
   ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
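
The commit's idempotency contract, stated in the comment above: Ratis may
re-apply an already-committed log entry after a datanode restart, so
applyTransaction handlers must converge to the same state instead of failing.
A generic sketch of that pattern (illustrative only, not the actual
ContainerStateMachine code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class IdempotentCreateSketch {
  private final Map<Long, String> containers = new ConcurrentHashMap<>();

  /** Applying the same CreateContainer entry twice leaves identical state. */
  void applyCreateContainer(long containerId) {
    String previous = containers.putIfAbsent(containerId, "OPEN");
    if (previous != null) {
      // Replayed transaction after restart: warn and continue, don't fail.
      System.out.println("Container already exists, id=" + containerId);
    }
  }
}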

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e13a38f4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 8bdae0f..20598d9 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -231,21 +231,18 @@ public final class ChunkUtils {
*
* @param chunkFile - chunkFile to write data into.
* @param info - chunk info.
-   * @return boolean isOverwrite
-   * @throws StorageContainerException
+   * @return true if the chunkFile exists and chunkOffset < chunkFile length,
+   * false otherwise.
*/
   public static boolean validateChunkForOverwrite(File chunkFile,
-  ChunkInfo info) throws StorageContainerException {
+  ChunkInfo info) {
 
 Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
 
 if (isOverWriteRequested(chunkFile, info)) {
   if (!isOverWritePermitted(info)) {
-log.error("Rejecting write chunk request. Chunk overwrite " +
+log.warn("Duplicate write chunk request. Chunk overwrite " +
 "without explicit request. {}", info.toString());
-throw new StorageContainerException("Rejecting write chunk request. " +
-"OverWrite flag required." + info.toString(),
-OVERWRITE_FLAG_REQUIRED);
   }
   return true;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e13a38f4/hadoop-hdds/container-service/src/main/java/org/apache/hadoo

[50/50] [abbrv] hadoop git commit: HDFS-13925. Unit Test for transitioning between different states. Contributed by Sherwood Zheng.

2018-10-16 Thread shv
HDFS-13925. Unit Test for transitioning between different states. Contributed 
by Sherwood Zheng.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a5b78ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a5b78ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a5b78ac

Branch: refs/heads/HDFS-12943
Commit: 3a5b78ac47ae8bca747e530941728d573975104c
Parents: 8094c38
Author: Konstantin V Shvachko 
Authored: Tue Oct 16 16:35:02 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Tue Oct 16 16:35:02 2018 -0700

--
 .../server/namenode/ha/TestObserverNode.java| 23 
 1 file changed, 23 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a5b78ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index 28fd330..b18c5b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import static 
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
@@ -30,11 +31,13 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -100,6 +103,26 @@ public class TestObserverNode {
   }
 
   @Test
+  public void testNoActiveToObserver() throws Exception {
+try {
+  dfsCluster.transitionToObserver(0);
+} catch (ServiceFailedException e) {
+  return;
+}
+fail("active cannot be transitioned to observer");
+  }
+
+  @Test
+  public void testNoObserverToActive() throws Exception {
+try {
+  dfsCluster.transitionToActive(2);
+} catch (ServiceFailedException e) {
+  return;
+}
+fail("observer cannot be transitioned to active");
+  }
+
+  @Test
   public void testSimpleRead() throws Exception {
 Path testPath2 = new Path(testPath, "test2");
 





[35/50] [abbrv] hadoop git commit: HADOOP-15826. @Retries annotation of putObject() call & uses wrong. Contributed by Steve Loughran and Ewan Higgs.

2018-10-16 Thread shv
HADOOP-15826. @Retries annotation of putObject() call & uses wrong.
Contributed by Steve Loughran and Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d59ca43b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d59ca43b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d59ca43b

Branch: refs/heads/HDFS-12943
Commit: d59ca43bff8a457ce7ab62a61acd89aacbe71b93
Parents: f90c64e
Author: Steve Loughran 
Authored: Tue Oct 16 20:02:54 2018 +0100
Committer: Steve Loughran 
Committed: Tue Oct 16 20:02:54 2018 +0100

--
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java   | 5 -
 .../java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java | 4 ++--
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d59ca43b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index df0ec5d..3c432fc 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2449,11 +2449,14 @@ public class S3AFileSystem extends FileSystem 
implements StreamCapabilities {
* Wait for an upload to complete.
* If the waiting for completion is interrupted, the upload will be
* aborted before an {@code InterruptedIOException} is thrown.
-   * @param upload upload to wait for
+   * If the upload (or its result collection) failed, this is where
+   * the failure is raised as an AWS exception
* @param key destination key
+   * @param uploadInfo upload to wait for
* @return the upload result
* @throws InterruptedIOException if the blocking was interrupted.
*/
+  @Retries.OnceRaw
   UploadResult waitForUploadCompletion(String key, UploadInfo uploadInfo)
   throws InterruptedIOException {
 Upload upload = uploadInfo.getUpload();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d59ca43b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
index a85a87f..a5f6817 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
@@ -436,7 +436,7 @@ public class WriteOperationHelper {
* @return the result of the operation
* @throws IOException on problems
*/
-  @Retries.OnceTranslated
+  @Retries.RetryTranslated
   public UploadResult uploadObject(PutObjectRequest putObjectRequest)
   throws IOException {
 // no retry; rely on xfer manager logic
@@ -451,7 +451,7 @@ public class WriteOperationHelper {
* @throws IOException on problems
* @param destKey destination key
*/
-  @Retries.RetryTranslated
+  @Retries.OnceTranslated
   public void revertCommit(String destKey) throws IOException {
 once("revert commit", destKey,
 () -> {
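
The annotation swap encodes which layer owns the retries: "once" helpers run
the operation a single time and only translate exceptions (the diff's own
comment says "no retry; rely on xfer manager logic"), while "retry" helpers
add a retry loop around the translated call. A generic sketch of that
distinction (not the S3A Invoker implementation):

import java.io.IOException;
import java.util.concurrent.Callable;

final class RetrySemanticsSketch {
  /** OnceTranslated-style: no retry, just exception translation. */
  static <T> T once(Callable<T> op) throws IOException {
    try {
      return op.call();
    } catch (Exception e) {
      throw new IOException("translated: " + e.getMessage(), e);
    }
  }

  /** RetryTranslated-style: bounded retries around the translated call. */
  static <T> T retry(int attempts, Callable<T> op) throws IOException {
    IOException last = new IOException("no attempts made");
    for (int i = 0; i < attempts; i++) {
      try {
        return once(op);
      } catch (IOException e) {
        last = e;
      }
    }
    throw last;
  }
}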





[24/50] [abbrv] hadoop git commit: HADOOP-15853. TestConfigurationDeprecation leaves behind a temp file, resulting in a license issue (ayushtkn via rkanter)

2018-10-16 Thread shv
HADOOP-15853. TestConfigurationDeprecation leaves behind a temp file, resulting 
in a license issue (ayushtkn via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe1a40a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe1a40a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe1a40a

Branch: refs/heads/HDFS-12943
Commit: 7fe1a40a6ba692ce5907b96db3a7cb3639c091bd
Parents: ef9dc6c
Author: Robert Kanter 
Authored: Mon Oct 15 15:02:37 2018 -0700
Committer: Robert Kanter 
Committed: Mon Oct 15 15:02:37 2018 -0700

--
 .../java/org/apache/hadoop/conf/TestConfigurationDeprecation.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe1a40a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
index 4014b60..efb8131 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
@@ -77,6 +77,7 @@ public class TestConfigurationDeprecation {
 new File(CONFIG).delete();
 new File(CONFIG2).delete();
 new File(CONFIG3).delete();
+new File(CONFIG4).delete();
   }
   
   private void startConfig() throws IOException{





[47/50] [abbrv] hadoop git commit: HDDS-665. Add hdds.datanode.dir to docker-config. Contributed by Bharat Viswanadham.

2018-10-16 Thread shv
HDDS-665. Add hdds.datanode.dir to docker-config. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/657032f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/657032f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/657032f5

Branch: refs/heads/HDFS-12943
Commit: 657032f5ddd2cdf111fc89c96672fe08b483cbfc
Parents: 84e22a6
Author: Bharat Viswanadham 
Authored: Tue Oct 16 15:29:53 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 16 15:29:53 2018 -0700

--
 hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config   | 1 +
 hadoop-ozone/dist/src/main/compose/ozone/docker-config| 1 +
 hadoop-ozone/dist/src/main/compose/ozonefs/docker-config  | 1 +
 hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config| 2 ++
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config  | 2 ++
 hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config | 1 +
 6 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
index 3b2819f..9729aef 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
@@ -24,6 +24,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.replication=1
 
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 
 HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index f2c8db1..86257ff 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -24,6 +24,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.replication=1
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 4ff7f56..675dcba 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -25,6 +25,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 OZONE-SITE.XML_ozone.replication=1
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
index f1c0147..a814c39 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
@@ -23,6 +23,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 OZONE-SITE.XML_ozone.replication=1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 
b/hadoop-ozone/dist/src/main/compos

[19/50] [abbrv] hadoop git commit: YARN-8775. TestDiskFailures.testLocalDirsFailures sometimes can fail on concurrent File modifications. (Contributed by Antal Bálint Steinbach)

2018-10-16 Thread shv
YARN-8775. TestDiskFailures.testLocalDirsFailures sometimes can fail on 
concurrent File modifications. (Contributed by Antal Bálint Steinbach)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f880ff41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f880ff41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f880ff41

Branch: refs/heads/HDFS-12943
Commit: f880ff418c07965b333c750805c8de77a067b158
Parents: fa94d37
Author: Haibo Chen 
Authored: Mon Oct 15 09:37:20 2018 -0700
Committer: Haibo Chen 
Committed: Mon Oct 15 09:37:20 2018 -0700

--
 .../nodemanager/LocalDirsHandlerService.java|  5 ++-
 .../hadoop/yarn/server/TestDiskFailures.java| 38 
 2 files changed, 19 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f880ff41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index 6eabd0d..b2bb4e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -27,6 +27,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskValidator;
 import org.apache.hadoop.util.DiskValidatorFactory;
@@ -493,7 +495,8 @@ public class LocalDirsHandlerService extends 
AbstractService {
 
   }
 
-  private void checkDirs() {
+  @VisibleForTesting
+  public void checkDirs() {
 boolean disksStatusChange = false;
 Set failedLocalDirsPreCheck =
 new HashSet(localDirs.getFailedDirs());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f880ff41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
index e9de20a..23bb039 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -56,7 +55,12 @@ public class TestDiskFailures {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(TestDiskFailures.class);
 
-  private static final long DISK_HEALTH_CHECK_INTERVAL = 1000;//1 sec
+  /*
+   * Set disk check interval high enough so that it never runs during the test.
+   * Checks will be called manually if necessary.
+   */
+  private static final long TOO_HIGH_DISK_HEALTH_CHECK_INTERVAL =
+  1000 * 60 * 60 * 24;
 
   private static FileContext localFS = null;
   private static final File testDir = new File("target",
@@ -146,9 +150,10 @@ public class TestDiskFailures {
  : YarnConfiguration.NM_LOG_DIRS;
 
 Configuration conf = new Configuration();
-// set disk health check interval to a small value (say 1 sec).
+// set disk health check interval to a large value to effectively disable
+// disk health check done internally in LocalDirsHandlerService
 conf.setLong(YarnC
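
The pattern the patch introduces, in brief (a sketch of the two test-side
steps, not the full test class):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;

public class ManualDiskCheckSketch {
  static void configure(Configuration conf) {
    // Step 1: push the timer-driven check far beyond the test's lifetime.
    conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS,
        24L * 60 * 60 * 1000);
  }

  static void forceCheck(LocalDirsHandlerService dirsHandler) {
    // Step 2: trigger the check deterministically, now that checkDirs()
    // is public via @VisibleForTesting.
    dirsHandler.checkDirs();
  }
}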

[07/50] [abbrv] hadoop git commit: HDDS-654. Add ServiceLoader resource for OzoneFileSystem. Contributed by Jitendra Nath Pandey.

2018-10-16 Thread shv
HDDS-654. Add ServiceLoader resource for OzoneFileSystem. Contributed by 
Jitendra Nath Pandey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/addb8460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/addb8460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/addb8460

Branch: refs/heads/HDFS-12943
Commit: addb84600fa2f9e010768c642db3fdc2ec497d52
Parents: ae268b4
Author: Arpit Agarwal 
Authored: Sat Oct 13 09:19:45 2018 -0700
Committer: Arpit Agarwal 
Committed: Sat Oct 13 09:19:45 2018 -0700

--
 .../services/org.apache.hadoop.fs.FileSystem| 16 
 .../hadoop/fs/ozone/TestOzoneFileSystem.java| 39 
 2 files changed, 55 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/addb8460/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 000..0368002
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.ozone.OzoneFileSystem

http://git-wip-us.apache.org/repos/asf/hadoop/blob/addb8460/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
new file mode 100644
index 000..ba5253f
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Ozone file system tests that are not covered by contract tests.
+ */
+public class TestOzoneFileSystem {
+
+  @Test
+  public void testOzoneFsServiceLoader() throws IOException {
+Assert.assertEquals(
+FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null),
+OzoneFileSystem.class);
+  }
+}
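
What the new service file buys in practice: FileSystem can resolve the Ozone
scheme through the JDK ServiceLoader without an explicit fs.<scheme>.impl
entry in core-site.xml. A sketch (assumes a reachable Ozone cluster and
ozone-site.xml on the classpath; the authority below is a placeholder):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.ozone.OzoneConsts;

public class OzoneFsLoaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        URI.create(OzoneConsts.OZONE_URI_SCHEME + "://bucket.volume.host/"),
        conf);
    System.out.println(fs.getClass().getName()); // OzoneFileSystem
  }
}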





[02/50] [abbrv] hadoop git commit: HDDS-646. TestChunkStreams.testErrorReadGroupInputStream fails. Contributed by Nanda kumar.

2018-10-16 Thread shv
HDDS-646. TestChunkStreams.testErrorReadGroupInputStream fails. Contributed by 
Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddc96493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddc96493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddc96493

Branch: refs/heads/HDFS-12943
Commit: ddc964932817b4c4e4f4dc848dae764d5285e875
Parents: 5c8e023
Author: Arpit Agarwal 
Authored: Fri Oct 12 15:06:42 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Oct 12 15:06:42 2018 -0700

--
 .../src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc96493/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
index 7ff9d63..177694c 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -226,7 +226,7 @@ public class TestChunkStreams {
 
   // read following 300 bytes, but only 200 left
   len = groupInputStream.read(resBuf, 340, 260);
-  assertEquals(5, groupInputStream.getCurrentStreamIndex());
+  assertEquals(4, groupInputStream.getCurrentStreamIndex());
   assertEquals(0, groupInputStream.getRemainingOfIndex(4));
   assertEquals(160, len);
   assertEquals(dataString, new String(resBuf).substring(0, 500));





[25/50] [abbrv] hadoop git commit: HDDS-490. Improve om and scm start up options . Contributed by Namit Maheshwari.

2018-10-16 Thread shv
HDDS-490. Improve om and scm start up options . Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2614078b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2614078b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2614078b

Branch: refs/heads/HDFS-12943
Commit: 2614078b213e03085635b4455ed391438eef8db2
Parents: 7fe1a40
Author: Arpit Agarwal 
Authored: Mon Oct 15 16:45:08 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Oct 15 16:45:08 2018 -0700

--
 .../scm/server/StorageContainerManager.java |   8 +-
 .../dist/src/main/compose/ozonescripts/start.sh |   4 +-
 hadoop-ozone/docs/content/RealCluster.md|  10 +-
 .../org/apache/hadoop/ozone/om/TestOmInit.java  | 103 +++
 .../apache/hadoop/ozone/om/OzoneManager.java|  14 +--
 5 files changed, 121 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2614078b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index ce2725f..9626105 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -927,10 +927,10 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
* Startup options.
*/
   public enum StartupOption {
-INIT("-init"),
-CLUSTERID("-clusterid"),
-GENCLUSTERID("-genclusterid"),
-REGULAR("-regular"),
+INIT("--init"),
+CLUSTERID("--clusterid"),
+GENCLUSTERID("--genclusterid"),
+REGULAR("--regular"),
 HELP("-help");
 
 private final String name;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2614078b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh 
b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
index 3358b07..9540eb9 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
@@ -15,10 +15,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1  docker 
inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers
-docker-compose exec scm /opt/hadoop/bin/ozone scm -init
+docker-compose exec scm /opt/hadoop/bin/ozone scm --init
 docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
 #We need a running SCM for om objectstore creation
 #TODO create a utility to wait for the startup
 sleep 10
-docker-compose exec om /opt/hadoop/bin/ozone om -createObjectStore
+docker-compose exec om /opt/hadoop/bin/ozone om --init
 docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2614078b/hadoop-ozone/docs/content/RealCluster.md
--
diff --git a/hadoop-ozone/docs/content/RealCluster.md 
b/hadoop-ozone/docs/content/RealCluster.md
index 9d86c84..78dd46e 100644
--- a/hadoop-ozone/docs/content/RealCluster.md
+++ b/hadoop-ozone/docs/content/RealCluster.md
@@ -26,7 +26,7 @@ menu:
 Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone 
Manager.
 
 {{< highlight bash >}}
-ozone scm -init
+ozone scm --init
 {{< /highlight >}}
 This allows SCM to create the cluster Identity and initialize its state.
The ```init``` command is similar to Namenode format. It is executed only once,
allowing SCM to create all the required on-disk structures it needs to work
correctly.
@@ -37,7 +37,7 @@ ozone --daemon start scm
 Once we know SCM is up and running, we can create an Object Store for our use. 
This is done by running the following command.
 
 {{< highlight bash >}}
-ozone om -createObjectStore
+ozone om --init
 {{< /highlight >}}
 
 
@@ -50,7 +50,7 @@ ozone --daemon start om
 
At this point Ozone's name services, the Ozone Manager and the block service
SCM, are both running.
 **Please note**: If SCM is not running
-```createObjectStore``` command will fail. SCM start will fail if on-disk data 
structures are missing. So please make sure you have done both ```init``` and 
```createObjectStore``` commands.
+```om --init``` command will fail. SCM start will fail if on-disk data 
structures are missing

[48/50] [abbrv] hadoop git commit: YARN-8810. Fixed a YARN service bug in comparing ConfigFile objects. Contributed by Chandni Singh

2018-10-16 Thread shv
YARN-8810. Fixed a YARN service bug in comparing ConfigFile objects.
Contributed by Chandni Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bfd214a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bfd214a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bfd214a

Branch: refs/heads/HDFS-12943
Commit: 3bfd214a59a60263aff67850c4d646c64fd76a01
Parents: 657032f
Author: Eric Yang 
Authored: Tue Oct 16 18:54:40 2018 -0400
Committer: Eric Yang 
Committed: Tue Oct 16 18:54:40 2018 -0400

--
 .../yarn/service/UpgradeComponentsFinder.java   |  2 +-
 .../yarn/service/api/records/ConfigFile.java|  3 +-
 .../TestDefaultUpgradeComponentsFinder.java | 40 ++--
 3 files changed, 40 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
index 19ff6db..96a34f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
@@ -88,7 +88,7 @@ public interface UpgradeComponentsFinder {
   }
 
   if (!Objects.equals(currentDef.getConfiguration(),
-  currentDef.getConfiguration())) {
+  targetDef.getConfiguration())) {
 return targetDef.getComponents();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index cd9dc84..1cdae86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -199,7 +199,8 @@ public class ConfigFile implements Serializable {
 ConfigFile configFile = (ConfigFile) o;
 return Objects.equals(this.type, configFile.type)
 && Objects.equals(this.destFile, configFile.destFile)
-&& Objects.equals(this.srcFile, configFile.srcFile);
+&& Objects.equals(this.srcFile, configFile.srcFile)
+&& Objects.equals(this.properties, configFile.properties);
   }
 
   @Override
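
The equals() fix adds the properties map to the comparison (and the sibling
hunk above corrects a currentDef-vs-targetDef typo that made the configuration
comparison trivially true). A general Java consequence is worth spelling out:
once equals() covers a new field, hashCode() must cover it too, or equal
objects can land in different hash buckets. A simplified stand-in class, not
the real ConfigFile, sketches the contract.

import java.util.Map;
import java.util.Objects;

public class ConfigFileSketch {
  private String type;
  private String destFile;
  private String srcFile;
  private Map<String, String> properties;

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof ConfigFileSketch)) {
      return false;
    }
    ConfigFileSketch other = (ConfigFileSketch) o;
    return Objects.equals(type, other.type)
        && Objects.equals(destFile, other.destFile)
        && Objects.equals(srcFile, other.srcFile)
        && Objects.equals(properties, other.properties); // the added field
  }

  @Override
  public int hashCode() {
    // Must include every field that equals() compares.
    return Objects.hash(type, destFile, srcFile, properties);
  }
}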

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
index b0a01b3..304e740 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
@

[18/50] [abbrv] hadoop git commit: YARN-8869. YARN Service Client might not work correctly with RM REST API for Kerberos authentication. Contributed by Eric Yang.

2018-10-16 Thread shv
YARN-8869. YARN Service Client might not work correctly with RM REST API for 
Kerberos authentication. Contributed by Eric Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa94d370
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa94d370
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa94d370

Branch: refs/heads/HDFS-12943
Commit: fa94d370b6e3cba9c7560c09b517583e6652f103
Parents: b4a38e7
Author: Sunil G 
Authored: Mon Oct 15 21:21:57 2018 +0530
Committer: Sunil G 
Committed: Mon Oct 15 21:21:57 2018 +0530

--
 .../yarn/service/client/ApiServiceClient.java   |  19 ++--
 .../client/TestSecureApiServiceClient.java  | 113 ++-
 2 files changed, 120 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa94d370/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index b7a1541..851acbd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -70,7 +70,6 @@ import org.slf4j.LoggerFactory;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.WebResource.Builder;
 import com.sun.jersey.api.client.config.ClientConfig;
 import com.sun.jersey.api.client.config.DefaultClientConfig;
@@ -144,7 +143,7 @@ public class ApiServiceClient extends AppAdminClient {
   /**
* Calculate Resource Manager address based on working REST API.
*/
-  private String getRMWebAddress() {
+  String getRMWebAddress() {
 Configuration conf = getConfig();
 String scheme = "http://";;
 String path = "/app/v1/services/version";
@@ -156,8 +155,7 @@ public class ApiServiceClient extends AppAdminClient {
   .get("yarn.resourcemanager.webapp.https.address");
 }
 boolean useKerberos = UserGroupInformation.isSecurityEnabled();
-List rmServers = RMHAUtils
-.getRMHAWebappAddresses(new YarnConfiguration(conf));
+List rmServers = getRMHAWebAddresses(conf);
 for (String host : rmServers) {
   try {
 Client client = Client.create();
@@ -175,16 +173,16 @@ public class ApiServiceClient extends AppAdminClient {
 LOG.debug("Fail to resolve username: {}", e);
   }
 }
-WebResource webResource = client
-.resource(sb.toString());
+Builder builder = client
+.resource(sb.toString()).type(MediaType.APPLICATION_JSON);
 if (useKerberos) {
   String[] server = host.split(":");
   String challenge = generateToken(server[0]);
-  webResource.header(HttpHeaders.AUTHORIZATION, "Negotiate " +
+  builder.header(HttpHeaders.AUTHORIZATION, "Negotiate " +
   challenge);
   LOG.debug("Authorization: Negotiate {}", challenge);
 }
-ClientResponse test = webResource.get(ClientResponse.class);
+ClientResponse test = builder.get(ClientResponse.class);
 if (test.getStatus() == 200) {
   rmAddress = host;
   break;
@@ -197,6 +195,11 @@ public class ApiServiceClient extends AppAdminClient {
 return scheme+rmAddress;
   }
 
+  List getRMHAWebAddresses(Configuration conf) {
+return RMHAUtils
+.getRMHAWebappAddresses(new YarnConfiguration(conf));
+  }
+
   /**
* Compute active resource manager API service location.
*
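
The underlying issue is a Jersey 1.x subtlety: WebResource.header(...) does not
mutate the resource but returns a new Builder, so the old code attached the
Negotiate header to a Builder it immediately discarded and then issued the GET
from the bare WebResource. Keeping the Builder and issuing the request from it,
as the patch does, preserves the header. A sketch of the two patterns follows;
the URL and token are placeholders.

import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource.Builder;

public class NegotiateHeaderSketch {
  public static void main(String[] args) {
    Client client = Client.create();
    String challenge = "base64-spnego-token";   // hypothetical SPNEGO token

    // Broken pattern: header() returns a Builder that is thrown away, so a
    // later GET on the WebResource carries no Authorization header.
    //   WebResource r = client.resource("http://rm:8088/app/v1/services/version");
    //   r.header(HttpHeaders.AUTHORIZATION, "Negotiate " + challenge);
    //   r.get(ClientResponse.class);

    // Fixed pattern: keep the Builder and issue the request from it.
    Builder builder = client
        .resource("http://rm:8088/app/v1/services/version")
        .type(MediaType.APPLICATION_JSON)
        .header(HttpHeaders.AUTHORIZATION, "Negotiate " + challenge);
    ClientResponse response = builder.get(ClientResponse.class);
    System.out.println(response.getStatus());   // 200 when auth succeeds
  }
}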

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa94d370/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-ya

[36/50] [abbrv] hadoop git commit: YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed by Robert Kanter)

2018-10-16 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
new file mode 100644
index 000..26760d3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
@@ -0,0 +1,408 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.webproxy;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.http.conn.ssl.DefaultHostnameVerifier;
+import org.apache.http.conn.util.PublicSuffixMatcherLoader;
+import org.bouncycastle.asn1.x500.X500Name;
+import org.bouncycastle.asn1.x509.AlgorithmIdentifier;
+import org.bouncycastle.asn1.x509.BasicConstraints;
+import org.bouncycastle.asn1.x509.Extension;
+import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
+import org.bouncycastle.cert.X509CertificateHolder;
+import org.bouncycastle.cert.X509v3CertificateBuilder;
+import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
+import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
+import org.bouncycastle.crypto.util.PrivateKeyFactory;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.operator.ContentSigner;
+import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder;
+import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder;
+import org.bouncycastle.operator.OperatorCreationException;
+import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLPeerUnverifiedException;
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
+import javax.net.ssl.X509KeyManager;
+import javax.net.ssl.X509TrustManager;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.net.Socket;
+import java.security.GeneralSecurityException;
+import java.security.InvalidKeyException;
+import java.security.Key;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.KeyStore;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.Principal;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.SecureRandom;
+import java.security.Security;
+import java.security.SignatureException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.Random;
+import java.util.UUID;
+
+/**
+ * Allows for the generation and acceptance of specialized HTTPS Certificates 
to
+ * be used for HTTPS communication between the AMs and the RM Proxy.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ProxyCA {
+  private static final Logger LOG = LoggerFactory.getLogger(ProxyCA.class);
+
+  private X509Certificate caCert;
+  private KeyPair caKeyPair;
+  private KeyStore childTrustStore;
+  private final Random srand;
+  private X509TrustManager defaultTrustManager;
+  private X509KeyManager x509KeyManager;
+  private HostnameVerifier ho
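
ProxyCA generates its own certificate authority material with BouncyCastle. As
a rough, self-contained sketch of what issuing a self-signed CA certificate
with these builder classes looks like, using the jcajce helpers: the real class
additionally wires per-application child certificates, key managers, and trust
managers, and the subject name and validity window below are illustrative only.

import java.math.BigInteger;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Security;
import java.security.cert.X509Certificate;
import java.util.Date;

import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.cert.X509v3CertificateBuilder;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

public class SelfSignedCaSketch {
  public static void main(String[] args) throws Exception {
    Security.addProvider(new BouncyCastleProvider());

    // Generate the CA key pair.
    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
    kpg.initialize(2048);
    KeyPair keyPair = kpg.generateKeyPair();

    // Self-signed: issuer and subject are the same name.
    X500Name name = new X500Name("CN=yarn-proxy-ca");   // hypothetical CN
    Date notBefore = new Date();
    Date notAfter = new Date(notBefore.getTime() + 365L * 24 * 3600 * 1000);

    X509v3CertificateBuilder builder = new JcaX509v3CertificateBuilder(
        name, BigInteger.valueOf(System.currentTimeMillis()),
        notBefore, notAfter, name, keyPair.getPublic());
    ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
        .setProvider("BC").build(keyPair.getPrivate());
    X509Certificate caCert = new JcaX509CertificateConverter()
        .setProvider("BC").getCertificate(builder.build(signer));

    System.out.println(caCert.getSubjectX500Principal());
  }
}
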

[05/50] [abbrv] hadoop git commit: HDDS-616. Collect all the robot test outputs and return with the right exit code. Contributed by Elek, Marton.

2018-10-16 Thread shv
HDDS-616. Collect all the robot test outputs and return with the right exit 
code. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdad91c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdad91c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdad91c0

Branch: refs/heads/HDFS-12943
Commit: cdad91c03985317ec7b1807246709444ff6d5da0
Parents: 28ca5c9
Author: Márton Elek 
Authored: Thu Oct 11 11:30:07 2018 +0200
Committer: Márton Elek 
Committed: Sat Oct 13 08:40:49 2018 +0200

--
 hadoop-ozone/dist/src/main/smoketest/test.sh | 25 ---
 1 file changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdad91c0/hadoop-ozone/dist/src/main/smoketest/test.sh
--
diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh 
b/hadoop-ozone/dist/src/main/smoketest/test.sh
index f3c2224..a6dc1f1 100755
--- a/hadoop-ozone/dist/src/main/smoketest/test.sh
+++ b/hadoop-ozone/dist/src/main/smoketest/test.sh
@@ -17,19 +17,35 @@
 
 set -e
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+RESULT_DIR=result
+#delete previous results
+rm -rf "${DIR:?}/$RESULT_DIR"
+mkdir -p "$DIR/$RESULT_DIR"
+#Should be writeable from the docker containers where user is different.
+chmod ogu+w "$DIR/$RESULT_DIR"
 
 execute_tests(){
-  COMPOSE_FILE=$DIR/../compose/$1/docker-compose.yaml
+  COMPOSE_DIR=$1
+  COMPOSE_FILE=$DIR/../compose/$COMPOSE_DIR/docker-compose.yaml
   TESTS=$2
-  echo "Executing test ${TESTS[*]} with $COMPOSE_FILE"
+  echo "-"
+  echo "Executing test(s): [${TESTS[*]}]"
+  echo ""
+  echo "  Cluster type:  $COMPOSE_DIR"
+  echo "  Compose file:  $COMPOSE_FILE"
+  echo "  Output dir:$DIR/$RESULT_DIR"
+  echo "  Command to rerun:  ./test.sh --keep --env $COMPOSE_DIR $TESTS"
+  echo "-"
   docker-compose -f "$COMPOSE_FILE" down
   docker-compose -f "$COMPOSE_FILE" up -d
   echo "Waiting 30s for cluster start up..."
   sleep 30
   for TEST in "${TESTS[@]}"; do
+ TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
  set +e
- docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot 
"smoketest/$TEST"
+ docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot --log 
NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output 
"smoketest/$RESULT_DIR/robot-$COMPOSE_DIR-${TEST//\//_/}.xml" --logtitle 
"$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
  set -e
+ docker-compose -f "$COMPOSE_FILE" logs > 
"$DIR/$RESULT_DIR/docker-$COMPOSE_DIR-${TEST//\//_/}.log"
   done
   if [ "$KEEP_RUNNING" = false ]; then
  docker-compose -f "$COMPOSE_FILE" down
@@ -99,3 +115,6 @@ if [ "$RUN_ALL" = true ]; then
 else
execute_tests "$DOCKERENV" "${POSITIONAL[@]}"
 fi
+
+#Generate the combined output and return with the right exit code (note: robot 
= execute test, rebot = generate output)
+docker run --rm -it -v "$DIR/..:/opt/hadoop" apache/hadoop-runner rebot -d 
"smoketest/$RESULT_DIR" "smoketest/$RESULT_DIR/robot-*.xml"





[12/50] [abbrv] hadoop git commit: HDDS-579. ContainerStateMachine should fail subsequent transactions per container in case one fails. Contributed by Shashikant Banerjee.

2018-10-16 Thread shv
HDDS-579. ContainerStateMachine should fail subsequent transactions per 
container in case one fails. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603649d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603649d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603649d3

Branch: refs/heads/HDFS-12943
Commit: 603649d3a9ff12b725d06f5f317966de9a59fe70
Parents: 5209c75
Author: Jitendra Pandey 
Authored: Sat Oct 13 19:15:01 2018 -0700
Committer: Jitendra Pandey 
Committed: Sat Oct 13 19:15:01 2018 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   4 +-
 .../container/common/impl/HddsDispatcher.java   |  63 +--
 .../container/keyvalue/KeyValueHandler.java |  20 +-
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../rpc/TestContainerStateMachineFailures.java  | 185 +++
 5 files changed, 242 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/603649d3/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 662df8f..da55db3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -139,6 +139,7 @@ enum Result {
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
+  CONTAINER_UNHEALTHY = 36;
 }
 
 /**
@@ -161,7 +162,8 @@ enum ContainerLifeCycleState {
 OPEN = 1;
 CLOSING = 2;
 CLOSED = 3;
-INVALID = 4;
+UNHEALTHY = 4;
+INVALID = 5;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603649d3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index bb5002a..1849841 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -142,6 +142,26 @@ public class HddsDispatcher implements ContainerDispatcher 
{
 responseProto = handler.handle(msg, container);
 if (responseProto != null) {
   metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
+
+  // If the request is of Write Type and the container operation
+  // is unsuccessful, it implies the applyTransaction on the container
+  // failed. All subsequent transactions on the container should fail and
+  // hence the replica will be marked unhealthy here. In this case, a close
+  // container action will be sent to SCM to close the container.
+  if (!HddsUtils.isReadOnly(msg)
+  && responseProto.getResult() != ContainerProtos.Result.SUCCESS) {
+// If the container is open and the container operation has failed,
+// it should first be marked unhealthy and then the close container
+// action initiated. This also implies this is the first transaction
+// which has failed, so the container is marked unhealthy right here.
+// Once container is marked unhealthy, all the subsequent write
+// transactions will fail with UNHEALTHY_CONTAINER exception.
+if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
+  container.getContainerData()
+  .setState(ContainerLifeCycleState.UNHEALTHY);
+  sendCloseContainerActionIfNeeded(container);
+}
+  }
   return responseProto;
 } else {
   return ContainerUtils.unsupportedRequest(msg);
@@ -149,31 +169,46 @@ public class HddsDispatcher implements 
ContainerDispatcher {
   }
 
   /**
-   * If the container usage reaches the close threshold we send Close
-   * ContainerAction to SCM.
-   *
+   * If the container usage reaches the close threshold or the container is
+   * marked unhealthy we send Close ContainerAction to SCM.
* @param container current state of container
*/
   private void sendCloseContainerActionIfNeeded(Container container) {
 // We have to find a more efficient way to close a container.
-Boolean isOpen = Optional.ofNullable(container)
+boolean isSpaceFull = isContainerFull(container);
+boolean shouldClose = is
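
In short: the first failed write transaction on an OPEN container flips the
replica to UNHEALTHY, after which writes fail fast and SCM is asked to close
the container. A minimal sketch of that one-way transition, using simplified
stand-in types rather than the real Container and dispatcher:

enum State { OPEN, CLOSING, CLOSED, UNHEALTHY }

class ContainerSketch {
  private State state = State.OPEN;

  State getState() { return state; }
  void setState(State s) { state = s; }
}

public class DispatchSketch {
  static void onWriteResult(ContainerSketch container, boolean success) {
    if (!success && container.getState() == State.OPEN) {
      // First failed transaction: mark unhealthy so later writes fail fast,
      // then ask SCM to close the replica.
      container.setState(State.UNHEALTHY);
      sendCloseContainerAction(container);
    }
  }

  static void sendCloseContainerAction(ContainerSketch c) {
    System.out.println("close action queued for unhealthy container");
  }

  public static void main(String[] args) {
    ContainerSketch c = new ContainerSketch();
    onWriteResult(c, false);          // OPEN -> UNHEALTHY, close action queued
    onWriteResult(c, false);          // already UNHEALTHY: no duplicate action
    System.out.println(c.getState()); // UNHEALTHY
  }
}
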

[39/50] [abbrv] hadoop git commit: MAPREDUCE-7150. Optimize collections used by MR JHS to reduce its memory. (Contributed by Misha Dmitriev)

2018-10-16 Thread shv
MAPREDUCE-7150. Optimize collections used by MR JHS to reduce its memory. 
(Contributed by Misha Dmitriev)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/babd1449
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/babd1449
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/babd1449

Branch: refs/heads/HDFS-12943
Commit: babd1449bf8898f44c434c852e67240721c0eb00
Parents: c2288ac
Author: Haibo Chen 
Authored: Tue Oct 16 13:44:41 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 13:44:41 2018 -0700

--
 .../counters/FileSystemCounterGroup.java| 56 
 .../mapreduce/jobhistory/JobHistoryParser.java  |  2 +-
 .../hadoop/mapreduce/v2/hs/CompletedTask.java   |  5 +-
 .../mapreduce/v2/hs/CompletedTaskAttempt.java   |  2 +-
 4 files changed, 38 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/babd1449/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
index 046368e..ed7f271 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
@@ -61,8 +61,9 @@ public abstract class FileSystemCounterGroup
 
   // C[] would need Array.newInstance which requires a Class reference.
   // Just a few local casts probably worth not having to carry it around.
-  private final Map map =
-new ConcurrentSkipListMap();
+  // Initialized lazily, since in some situations millions of empty maps can
+  // waste a substantial (e.g. 4% as we observed) portion of the heap
+  private Map map;
   private String displayName;
 
   private static final Joiner NAME_JOINER = Joiner.on('_');
@@ -214,6 +215,9 @@ public abstract class FileSystemCounterGroup
   @SuppressWarnings("unchecked")
   public synchronized C findCounter(String scheme, FileSystemCounter key) {
 final String canonicalScheme = checkScheme(scheme);
+if (map == null) {
+  map = new ConcurrentSkipListMap<>();
+}
 Object[] counters = map.get(canonicalScheme);
 int ord = key.ordinal();
 if (counters == null) {
@@ -247,10 +251,12 @@ public abstract class FileSystemCounterGroup
   protected abstract C newCounter(String scheme, FileSystemCounter key);
 
   @Override
-  public int size() {
+  public synchronized int size() {
 int n = 0;
-for (Object[] counters : map.values()) {
-  n += numSetCounters(counters);
+if (map != null) {
+  for (Object[] counters : map.values()) {
+n += numSetCounters(counters);
+  }
 }
 return n;
   }
@@ -271,19 +277,23 @@ public abstract class FileSystemCounterGroup
* FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
*/
   @Override
-  public void write(DataOutput out) throws IOException {
-WritableUtils.writeVInt(out, map.size()); // #scheme
-for (Map.Entry entry : map.entrySet()) {
-  WritableUtils.writeString(out, entry.getKey()); // scheme
-  // #counter for the above scheme
-  WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
-  for (Object counter : entry.getValue()) {
-if (counter == null) continue;
-@SuppressWarnings("unchecked")
-FSCounter c = (FSCounter) ((Counter)counter).getUnderlyingCounter();
-WritableUtils.writeVInt(out, c.key.ordinal());  // key
-WritableUtils.writeVLong(out, c.getValue());// value
+  public synchronized void write(DataOutput out) throws IOException {
+if (map != null) {
+  WritableUtils.writeVInt(out, map.size()); // #scheme
+  for (Map.Entry entry : map.entrySet()) {
+WritableUtils.writeString(out, entry.getKey()); // scheme
+// #counter for the above scheme
+WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
+for (Object counter : entry.getValue()) {
+  if (counter == null) continue;
+  @SuppressWarnings("unchecked")
+  FSCounter c = (FSCounter) ((Counter) counter).getUnderlyingCounter();
+  WritableUtils.writeVInt(out, c.key.ordinal());  // key
+  WritableUtils.writeVLong(out, c.getValue());// value
+  
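
The pattern is classic lazy allocation: the map is created only when a counter
is first touched, every reader tolerates a null field, and the accessors are
synchronized because lazy initialization gives up the visibility guarantee a
final field used to provide. A simplified sketch of the same idiom, with
stand-in types rather than the real counter group:

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class LazyCounterGroupSketch {
  private Map<String, long[]> map;   // null until the first counter is set

  public synchronized long[] findCounters(String scheme) {
    if (map == null) {
      map = new ConcurrentSkipListMap<>();   // allocate on first use only
    }
    return map.computeIfAbsent(scheme, s -> new long[4]);
  }

  public synchronized int size() {
    return map == null ? 0 : map.size();     // readers must tolerate null
  }

  public static void main(String[] args) {
    LazyCounterGroupSketch group = new LazyCounterGroupSketch();
    System.out.println(group.size());        // 0, and no map was allocated
    group.findCounters("hdfs")[0] = 42;
    System.out.println(group.size());        // 1
  }
}
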

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-10-16 Thread shv
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8094c38e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8094c38e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8094c38e

Branch: refs/heads/HDFS-12943
Commit: 8094c38efdbfd11519ce518636cb630ac50c5f93
Parents: bba8aa3 3bfd214
Author: Konstantin V Shvachko 
Authored: Tue Oct 16 16:32:13 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Tue Oct 16 16:32:13 2018 -0700

--
 README.txt  |   2 +
 .../resources/assemblies/hadoop-yarn-dist.xml   |   8 +
 .../src/main/conf/log4j.properties  |   6 +
 .../KeyProviderDelegationTokenExtension.java|  71 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |   4 +-
 .../crypto/key/kms/KMSClientProvider.java   | 220 +--
 .../key/kms/LoadBalancingKMSClientProvider.java |  75 ++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  75 +--
 .../org/apache/hadoop/security/Credentials.java |   1 +
 .../web/DelegationTokenAuthenticatedURL.java|  25 +-
 .../security/token/DelegationTokenIssuer.java   | 111 
 .../java/org/apache/hadoop/util/KMSUtil.java|  13 +-
 .../conf/TestConfigurationDeprecation.java  |   1 +
 ...TestKeyProviderDelegationTokenExtension.java |  20 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 138 
 .../kms/TestLoadBalancingKMSClientProvider.java |  63 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   3 +
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  21 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 349 --
 .../hadoop/hdds/scm/XceiverClientManager.java   |  17 +-
 .../scm/client/ContainerOperationClient.java|  15 +-
 hadoop-hdds/common/pom.xml  |   6 -
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   2 +-
 .../hadoop/hdds/scm/pipeline/Pipeline.java  | 211 ++
 .../hadoop/hdds/scm/pipeline/PipelineID.java|  80 +++
 .../hadoop/hdds/scm/pipeline/package-info.java  |  24 +
 .../org/apache/hadoop/ozone/OzoneConsts.java|   1 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   3 +-
 .../hadoop/utils/MetadataStoreBuilder.java  |  38 +-
 .../main/proto/DatanodeContainerProtocol.proto  |   4 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   1 -
 .../StorageContainerLocationProtocol.proto  |   1 -
 .../apache/hadoop/utils/TestMetadataStore.java  |   2 +
 .../hadoop/utils/TestRocksDBStoreMBean.java |  19 +-
 hadoop-hdds/container-service/pom.xml   |  11 +-
 .../common/helpers/ContainerReport.java | 205 --
 .../common/helpers/KeyValueContainerReport.java | 117 
 .../container/common/impl/HddsDispatcher.java   |  63 +-
 .../container/common/interfaces/Container.java  |   5 +
 .../container/common/utils/ContainerCache.java  |   6 +-
 .../container/keyvalue/KeyValueContainer.java   |   9 +-
 .../keyvalue/KeyValueContainerData.java |  16 +
 .../container/keyvalue/KeyValueHandler.java |  30 +-
 .../container/keyvalue/helpers/BlockUtils.java  |   3 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |  11 +-
 .../keyvalue/impl/BlockManagerImpl.java |  43 +-
 .../keyvalue/impl/ChunkManagerImpl.java |  43 ++
 .../container/ozoneimpl/ContainerReader.java|   7 +
 .../StorageContainerDatanodeProtocol.proto  |   2 +
 .../hdds/scm/pipeline/PipelineFactory.java  |  56 ++
 .../hdds/scm/pipeline/PipelineManager.java  |  58 ++
 .../hdds/scm/pipeline/PipelineProvider.java |  35 +
 .../hdds/scm/pipeline/PipelineStateManager.java | 179 +
 .../hdds/scm/pipeline/PipelineStateMap.java | 212 ++
 .../scm/pipeline/RatisPipelineProvider.java | 135 
 .../hdds/scm/pipeline/SCMPipelineManager.java   | 226 +++
 .../scm/pipeline/SimplePipelineProvider.java|  80 +++
 .../hadoop/hdds/scm/pipeline/package-info.java  |  24 +
 .../scm/server/StorageContainerManager.java |   8 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  14 +-
 .../org/apache/hadoop/hdfs/HdfsKMSUtil.java |  60 +-
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  29 +-
 .../resolver/order/RandomResolver.java  |  22 +-
 .../main/webapps/router/federationhealth.html   |   1 -
 .../server/namenode/AclEntryStatusFormat.java   | 109 ++--
 .../server/namenode/FSImageFormatPBINode.java   | 101 +--
 .../server/namenode/FSImageFormatProtobuf.java  |  26 +-
 .../namenode/INodeWithAdditionalFields.java |  36 +-
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../server/namenode/SerialNumberManager.java| 152 -
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 

[01/50] [abbrv] hadoop git commit: HDDS-415. 'ozone om' with incorrect argument first logs all the STARTUP_MSG. Contributed by Namit Maheshwari.

2018-09-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 c2e0e9a02 -> c377e3ca4


HDDS-415. 'ozone om' with incorrect argument first logs all the STARTUP_MSG. 
Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0846271
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0846271
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0846271

Branch: refs/heads/HDFS-12943
Commit: e08462715003a7680e6e9479d1842e4f0bb3c007
Parents: 5d084d7
Author: Márton Elek 
Authored: Thu Sep 13 13:47:59 2018 +0200
Committer: Márton Elek 
Committed: Thu Sep 13 14:01:53 2018 +0200

--
 .../src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0846271/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 348d0c0..b5bd361 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -258,7 +258,6 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
 hParser.printGenericCommandUsage(System.err);
 System.exit(1);
   }
-  StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
   OzoneManager om = createOm(hParser.getRemainingArgs(), conf);
   if (om != null) {
 om.start();
@@ -298,6 +297,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
 }
 switch (startOpt) {
 case CREATEOBJECTSTORE:
+  StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
   terminate(omInit(conf) ? 0 : 1);
   return null;
 case HELP:
@@ -305,6 +305,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
   terminate(0);
   return null;
 default:
+  StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
   return new OzoneManager(conf);
 }
   }





[40/50] [abbrv] hadoop git commit: HDDS-463. Fix the release packaging of the ozone distribution. Contributed by Elek Marton.

2018-09-17 Thread shv
HDDS-463. Fix the release packaging of the ozone distribution. Contributed by 
Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d89c3e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d89c3e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d89c3e7

Branch: refs/heads/HDFS-12943
Commit: 3d89c3e73eba280b8780228fcd097809271b4c8a
Parents: 8af8453
Author: Bharat Viswanadham 
Authored: Mon Sep 17 11:49:09 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Sep 17 11:49:09 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching | 11 +++-
 .../assemblies/hadoop-src-with-hdds.xml | 56 
 .../assemblies/hadoop-src-with-hdsl.xml | 56 
 hadoop-dist/src/main/ozone/README.txt   | 51 ++
 4 files changed, 116 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d89c3e7/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index b4d94b3..8f1f169 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -122,7 +122,6 @@ run mkdir "ozone-${HDDS_VERSION}"
 run cd "ozone-${HDDS_VERSION}"
 run cp -p "${ROOT}/LICENSE.txt" .
 run cp -p "${ROOT}/NOTICE.txt" .
-run cp -p "${ROOT}/README.txt" .
 
 # Copy hadoop-common first so that it have always have all dependencies.
 # Remaining projects will copy only libraries which are not present already in 
'share' directory.
@@ -162,6 +161,14 @@ cp -r 
"${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./
 rm sbin/*all.sh
 rm sbin/*all.cmd
 
+#remove test and java sources
+find . -name "*tests.jar" | xargs rm
+find . -name "*sources.jar" | xargs rm
+find . -name jdiff -type d | xargs rm -rf
+
+#add ozone specific readme
+
+run cp "${ROOT}/hadoop-dist/src/main/ozone/README.txt" README.txt
 #Copy docker compose files
 run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
 
@@ -169,5 +176,5 @@ mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
 mkdir -p ./share/hadoop/hdfs
 echo
-echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
 echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d89c3e7/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
new file mode 100644
index 000..b1e039f
--- /dev/null
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
@@ -0,0 +1,56 @@
+
+http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3";
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+  
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
 http://maven.apache.org/xsd/assembly-1.1.3.xsd";>
+  hadoop-src
+  
+tar.gz
+  
+  true
+  
+
+  .
+  
+LICENCE.txt
+README.txt
+NOTICE.txt
+  
+
+
+  .
+  true
+  
+.git/**
+**/.gitignore
+**/.svn
+**/*.iws
+**/*.ipr
+**/*.iml
+**/.classpath
+**/.project
+**/.settings
+**/target/**
+
+**/*.log
+**/build/**
+**/file:/**
+**/SecurityAuth.audit*
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d89c3e7/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
deleted file mode 100644
index b1e039f..000
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-
-http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3";
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
-  
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
 http://maven.apache.org/xsd/assembly-1.1.3.xsd";>
-  hadoop-src
-  
-tar.gz
-  
-  true
-  
-
-  .
-  
-LICENCE.txt
-README.txt
-NOTICE.txt
-  
-
-
-  .
-  true
-  
-.git/**
-**/.gitignore
-**/.svn
-**/*.iws
-**/*.ipr
-**/*.iml
-**/.classpath
-  

[18/50] [abbrv] hadoop git commit: YARN-8045. Reduce log output from container status calls. Contributed by Craig Condit

2018-09-17 Thread shv
YARN-8045. Reduce log output from container status calls. Contributed by Craig 
Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/144a55f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/144a55f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/144a55f0

Branch: refs/heads/HDFS-12943
Commit: 144a55f0e3ba302327baf2e98d1e07b953dcbbfd
Parents: 78902f0
Author: Shane Kumpf 
Authored: Fri Sep 14 10:41:55 2018 -0600
Committer: Shane Kumpf 
Committed: Fri Sep 14 10:41:55 2018 -0600

--
 .../containermanager/ContainerManagerImpl.java  | 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/144a55f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index b89e2dd..27a7c80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1474,10 +1474,37 @@ public class ContainerManagerImpl extends 
CompositeService implements
   }
 }
 ContainerStatus containerStatus = container.cloneAndGetContainerStatus();
-LOG.info("Returning " + containerStatus);
+logContainerStatus("Returning ", containerStatus);
 return containerStatus;
   }
 
+  private void logContainerStatus(String prefix, ContainerStatus status) {
+StringBuilder sb = new StringBuilder();
+sb.append(prefix);
+sb.append("ContainerStatus: [");
+sb.append("ContainerId: ");
+sb.append(status.getContainerId()).append(", ");
+sb.append("ExecutionType: ");
+sb.append(status.getExecutionType()).append(", ");
+sb.append("State: ");
+sb.append(status.getState()).append(", ");
+sb.append("Capability: ");
+sb.append(status.getCapability()).append(", ");
+sb.append("Diagnostics: ");
+sb.append(LOG.isDebugEnabled() ? status.getDiagnostics() : "...");
+sb.append(", ");
+sb.append("ExitStatus: ");
+sb.append(status.getExitStatus()).append(", ");
+sb.append("IP: ");
+sb.append(status.getIPs()).append(", ");
+sb.append("Host: ");
+sb.append(status.getHost()).append(", ");
+sb.append("ContainerSubState: ");
+sb.append(status.getContainerSubState());
+sb.append("]");
+LOG.info(sb.toString());
+  }
+
   @Private
   @VisibleForTesting
   protected void authorizeGetAndStopContainerRequest(ContainerId containerId,





[06/50] [abbrv] hadoop git commit: YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker state. Contributed by Pradeep Ambati

2018-09-17 Thread shv
YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker 
state. Contributed by Pradeep Ambati


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/250b5001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/250b5001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/250b5001

Branch: refs/heads/HDFS-12943
Commit: 250b50018e8c94d8ca83ff981b01f26bb68c0842
Parents: e1b242a
Author: Jason Lowe 
Authored: Thu Sep 13 13:28:54 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 13:28:54 2018 -0500

--
 .../localizer/ResourceLocalizationService.java  |  87 +++---
 .../recovery/NMLeveldbStateStoreService.java| 173 +++-
 .../recovery/NMStateStoreService.java   |  29 +-
 .../recovery/NMMemoryStateStoreService.java |  18 +-
 .../TestNMLeveldbStateStoreService.java | 269 ---
 5 files changed, 418 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/250b5001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index d9b887f..71f48ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -308,63 +308,66 @@ public class ResourceLocalizationService extends 
CompositeService
 String user = userEntry.getKey();
 RecoveredUserResources userResources = userEntry.getValue();
 trackerState = userResources.getPrivateTrackerState();
-if (!trackerState.isEmpty()) {
-  LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-  null, dispatcher, true, super.getConfig(), stateStore,
-  dirsHandler);
-  LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
-  tracker);
-  if (oldTracker != null) {
-tracker = oldTracker;
-  }
-  recoverTrackerResources(tracker, trackerState);
+LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
+null, dispatcher, true, super.getConfig(), stateStore,
+dirsHandler);
+LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
+tracker);
+if (oldTracker != null) {
+  tracker = oldTracker;
 }
+recoverTrackerResources(tracker, trackerState);
 
 for (Map.Entry appEntry :
 userResources.getAppTrackerStates().entrySet()) {
   trackerState = appEntry.getValue();
-  if (!trackerState.isEmpty()) {
-ApplicationId appId = appEntry.getKey();
-String appIdStr = appId.toString();
-LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-appId, dispatcher, false, super.getConfig(), stateStore,
-dirsHandler);
-LocalResourcesTracker oldTracker = appRsrc.putIfAbsent(appIdStr,
-tracker);
-if (oldTracker != null) {
-  tracker = oldTracker;
-}
-recoverTrackerResources(tracker, trackerState);
+  ApplicationId appId = appEntry.getKey();
+  String appIdStr = appId.toString();
+  LocalResourcesTracker tracker1 = new LocalResourcesTrackerImpl(user,
+  appId, dispatcher, false, super.getConfig(), stateStore,
+  dirsHandler);
+  LocalResourcesTracker oldTracker1 = appRsrc.putIfAbsent(appIdStr,
+  tracker1);
+  if (oldTracker1 != null) {
+tracker1 = oldTracker1;
   }
+  recoverTrackerResources(tracker1, trackerState);
 }
   }
 }
   }
 
   private void recoverTrackerResources(LocalResourcesTracker tracker,
-  LocalResourceTrackerState state) throws URISyntaxException {
-for (LocalizedResourceProto proto : state.getLocalizedResources()) {
-  LocalResource rsrc = new LocalResourcePBI
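
The recovery path leans on ConcurrentMap.putIfAbsent to make tracker
registration race-safe: build a candidate tracker, try to register it, and if
another thread won the race, recover into the winner instead. A minimal sketch
of the idiom, with a StringBuilder standing in for LocalResourcesTracker:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class TrackerRegistrySketch {
  public static void main(String[] args) {
    ConcurrentMap<String, StringBuilder> trackers = new ConcurrentHashMap<>();

    StringBuilder tracker = new StringBuilder();
    StringBuilder old = trackers.putIfAbsent("userA", tracker);
    if (old != null) {
      tracker = old;   // someone registered first; recover into their tracker
    }
    tracker.append("recovered-resource");
    System.out.println(trackers.get("userA"));   // recovered-resource
  }
}
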

[12/50] [abbrv] hadoop git commit: HADOOP-15733. Correct the log when Invalid emptier Interval configured. Contributed by Ayush Saxena

2018-09-17 Thread shv
HADOOP-15733. Correct the log when Invalid emptier Interval configured. 
Contributed by Ayush Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef5c776a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef5c776a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef5c776a

Branch: refs/heads/HDFS-12943
Commit: ef5c776a42697fb3958a77646b759e2a6edfe48e
Parents: 291dcf2
Author: Brahma Reddy Battula 
Authored: Fri Sep 14 07:32:27 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Sep 14 07:32:27 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5c776a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 9c6a685..6e101a2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -249,7 +249,7 @@ public class TrashPolicyDefault extends TrashPolicy {
   LOG.info("Namenode trash configuration: Deletion interval = "
   + (deletionInterval / MSECS_PER_MINUTE)
   + " minutes, Emptier interval = "
-  + (emptierInterval / MSECS_PER_MINUTE) + " minutes.");
+  + (this.emptierInterval / MSECS_PER_MINUTE) + " minutes.");
 }
 
 @Override
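
The one-character fix matters because the constructor parameter shadows the
field: validation may clamp the field to a different value, so the log line has
to read this.emptierInterval rather than the stale parameter. A sketch of the
shadowing pitfall, assuming a fall-back-to-deletion-interval rule similar in
spirit to the real policy:

public class EmptierIntervalSketch {
  private final long emptierInterval;

  EmptierIntervalSketch(long emptierInterval, long deletionInterval) {
    // Invalid or oversized values fall back to the deletion interval
    // (assumed clamping rule, for illustration).
    this.emptierInterval =
        (emptierInterval <= 0 || emptierInterval > deletionInterval)
            ? deletionInterval : emptierInterval;
    // Logging the parameter here would report the unclamped value.
    System.out.println("Emptier interval = " + this.emptierInterval
        + " minutes (parameter was " + emptierInterval + ")");
  }

  public static void main(String[] args) {
    new EmptierIntervalSketch(0, 360);  // field becomes 360, parameter stays 0
  }
}
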





[34/50] [abbrv] hadoop git commit: YARN-8715. Make allocation tags in the placement spec optional for node-attributes. Contributed by Weiwei Yang.

2018-09-17 Thread shv
YARN-8715. Make allocation tags in the placement spec optional for 
node-attributes. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33d8327c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33d8327c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33d8327c

Branch: refs/heads/HDFS-12943
Commit: 33d8327cffdc483b538aec3022fd8730b85babdb
Parents: 95231f1
Author: Sunil G 
Authored: Mon Sep 17 10:07:45 2018 +0530
Committer: Sunil G 
Committed: Mon Sep 17 10:07:45 2018 +0530

--
 .../constraint/PlacementConstraintParser.java   | 44 ++--
 .../resource/TestPlacementConstraintParser.java | 22 ++
 .../distributedshell/ApplicationMaster.java | 20 +++--
 3 files changed, 78 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33d8327c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
index 93fd706..de9419a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.util.constraint;
 
+import com.google.common.base.Strings;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
@@ -589,6 +590,14 @@ public final class PlacementConstraintParser {
   this.num = number;
 }
 
+public static SourceTags emptySourceTags() {
+  return new SourceTags("", 0);
+}
+
+public boolean isEmpty() {
+  return Strings.isNullOrEmpty(tag) && num == 0;
+}
+
 public String getTag() {
   return this.tag;
 }
@@ -692,20 +701,47 @@ public final class PlacementConstraintParser {
   // foo=4,Pn
   String[] splitted = specStr.split(
   String.valueOf(EXPRESSION_VAL_DELIM), 2);
-  if (splitted.length != 2) {
+  final SourceTags st;
+  final String exprs;
+  if (splitted.length == 1) {
+// source tags not specified
+exprs = splitted[0];
+st = SourceTags.emptySourceTags();
+  } else if (splitted.length == 2) {
+exprs = splitted[1];
+String tagAlloc = splitted[0];
+st = SourceTags.parseFrom(tagAlloc);
+  } else {
 throw new PlacementConstraintParseException(
 "Unexpected placement constraint expression " + specStr);
   }
 
-  String tagAlloc = splitted[0];
-  SourceTags st = SourceTags.parseFrom(tagAlloc);
-  String exprs = splitted[1];
   AbstractConstraint constraint =
   PlacementConstraintParser.parseExpression(exprs);
 
   result.put(st, constraint.build());
 }
 
+// Validation
+Set sourceTagSet = result.keySet();
+if (sourceTagSet.stream()
+.filter(sourceTags -> sourceTags.isEmpty())
+.findAny()
+.isPresent()) {
+  // Source tags, e.g. foo=3, are optional for a node-attribute constraint,
+  // but when source tags are absent the parser accepts only a single
+  // constraint expression, to avoid ambiguous semantics. This is because
+  // the DS AM requests the number of containers specified in the source
+  // tags, and when source tags are absent we overwrite that number with
+  // the num_containers argument from the command line. If source tags were
+  // only partially missing across the constraints, we could not tell
+  // whether the number ought to be overwritten or not.
+  if (result.size() != 1) {
+throw new PlacementConstraintParseException(
+"Source allocation tags is required for a multi placement"
++ " constraint expression.");
+  }
+}
 return result;
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33d8327c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api
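
As the new comment block explains, source tags become optional, but only for a
single-constraint spec. A toy sketch of the split, assuming ',' as the
delimiter implied by the "foo=4,Pn" example in the code; the real parser uses
EXPRESSION_VAL_DELIM and richer SourceTags/constraint types:

public class PlacementSpecSketch {
  static void parse(String spec) {
    String[] split = spec.split(",", 2);   // split once, like split(delim, 2)
    if (split.length == 1) {
      System.out.println("no source tags; expression = " + split[0]);
    } else {
      System.out.println("tags = " + split[0] + "; expression = " + split[1]);
    }
  }

  public static void main(String[] args) {
    parse("foo=4,Pn");   // source tags foo=4 with a following expression
    parse("Pn");         // node-attribute style: source tags omitted
  }
}
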

[22/50] [abbrv] hadoop git commit: HDDS-454. TestChunkStreams#testErrorReadGroupInputStream & TestChunkStreams#testReadGroupInputStream are failing. Contributed by chencan.

2018-09-17 Thread shv
HDDS-454. TestChunkStreams#testErrorReadGroupInputStream & 
TestChunkStreams#testReadGroupInputStream are failing. Contributed by chencan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5470de42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5470de42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5470de42

Branch: refs/heads/HDFS-12943
Commit: 5470de420b2660e3042a5fec566ecc6b0d8ff93a
Parents: c1df308
Author: Nanda kumar 
Authored: Sat Sep 15 01:38:13 2018 +0530
Committer: Nanda kumar 
Committed: Sat Sep 15 01:38:57 2018 +0530

--
 .../org/apache/hadoop/ozone/om/TestChunkStreams.java  | 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5470de42/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
--
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
index 7ce916a..7ff9d63 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -121,6 +121,7 @@ public class TestChunkStreams {
 int tempOffset = offset;
 ChunkInputStream in =
 new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+  private long pos = 0;
   private ByteArrayInputStream in =
   new ByteArrayInputStream(buf, tempOffset, 100);
 
@@ -131,7 +132,7 @@ public class TestChunkStreams {
 
   @Override
   public long getPos() throws IOException {
-throw new UnsupportedOperationException();
+return pos;
   }
 
   @Override
@@ -147,7 +148,9 @@ public class TestChunkStreams {
 
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
-return in.read(b, off, len);
+int readLen = in.read(b, off, len);
+pos += readLen;
+return readLen;
   }
 };
 inputStreams.add(in);
@@ -175,6 +178,7 @@ public class TestChunkStreams {
 int tempOffset = offset;
 ChunkInputStream in =
 new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+  private long pos = 0;
   private ByteArrayInputStream in =
   new ByteArrayInputStream(buf, tempOffset, 100);
 
@@ -185,7 +189,7 @@ public class TestChunkStreams {
 
   @Override
   public long getPos() throws IOException {
-throw new UnsupportedOperationException();
+return pos;
   }
 
   @Override
@@ -201,7 +205,9 @@ public class TestChunkStreams {
 
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
-return in.read(b, off, len);
+int readLen = in.read(b, off, len);
+pos += readLen;
+return readLen;
   }
 };
 inputStreams.add(in);

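A standalone sketch of the position-tracking pattern the fixed test uses; unlike the in-diff version it also guards the EOF case, since read() returns -1 at end of stream and adding that to pos would move the position backwards:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class PositionTrackingStream extends InputStream {
  private final InputStream delegate;
  private long pos = 0;

  public PositionTrackingStream(InputStream delegate) {
    this.delegate = delegate;
  }

  @Override
  public int read() throws IOException {
    int b = delegate.read();
    if (b >= 0) {
      pos++; // advance only on a successful single-byte read
    }
    return b;
  }

  @Override
  public int read(byte[] buf, int off, int len) throws IOException {
    int readLen = delegate.read(buf, off, len);
    if (readLen > 0) {
      pos += readLen; // never add the -1 EOF marker to the position
    }
    return readLen;
  }

  public long getPos() {
    return pos;
  }

  public static void main(String[] args) throws IOException {
    PositionTrackingStream s =
        new PositionTrackingStream(new ByteArrayInputStream(new byte[100]));
    s.read(new byte[40], 0, 40);
    System.out.println(s.getPos()); // prints 40
  }
}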




[11/50] [abbrv] hadoop git commit: HDDS-423. Introduce an ozone specific log4j.properties. Contributed by Elek, Marton.

2018-09-17 Thread shv
HDDS-423. Introduce an ozone specific log4j.properties. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/291dcf22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/291dcf22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/291dcf22

Branch: refs/heads/HDFS-12943
Commit: 291dcf2247f8f5f92a3290734a2ed081a99ccbc3
Parents: a1de8cb
Author: Hanisha Koneru 
Authored: Thu Sep 13 16:37:44 2018 -0700
Committer: Hanisha Koneru 
Committed: Thu Sep 13 16:37:44 2018 -0700

--
 .../src/main/conf/log4j.properties  |  23 ---
 .../src/main/compose/ozone/docker-config|   3 +
 .../src/main/compose/ozoneperf/docker-config|   2 +
 .../common/src/main/conf/log4j.properties   | 156 +++
 .../src/test/acceptance/basic/docker-config |   3 +-
 .../src/test/acceptance/ozonefs/docker-config   |   3 +-
 6 files changed, 165 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/291dcf22/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 5783013..aeae2b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -305,29 +305,6 @@ log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
 log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
 
 #
-# Add a logger for ozone that is separate from the Datanode.
-#
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into datanode logs. Remove this line to have single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
- %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
-#
 # Fair scheduler state dump
 #
 # Use following logger to dump the state to a separate file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/291dcf22/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index 21127f8..0def70e 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -29,6 +29,9 @@ LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/291dcf22/hadoop-dist/src/main/compose/ozoneperf/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index 2539950..309adee 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -29,4 +29,6 @@ LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 
HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+LOG4J.PROPERTIES_log4j.

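The two logger overrides added above mute well-known noisy loggers in the compose environments. A rough programmatic equivalent using the log4j 1.x API that these properties configure (illustrative only; the containers pick the settings up from the generated log4j.properties):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class QuietNoisyLoggersDemo {
  public static void main(String[] args) {
    // Same effect as ...log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
    Logger.getLogger("org.apache.hadoop.util.NativeCodeLoader").setLevel(Level.ERROR);
    // Same effect as ...log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
    Logger.getLogger("org.apache.ratis.conf.ConfUtils").setLevel(Level.WARN);
  }
}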
[28/50] [abbrv] hadoop git commit: HDDS-449. Add a NULL check to protect DeadNodeHandler#onMessage. Contributed by LiXin Ge.

2018-09-17 Thread shv
HDDS-449. Add a NULL check to protect DeadNodeHandler#onMessage. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a65c3ea9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a65c3ea9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a65c3ea9

Branch: refs/heads/HDFS-12943
Commit: a65c3ea91cad7e8b453976bab2165ea4a3c6daf9
Parents: 985f3bf
Author: Márton Elek 
Authored: Sat Sep 15 13:35:00 2018 +0200
Committer: Márton Elek 
Committed: Sat Sep 15 13:35:21 2018 +0200

--
 .../java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java   | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a65c3ea9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
--
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index d694a10..7fda67d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -57,6 +57,11 @@ public class DeadNodeHandler implements EventHandler<DatanodeDetails> {
   EventPublisher publisher) {
 Set<ContainerID> containers =
 node2ContainerMap.getContainers(datanodeDetails.getUuid());
+if (containers == null) {
+  LOG.info("There's no containers in dead datanode {}, no replica will be"
+  + " removed from the in-memory state.", datanodeDetails.getUuid());
+  return;
+}
 LOG.info(
 "Datanode {}  is dead. Removing replications from the in-memory 
state.",
 datanodeDetails.getUuid());

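A self-contained analog of the guard, with the HDDS types swapped for plain JDK ones for illustration: the node-to-container map may legitimately have no entry for a datanode that registered but never reported containers, so the handler must check before dereferencing.

import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

public class DeadNodeGuardDemo {
  private final Map<UUID, Set<String>> node2Containers = new ConcurrentHashMap<>();

  public void onDeadNode(UUID nodeId) {
    Set<String> containers = node2Containers.get(nodeId);
    if (containers == null) {
      // Nothing was ever reported for this node: log and bail out instead
      // of letting the event dispatcher die on an NPE.
      System.out.println("No containers recorded for dead node " + nodeId);
      return;
    }
    containers.forEach(c -> System.out.println("Removing replica of " + c));
  }

  public static void main(String[] args) {
    new DeadNodeGuardDemo().onDeadNode(UUID.randomUUID()); // takes the no-op path
  }
}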




[48/50] [abbrv] hadoop git commit: Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943

2018-09-17 Thread shv
Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943

# Conflicts:
#   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
#   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ade422b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ade422b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ade422b

Branch: refs/heads/HDFS-12943
Commit: 9ade422b3fb585fe4cbecdc241d6642dcd510e8e
Parents: e97b104 eca1a4b
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 17:49:26 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Sep 17 18:50:23 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/PeerCache.java  |  8 ++---
 .../hdfs/client/impl/BlockReaderFactory.java| 12 +++
 .../client/impl/BlockReaderLocalLegacy.java |  2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java|  4 +--
 .../hdfs/shortcircuit/ShortCircuitReplica.java  |  2 +-
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |  3 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |  4 +--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java  |  6 ++--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  7 +++--
 .../org/apache/hadoop/hdfs/HdfsDtFetcher.java   |  7 +++--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |  7 +++--
 .../apache/hadoop/hdfs/SWebHdfsDtFetcher.java   |  7 +++--
 .../apache/hadoop/hdfs/WebHdfsDtFetcher.java|  7 +++--
 .../hadoop/hdfs/net/DomainPeerServer.java   |  6 ++--
 .../apache/hadoop/hdfs/net/TcpPeerServer.java   |  6 ++--
 .../hdfs/qjournal/client/AsyncLoggerSet.java|  6 ++--
 .../qjournal/client/QuorumJournalManager.java   |  6 ++--
 .../qjournal/server/GetJournalEditServlet.java  |  7 +++--
 .../hadoop/hdfs/qjournal/server/Journal.java| 12 +++
 .../hdfs/qjournal/server/JournalNode.java   | 10 +++---
 .../qjournal/server/JournalNodeRpcServer.java   |  4 +--
 .../qjournal/server/JournaledEditsCache.java|  2 +-
 .../token/block/BlockTokenSecretManager.java|  7 +++--
 .../DelegationTokenSecretManager.java   |  8 ++---
 .../hadoop/hdfs/server/balancer/Balancer.java   |  8 ++---
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  6 ++--
 .../hdfs/server/balancer/NameNodeConnector.java |  7 +++--
 .../AvailableSpaceBlockPlacementPolicy.java |  8 ++---
 .../server/blockmanagement/DatanodeManager.java |  6 ++--
 .../server/blockmanagement/HostFileManager.java |  7 +++--
 .../hadoop/hdfs/server/common/JspHelper.java|  6 ++--
 .../hdfs/server/common/MetricsLoggerTask.java   |  6 ++--
 .../apache/hadoop/hdfs/server/common/Util.java  |  7 +++--
 .../hdfs/server/datanode/DirectoryScanner.java  |  7 +++--
 .../server/datanode/ProfilingFileIoEvents.java  |  7 +++--
 .../server/datanode/ShortCircuitRegistry.java   |  7 +++--
 .../AvailableSpaceVolumeChoosingPolicy.java |  7 +++--
 .../RoundRobinVolumeChoosingPolicy.java |  7 +++--
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  8 ++---
 .../impl/FsDatasetAsyncDiskService.java |  7 +++--
 .../impl/RamDiskAsyncLazyPersistService.java|  7 +++--
 .../fsdataset/impl/RamDiskReplicaTracker.java   |  7 +++--
 .../server/datanode/web/DatanodeHttpServer.java |  6 ++--
 .../web/RestCsrfPreventionFilterHandler.java|  4 +--
 .../datanode/web/SimpleHttpProxyHandler.java|  4 +--
 .../web/webhdfs/DataNodeUGIProvider.java|  6 ++--
 .../datanode/web/webhdfs/ExceptionHandler.java  |  4 +--
 .../server/datanode/web/webhdfs/HdfsWriter.java |  8 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java| 10 +++---
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 12 +++
 .../hadoop/hdfs/server/namenode/CachePool.java  |  2 --
 .../hdfs/server/namenode/CheckpointConf.java|  7 +++--
 .../hdfs/server/namenode/Checkpointer.java  |  8 ++---
 .../ContentSummaryComputationContext.java   |  8 ++---
 .../hadoop/hdfs/server/namenode/DfsServlet.java |  7 +++--
 .../namenode/EditLogBackupOutputStream.java |  7 +++--
 .../server/namenode/EditLogFileInputStream.java |  8 ++---
 .../namenode/EditLogFileOutputStream.java   | 11 ---
 .../hdfs/server/namenode/EditsDoubleBuffer.java |  7 +++--
 .../hdfs/server/namenode/FSEditLogAsync.java|  8 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   |  7 +++--
 .../hadoop/hdfs/server/namenode/FSImage.java|  9 +++---
 .../hdfs/server/namenode/FSImageFormat.java |  6 ++--
 .../server/namenode/FSImageFormatPBINode.java   |  7 +++--
 ...FSImagePreTransactionalStorageInspector.java | 10 +++---
 .../FSImageTransactionalStorageInspector.java   |  6 ++--
 .../hdfs/server/namenode/FSNamesyste

[09/50] [abbrv] hadoop git commit: HDDS-414. Fix sbin/stop-ozone.sh to stop Ozone daemons. Contributed by Elek, Marton.

2018-09-17 Thread shv
HDDS-414. Fix sbin/stop-ozone.sh to stop Ozone daemons. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4441fe92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4441fe92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4441fe92

Branch: refs/heads/HDFS-12943
Commit: 4441fe9201223d02b8aecccd36333bdccc9c0d0b
Parents: 76a0fdf
Author: Hanisha Koneru 
Authored: Thu Sep 13 13:34:22 2018 -0700
Committer: Hanisha Koneru 
Committed: Thu Sep 13 13:34:22 2018 -0700

--
 .../compose/ozonescripts/.ssh/authorized_keys   | 16 
 .../src/main/compose/ozonescripts/.ssh/config   | 18 +
 .../main/compose/ozonescripts/.ssh/environment  | 16 
 .../src/main/compose/ozonescripts/.ssh/id_rsa   | 42 
 .../main/compose/ozonescripts/.ssh/id_rsa.pub   | 16 
 .../src/main/compose/ozonescripts/Dockerfile| 33 +++
 .../src/main/compose/ozonescripts/README.md | 38 ++
 .../compose/ozonescripts/docker-compose.yaml| 42 
 .../src/main/compose/ozonescripts/docker-config | 37 +
 hadoop-dist/src/main/compose/ozonescripts/ps.sh | 17 
 .../src/main/compose/ozonescripts/start.sh  | 24 +++
 .../src/main/compose/ozonescripts/stop.sh   | 17 
 hadoop-ozone/common/src/main/bin/start-ozone.sh |  0
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  | 16 
 14 files changed, 325 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4441fe92/hadoop-dist/src/main/compose/ozonescripts/.ssh/authorized_keys
--
diff --git a/hadoop-dist/src/main/compose/ozonescripts/.ssh/authorized_keys b/hadoop-dist/src/main/compose/ozonescripts/.ssh/authorized_keys
new file mode 100644
index 000..ae39052
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozonescripts/.ssh/authorized_keys
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ssh-rsa B3NzaC1yc2EDAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4441fe92/hadoop-dist/src/main/compose/ozonescripts/.ssh/config
--
diff --git a/hadoop-dist/src/main/compose/ozonescripts/.ssh/config b/hadoop-dist/src/main/compose/ozonescripts/.ssh/config
new file mode 100644
index 000..6506916
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozonescripts/.ssh/config
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+Host *
+ UserKnownHostsFile /dev/null
+ StrictHostKeyChecking no

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4441fe92/hadoop-dist/src/main/compose/ozonescripts/.ssh/environment
--
diff --git a/hadoop-dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-dist/src/main/compose/ozone

[31/50] [abbrv] hadoop git commit: HDDS-469. Rename 'ozone oz' to 'ozone sh'. Contributed by Arpit Agarwal.

2018-09-17 Thread shv
HDDS-469. Rename 'ozone oz' to 'ozone sh'. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82fbbd54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82fbbd54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82fbbd54

Branch: refs/heads/HDFS-12943
Commit: 82fbbd546c74785bcb5317e592ab2fc1c47ef59b
Parents: 87e2c0f
Author: Arpit Agarwal 
Authored: Sat Sep 15 22:12:47 2018 -0700
Committer: Arpit Agarwal 
Committed: Sat Sep 15 22:12:47 2018 -0700

--
 .../src/test/acceptance/basic/ozone-shell.robot | 36 ++--
 .../src/test/acceptance/ozonefs/ozonefs.robot   |  6 ++--
 .../acceptance/ozonefs/ozonesinglenode.robot| 16 -
 hadoop-ozone/common/src/main/bin/ozone  | 12 +++
 hadoop-ozone/docs/content/CommandShell.md   | 34 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 16 -
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  4 +--
 7 files changed, 62 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82fbbd54/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
index cc88a8a..1bd1eb6 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
@@ -48,39 +48,39 @@ RpcClient without scheme
 *** Keywords ***
 Test ozone shell
 [arguments] ${protocol} ${server}   ${volume}
-${result} = Execute on  datanode    ozone oz volume create ${protocol}${server}/${volume} --user bilbo --quota 100TB --root
+${result} = Execute on  datanode    ozone sh volume create ${protocol}${server}/${volume} --user bilbo --quota 100TB --root
 Should not contain  ${result}   Failed
 Should contain  ${result}   Creating Volume: ${volume}
-${result} = Execute on  datanode    ozone oz volume list ${protocol}${server}/ --user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+${result} = Execute on  datanode    ozone sh volume list ${protocol}${server}/ --user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
 Should contain  ${result}   createdOn
-${result} = Execute on  datanode    ozone oz volume list --user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
+${result} = Execute on  datanode    ozone sh volume list --user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
 Should contain  ${result}   createdOn
-Execute on  datanode    ozone oz volume update ${protocol}${server}/${volume} --user bill --quota 10TB
-${result} = Execute on  datanode    ozone oz volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
+Execute on  datanode    ozone sh volume update ${protocol}${server}/${volume} --user bill --quota 10TB
+${result} = Execute on  datanode    ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
 Should Be Equal ${result}   bill
-${result} = Execute on  datanode    ozone oz volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
+${result} = Execute on  datanode    ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
 Should Be Equal ${result}   10
-Execute on  datanode    ozone oz bucket create ${protocol}${server}/${volume}/bb1
-${result} = Execute on  datanode    ozone oz bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+Execute on  datanode    ozone s

[46/50] [abbrv] hadoop git commit: Merge commit '9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943

2018-09-17 Thread shv
Merge commit '9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943

# Conflicts:
#   hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1363eff6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1363eff6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1363eff6

Branch: refs/heads/HDFS-12943
Commit: 1363eff69c36c4f2085194b59a86370505cc00cd
Parents: 94d7f90 9af96d4
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 17:39:11 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Sep 17 17:39:11 2018 -0700

--
 .../hadoop-common/src/main/bin/start-all.cmd| 104 +--
 .../hadoop-common/src/main/bin/stop-all.cmd | 104 +--
 .../org/apache/hadoop/http/IsActiveServlet.java |  71 +
 .../apache/hadoop/http/TestIsActiveServlet.java |  95 +
 .../router/IsRouterActiveServlet.java   |  37 +++
 .../federation/router/RouterHttpServer.java |   9 ++
 .../src/site/markdown/HDFSRouterFederation.md   |   2 +-
 .../hadoop-hdfs/src/main/bin/hdfs-config.cmd|  86 +++
 .../hadoop-hdfs/src/main/bin/start-dfs.cmd  |  82 +++
 .../hadoop-hdfs/src/main/bin/stop-dfs.cmd   |  82 +++
 .../namenode/IsNameNodeActiveServlet.java   |  33 ++
 .../server/namenode/NameNodeHttpServer.java |   3 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |   8 ++
 hadoop-mapreduce-project/bin/mapred-config.cmd  |  86 +++
 .../hadoop-streaming/src/test/bin/cat.cmd   |  36 +++
 .../hadoop-streaming/src/test/bin/xargs_cat.cmd |  36 +++
 .../hadoop-yarn/bin/start-yarn.cmd  |  94 -
 .../hadoop-yarn/bin/stop-yarn.cmd   |  94 -
 .../IsResourceManagerActiveServlet.java |  38 +++
 .../server/resourcemanager/ResourceManager.java |   5 +
 .../resourcemanager/webapp/RMWebAppFilter.java  |   3 +-
 .../src/site/markdown/ResourceManagerHA.md  |   5 +
 22 files changed, 709 insertions(+), 404 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1363eff6/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
--
diff --cc hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
index 9f65b5d,9f65b5d..805cfbe
--- a/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
+++ b/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
@@@ -1,52 -1,52 +1,52 @@@
--@echo off
--@rem Licensed to the Apache Software Foundation (ASF) under one or more
--@rem contributor license agreements.  See the NOTICE file distributed with
--@rem this work for additional information regarding copyright ownership.
--@rem The ASF licenses this file to You under the Apache License, Version 2.0
--@rem (the "License"); you may not use this file except in compliance with
--@rem the License.  You may obtain a copy of the License at
--@rem
--@rem http://www.apache.org/licenses/LICENSE-2.0
--@rem
--@rem Unless required by applicable law or agreed to in writing, software
--@rem distributed under the License is distributed on an "AS IS" BASIS,
--@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--@rem See the License for the specific language governing permissions and
--@rem limitations under the License.
--
--setlocal enabledelayedexpansion
--
--@rem Start all hadoop daemons.  Run this on master node.
--
--echo This script is Deprecated. Instead use start-dfs.cmd and start-yarn.cmd
--
--if not defined HADOOP_BIN_PATH ( 
--  set HADOOP_BIN_PATH=%~dp0
--)
--
--if "%HADOOP_BIN_PATH:~-1%" == "\" (
--  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
--)
--
--set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
--if not defined HADOOP_LIBEXEC_DIR (
--  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
--)
--
--call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
--if "%1" == "--config" (
--  shift
--  shift
--)
--
--@rem start hdfs daemons if hdfs is present
--if exist %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd (
--  call %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd --config %HADOOP_CONF_DIR%
--)
--
--@rem start yarn daemons if yarn is present
--if exist %HADOOP_YARN_HOME%\sbin\start-yarn.cmd (
--  call %HADOOP_YARN_HOME%\sbin\start-yarn.cmd --config %HADOOP_CONF_DIR%
--)
--
--endlocal
++@echo off
++@rem Licensed to the Apache Software Foundation (ASF) under one or more
++@rem contributor license agreements.  See the NOTICE file distributed with
++@rem this work for additional information regarding copyright ownership.
++@rem The ASF licenses this file to You under the Apache License, Version 2.0
++@rem (the "License"); you may not use this file except 

[39/50] [abbrv] hadoop git commit: HDDS-435. Enhance the existing ozone documentation. Contributed by Elek, Marton.

2018-09-17 Thread shv
HDDS-435. Enhance the existing ozone documentation.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8af84535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8af84535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8af84535

Branch: refs/heads/HDFS-12943
Commit: 8af8453589232695c01b362872c0bd83686b2184
Parents: fdf5a3f
Author: Anu Engineer 
Authored: Mon Sep 17 10:46:28 2018 -0700
Committer: Anu Engineer 
Committed: Mon Sep 17 10:46:28 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |   2 +
 hadoop-ozone/common/src/main/bin/ozone  |   1 -
 hadoop-ozone/docs/config.toml   |  23 --
 hadoop-ozone/docs/config.yaml   |  41 +++
 hadoop-ozone/docs/content/BucketCommands.md | 106 ++
 hadoop-ozone/docs/content/BuildingSources.md|  37 ++
 hadoop-ozone/docs/content/CommandShell.md   | 232 +---
 hadoop-ozone/docs/content/Concepts.md   | 101 +
 hadoop-ozone/docs/content/Dozone.md | 107 ++
 hadoop-ozone/docs/content/Freon.md  |  61 +++
 hadoop-ozone/docs/content/GettingStarted.md | 369 ---
 hadoop-ozone/docs/content/Hdds.md   |  49 +++
 hadoop-ozone/docs/content/JavaApi.md| 152 
 hadoop-ozone/docs/content/KeyCommands.md| 111 ++
 hadoop-ozone/docs/content/Metrics.md| 170 -
 hadoop-ozone/docs/content/OzoneFS.md|  64 
 hadoop-ozone/docs/content/OzoneManager.md   |  61 +++
 hadoop-ozone/docs/content/RealCluster.md|  71 
 hadoop-ozone/docs/content/Rest.md   |  32 +-
 hadoop-ozone/docs/content/RunningViaDocker.md   |  70 
 hadoop-ozone/docs/content/RunningWithHDFS.md|  61 +++
 hadoop-ozone/docs/content/SCMCLI.md |  26 ++
 hadoop-ozone/docs/content/Settings.md   | 139 +++
 hadoop-ozone/docs/content/VolumeCommands.md | 100 +
 hadoop-ozone/docs/content/_index.md |  86 +
 hadoop-ozone/docs/pom.xml   |   4 -
 .../ozonedoc/layouts/_default/single.html   |   3 +
 .../ozonedoc/layouts/partials/header.html   |   4 +-
 .../ozonedoc/layouts/partials/sidebar.html  |   8 +-
 .../themes/ozonedoc/static/css/ozonedoc.css |  14 +-
 30 files changed, 1493 insertions(+), 812 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af84535/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index 128ce10..b4d94b3 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -156,6 +156,8 @@ cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-ser
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./
+
 
 rm sbin/*all.sh
 rm sbin/*all.cmd

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af84535/hadoop-ozone/common/src/main/bin/ozone
--
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 5ad6801..6bf8b01 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -32,7 +32,6 @@ function hadoop_usage
   hadoop_add_option "--loglevel level" "set the log4j level for this command"
   hadoop_add_option "--workers" "turn on worker mode"
 
-
   hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
   hadoop_add_subcommand "datanode" daemon "run a HDDS datanode"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af84535/hadoop-ozone/docs/config.toml
--
diff --git a/hadoop-ozone/docs/config.toml b/hadoop-ozone/docs/config.toml
deleted file mode 100644
index eed74a9..000
--- a/hadoop-ozone/docs/config.toml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in complian

[32/50] [abbrv] hadoop git commit: HDDS-470. Ozone acceptance tests are failing. Contributed by Elek, Marton.

2018-09-17 Thread shv
HDDS-470. Ozone acceptance tests are failing. Contributed by Elek, Marton.

(cherry picked from commit dca8d0c2615d142bca55d367a0bc988ce9860368)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07385f88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07385f88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07385f88

Branch: refs/heads/HDFS-12943
Commit: 07385f886ed534aba527820c0bda4dcf410e05f6
Parents: 82fbbd5
Author: Arpit Agarwal 
Authored: Sun Sep 16 14:31:09 2018 -0700
Committer: Arpit Agarwal 
Committed: Sun Sep 16 14:31:29 2018 -0700

--
 .../test/acceptance/ozonefs/ozonesinglenode.robot | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07385f88/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
index b718bc9..15ad5bb 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 *** Settings ***
-Documentation   Ozonefs Single Node Test
+Documentation   Ozone Single Node Test
 Library OperatingSystem
 Suite Setup Startup Ozone cluster with size  1
 Suite Teardown  Teardown Ozone cluster
@@ -27,23 +27,23 @@ ${PROJECTDIR}   ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 Create volume and bucket
-Execute on  datanode    ozone sh -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root
-Execute on  datanode    ozone sh -createBucket http://ozoneManager/fstest/bucket1
+Execute on  datanode    ozone sh volume create http://ozoneManager/fstest --user bilbo --quota 100TB --root
+Execute on  datanode    ozone sh bucket create http://ozoneManager/fstest/bucket1
 
 Check volume from ozonefs
 ${result} = Execute on  datanode  ozone fs -ls o3://bucket1.fstest/
 
 Create directory from ozonefs
 Execute on  datanode  ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep
-${result} = Execute on  ozoneManager  ozone sh -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+${result} = Execute on  ozoneManager  ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
 Should contain    ${result}    testdir/deep
 Test key handling
-Execute on  datanode    ozone sh -putKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt -replicationFactor 1
+Execute on  datanode    ozone sh key put o3://ozoneManager/fstest/bucket1/key1 NOTICE.txt --replication ONE
 Execute on  datanoderm -f NOTICE.txt.1
-Execute on  datanode    ozone sh -getKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt.1
+Execute on  datanode    ozone sh key get o3://ozoneManager/fstest/bucket1/key1 NOTICE.txt.1
 Execute on  datanodels -l NOTICE.txt.1
-${result} = Execute on  datanode    ozone sh -infoKey o3://ozoneManager/fstest/bucket1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+${result} = Execute on  datanode    ozone sh key info o3://ozoneManager/fstest/bucket1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
 Should contain  ${result}   createdOn
-${result} = Execute on  datanode    ozone sh -listKey o3://ozoneManager/fstest/bucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+${result} = Execute on  datanode    ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
 Should Be Equal ${result}   key1
-Execute on  datanode    ozone sh -deleteKey o3://ozoneManager/fstest/bucket1/key1 -v
+Execute on  datanode    ozone sh key delete o3://ozoneManager/fstest/bucket1/key1



[14/50] [abbrv] hadoop git commit: YARN-8720. CapacityScheduler does not enforce max resource allocation check at queue level. Contributed by Tarun Parimi.

2018-09-17 Thread shv
YARN-8720. CapacityScheduler does not enforce max resource allocation check at queue level. Contributed by Tarun Parimi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1a893fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1a893fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1a893fd

Branch: refs/heads/HDFS-12943
Commit: f1a893fdbc2dbe949cae786f08bdb2651b88d673
Parents: 568ebec
Author: Weiwei Yang 
Authored: Fri Sep 14 16:33:51 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Sep 14 16:33:51 2018 +0800

--
 .../hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java | 3 ++-
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java | 5 +++--
 .../hadoop/yarn/server/resourcemanager/TestAppManager.java  | 3 +++
 .../hadoop/yarn/server/resourcemanager/TestClientRMService.java | 3 +++
 4 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1a893fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 4cd5925..b2c5ef3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -231,7 +231,8 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
   }
 }
 
-Resource maximumCapacity = getScheduler().getMaximumResourceCapability();
+Resource maximumCapacity =
+getScheduler().getMaximumResourceCapability(app.getQueue());
 
 // sanity check
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1a893fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index ee78c08..d0f2ce6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -566,10 +566,11 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
 }
 
 // Normalize all requests
+String queue = submissionContext.getQueue();
 for (ResourceRequest amReq : amReqs) {
   SchedulerUtils.normalizeAndValidateRequest(amReq,
-  scheduler.getMaximumResourceCapability(),
-  submissionContext.getQueue(), scheduler, isRecovery, rmContext);
+  scheduler.getMaximumResourceCapability(queue),
+  queue, scheduler, isRecovery, rmContext);
 
   amReq.setCapability(
   scheduler.getNormalizedResource(amReq.getCapability()));

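The essence of the fix as a hedged stand-in (the real check goes through SchedulerUtils.normalizeAndValidateRequest; names and numbers below are illustrative): requests must be validated against the queue-level maximum, which can be tighter than the cluster-wide maximum.

public final class QueueMaxCheckDemo {
  static void validate(long requestMb, long queueMaxMb) {
    if (requestMb > queueMaxMb) {
      throw new IllegalArgumentException("Requested " + requestMb
          + " MB exceeds the queue maximum of " + queueMaxMb + " MB");
    }
  }

  public static void main(String[] args) {
    long queueMaxMb = 8192; // per-queue cap, below a 16384 MB cluster cap
    validate(4096, queueMaxMb);  // passes
    validate(12288, queueMaxMb); // throws, even though it fits cluster-wide
  }
}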
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1a893fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 27e87bd..0bd5372 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 

[20/50] [abbrv] hadoop git commit: HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. Contributed by Namit Maheshwari.

2018-09-17 Thread shv
HDDS-452. 'ozone scm' with incorrect argument first logs all the STARTUP_MSG. Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/446cb830
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/446cb830
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/446cb830

Branch: refs/heads/HDFS-12943
Commit: 446cb8301ebd651879dfd403d5fa342b4dfaf6e3
Parents: 0c8a43b
Author: Jitendra Pandey 
Authored: Fri Sep 14 11:38:50 2018 -0700
Committer: Jitendra Pandey 
Committed: Fri Sep 14 11:39:36 2018 -0700

--
 .../hadoop/hdds/scm/server/StorageContainerManager.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/446cb830/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 28a4983..b3408a4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -361,8 +361,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 hParser.printGenericCommandUsage(System.err);
 System.exit(1);
   }
-  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-  LOG);
   StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), conf);
   if (scm != null) {
 scm.start();
@@ -395,9 +393,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 }
 switch (startOpt) {
 case INIT:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   terminate(scmInit(conf) ? 0 : 1);
   return null;
 case GENCLUSTERID:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   System.out.println("Generating new cluster id:");
   System.out.println(StorageInfo.newClusterID());
   terminate(0);
@@ -407,6 +409,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   terminate(0);
   return null;
 default:
+  StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+  LOG);
   return new StorageContainerManager(conf);
 }
   }

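The shape of the change as a minimal sketch (command names illustrative): classify the arguments first and emit the noisy startup banner only for invocations that will actually run something, so bad usage prints plain help.

public final class StartupBannerDemo {
  public static void main(String[] args) {
    if (args.length == 0 || !isKnownCommand(args[0])) {
      System.err.println("Usage: scm [init|genclusterid|help]");
      System.exit(1); // no STARTUP_MSG banner for an invalid invocation
    }
    printStartupBanner(args); // reached only for a recognized command
    // ... dispatch to the selected command ...
  }

  private static boolean isKnownCommand(String cmd) {
    return cmd.equals("init") || cmd.equals("genclusterid") || cmd.equals("help");
  }

  private static void printStartupBanner(String[] args) {
    System.out.println("STARTUP_MSG: starting with args " + String.join(" ", args));
  }
}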




[16/50] [abbrv] hadoop git commit: YARN-8706. Allow additional flag in docker inspect call. Contributed by Chandni Singh

2018-09-17 Thread shv
YARN-8706. Allow additional flag in docker inspect call. Contributed by Chandni Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99237607
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99237607
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99237607

Branch: refs/heads/HDFS-12943
Commit: 99237607bf73e97b06eeb3455aa1327bfab4d5d2
Parents: a12f12f
Author: Eric Yang 
Authored: Fri Sep 14 11:46:59 2018 -0400
Committer: Eric Yang 
Committed: Fri Sep 14 11:46:59 2018 -0400

--
 .../src/main/native/container-executor/impl/utils/docker-util.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99237607/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 3884abf..ee80798 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -558,7 +558,8 @@ cleanup:
 
 int get_docker_inspect_command(const char *command_file, const struct configuration *conf, args *args) {
   const char *valid_format_strings[] = { "{{.State.Status}}",
-
"{{range(.NetworkSettings.Networks)}}{{.IPAddress}},{{end}}{{.Config.Hostname}}"
 };
+
"{{range(.NetworkSettings.Networks)}}{{.IPAddress}},{{end}}{{.Config.Hostname}}",
+ "{{.State.Status}},{{.Config.StopSignal}}"};
   int ret = 0, i = 0, valid_format = 0;
   char *format = NULL, *container_name = NULL, *tmp_buffer = NULL;
   struct configuration command_config = {0, NULL};
@@ -578,7 +579,7 @@ int get_docker_inspect_command(const char *command_file, const struct configuration *conf, args *args) {
 ret = INVALID_DOCKER_INSPECT_FORMAT;
 goto free_and_exit;
   }
-  for (i = 0; i < 2; ++i) {
+  for (i = 0; i < 3; ++i) {
 if (strcmp(format, valid_format_strings[i]) == 0) {
   valid_format = 1;
   break;





[47/50] [abbrv] hadoop git commit: Merge commit 'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943

2018-09-17 Thread shv
Merge commit 'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e97b104b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e97b104b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e97b104b

Branch: refs/heads/HDFS-12943
Commit: e97b104b7b33f2770a95aee0fbded7a3d3966873
Parents: 1363eff b3161c4
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 17:43:13 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Sep 17 17:43:13 2018 -0700

--
 .../hadoop/metrics2/annotation/Metric.java  |   5 +
 .../metrics2/lib/MutableMetricsFactory.java |   4 +
 .../apache/hadoop/hdds/scm/XceiverClient.java   |   8 +-
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   9 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |  36 ++-
 .../scm/client/ContainerOperationClient.java|   2 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  11 +
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  17 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  10 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  12 +
 .../main/java/org/apache/ratis/RatisHelper.java |  22 +-
 .../common/src/main/resources/ozone-default.xml |  31 ++-
 .../container/common/impl/HddsDispatcher.java   |   6 +-
 .../common/statemachine/StateContext.java   |  45 
 .../states/endpoint/HeartbeatEndpointTask.java  |  28 +++
 .../server/ratis/ContainerStateMachine.java |  17 +-
 .../server/ratis/XceiverServerRatis.java| 205 -
 .../container/ozoneimpl/OzoneContainer.java |   2 +-
 .../StorageContainerDatanodeProtocol.proto  |  26 +++
 .../hdds/scm/container/ContainerMapping.java| 109 +++--
 .../scm/container/ContainerStateManager.java|   5 +-
 .../hadoop/hdds/scm/container/Mapping.java  |  14 ++
 .../scm/container/closer/ContainerCloser.java   | 194 
 .../hadoop/hdds/scm/events/SCMEvents.java   |  24 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |  16 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java|  34 +--
 .../pipelines/PipelineActionEventHandler.java   |  60 +
 .../scm/pipelines/PipelineCloseHandler.java |  38 
 .../hdds/scm/pipelines/PipelineManager.java |  10 +-
 .../hdds/scm/pipelines/PipelineSelector.java|  46 ++--
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  14 +-
 .../standalone/StandaloneManagerImpl.java   |   7 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  23 ++
 .../scm/server/StorageContainerManager.java |  13 +-
 .../scm/container/TestContainerMapping.java |  43 
 .../container/closer/TestContainerCloser.java   | 228 ---
 .../mapreduce/v2/hs/HistoryFileManager.java |  12 +-
 .../mapreduce/v2/hs/TestHistoryFileManager.java |  52 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |  10 +
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   6 +-
 .../hdds/scm/pipeline/TestNodeFailure.java  | 126 ++
 .../hdds/scm/pipeline/TestPipelineClose.java|   6 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  15 ++
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  21 ++
 .../transport/server/ratis/TestCSMMetrics.java  |   3 +-
 .../container/server/TestContainerServer.java   |   3 +-
 .../server/TestContainerStateMachine.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  26 ++-
 .../hadoop/ozone/om/VolumeManagerImpl.java  |   2 +-
 49 files changed, 936 insertions(+), 722 deletions(-)
--






[03/50] [abbrv] hadoop git commit: HDDS-233. Update ozone to latest ratis snapshot build(0.3.0-50588bd-SNAPSHOT). Contributed by Shashikant Banerjee.

2018-09-17 Thread shv
HDDS-233. Update ozone to latest ratis snapshot build(0.3.0-50588bd-SNAPSHOT). Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9e0b69a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9e0b69a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9e0b69a

Branch: refs/heads/HDFS-12943
Commit: c9e0b69ab3b8e70b804e325ebe8901c2be98edca
Parents: f4bda5e
Author: Mukul Kumar Singh 
Authored: Thu Sep 13 19:00:55 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Sep 13 19:01:07 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientRatis.java | 97 
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  6 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  6 ++
 .../main/java/org/apache/ratis/RatisHelper.java | 45 +++--
 .../common/src/main/resources/ozone-default.xml |  8 ++
 .../server/ratis/XceiverServerRatis.java| 24 +++--
 .../apache/hadoop/ozone/RatisTestHelper.java| 13 +++
 .../transport/server/ratis/TestCSMMetrics.java  | 16 +---
 .../container/server/TestContainerServer.java   | 13 +--
 hadoop-project/pom.xml  |  2 +-
 10 files changed, 127 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e0b69a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 499f94d..f0db7b5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.shaded.com.google.protobuf
 .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
@@ -35,15 +37,17 @@ import org.apache.ratis.RatisHelper;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.ratis.util.CheckedBiConsumer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
@@ -65,50 +65,48 @@ public final class XceiverClientRatis extends XceiverClientSpi {
 ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
 final int maxOutstandingRequests =
 HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
+final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
 return new XceiverClientRatis(pipeline,
-SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests);
+SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
+retryPolicy);
   }
 
   private final Pipeline pipeline;
   private final RpcType rpcType;
   private final AtomicReference<RaftClient> client = new AtomicReference<>();
   private final int maxOutstandingRequests;
+  private final RetryPolicy retryPolicy;
 
   /**
* Constructs a client.
*/
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
-  int maxOutStandingChunks) {
+  int maxOutStandingChunks, RetryPolicy retryPolicy) {
 super();
 this.pipeline = pipeline;
 this.rpcType = rpcType;
 this.maxOutstandingRequests = maxOutStandingChunks;
+this.retryPolicy = retryPolicy;
   }
 
   /**
* {@inheritDoc}
*/
-  public void createPipeline()
-  throws IOException {
-RaftGroupId groupId = pipeline.getId().getRaftGroupID();
-RaftGroup group = RatisHelper.newRaftGroup(groupId, pipeline.getMachines());
-LOG.debug("initializing pipeline:{} with nodes:{}",
-pipeline.getId(), group.getPeers());
-reinitialize(pipeline.getMachines(), RatisHelper.emptyRaftGroup(), group);
+  public void createPipeline() throws IOException {
+final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
+LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
+callRatisRpc(pipeline.getMachines(),
+(raftClient, peer) -> raftCli

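A generic, self-contained sketch of the retry idea that RatisHelper.createRetryPolicy(ozoneConf) wires in (the helper builds a Ratis RetryPolicy from configuration; the fixed-sleep loop below only illustrates the concept):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public final class RetryDemo {
  static <T> T callWithRetries(Callable<T> op, int maxAttempts, long sleepMs)
      throws Exception {
    if (maxAttempts < 1) {
      throw new IllegalArgumentException("maxAttempts must be >= 1");
    }
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return op.call();
      } catch (Exception e) {
        last = e; // remember the failure
        if (attempt < maxAttempts) {
          TimeUnit.MILLISECONDS.sleep(sleepMs); // back off before retrying
        }
      }
    }
    throw last; // all attempts exhausted
  }

  public static void main(String[] args) throws Exception {
    System.out.println(callWithRetries(() -> "ok", 3, 100));
  }
}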
[04/50] [abbrv] hadoop git commit: YARN-8729. Node status updater thread could be lost after it is restarted. Contributed by Tao Yang.

2018-09-17 Thread shv
YARN-8729. Node status updater thread could be lost after it is restarted. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39c1ea1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39c1ea1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39c1ea1e

Branch: refs/heads/HDFS-12943
Commit: 39c1ea1ed454b6c61f0985fc951f20913ed964fb
Parents: c9e0b69
Author: Weiwei Yang 
Authored: Thu Sep 13 22:21:35 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Sep 13 22:21:35 2018 +0800

--
 .../hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39c1ea1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index df76ed7..3bb9f92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -326,8 +326,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
 statusUpdater.join();
 registerWithRM();
 statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater");
-statusUpdater.start();
 this.isStopped = false;
+statusUpdater.start();
 LOG.info("NodeStatusUpdater thread is reRegistered and restarted");
   } catch (Exception e) {
 String errorMessage = "Unexpected error rebooting NodeStatusUpdater";

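Why the two-line swap matters, as a self-contained sketch: the updater thread's loop gates on isStopped, so the flag must be cleared before start(), or the freshly started thread can observe the stale true value and exit immediately, silently losing the updater.

public class RestartOrderingDemo {
  private volatile boolean isStopped = true;

  private final Runnable statusUpdaterRunnable = () -> {
    while (!isStopped) {
      // ... heartbeat to the ResourceManager ...
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  };

  public void restart() {
    Thread statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater");
    isStopped = false;     // must precede start(): otherwise the new thread
    statusUpdater.start(); // may see isStopped == true and terminate at once
  }
}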




[17/50] [abbrv] hadoop git commit: YARN-8748. Javadoc warnings within the nodemanager package. Contributed by Craig Condit

2018-09-17 Thread shv
YARN-8748. Javadoc warnings within the nodemanager package. Contributed by 
Craig Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78902f02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78902f02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78902f02

Branch: refs/heads/HDFS-12943
Commit: 78902f0250e2c6d3dea7f2b5b1fcf086a80aa727
Parents: 9923760
Author: Shane Kumpf 
Authored: Fri Sep 14 10:28:36 2018 -0600
Committer: Shane Kumpf 
Committed: Fri Sep 14 10:28:36 2018 -0600

--
 .../server/nodemanager/ContainerExecutor.java   |  3 ++-
 .../TrafficControlBandwidthHandlerImpl.java |  2 +-
 .../JavaSandboxLinuxContainerRuntime.java   | 28 +---
 3 files changed, 22 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78902f02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index ba272e2..98cc2a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -53,6 +53,7 @@ import 
org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.ShellScriptBuilder;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
@@ -88,7 +89,7 @@ public abstract class ContainerExecutor implements 
Configurable {
  /**
   * The relative path to which debug information will be written.
   *
-   * @see ContainerLaunch.ShellScriptBuilder#listDebugInformation
+   * @see ShellScriptBuilder#listDebugInformation
   */
   public static final String DIRECTORY_CONTENTS = "directory.info";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78902f02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
index c04e935..a65de02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
@@ -241,7 +241,7 @@ public class TrafficControlBandwidthHandlerImpl
   * Cleanup operations once container is completed - deletes cgroup and
   * removes traffic shaping rule(s).
   * @param containerId of the container that was completed.
-   * @return
+   * @return null
   * @throws ResourceHandlerException
   */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78902f02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java

[30/50] [abbrv] hadoop git commit: HDDS-465. Suppress group mapping lookup warnings for ozone. Contributed by Xiaoyu Yao.

2018-09-17 Thread shv
HDDS-465. Suppress group mapping lookup warnings for ozone. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87e2c0f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87e2c0f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87e2c0f4

Branch: refs/heads/HDFS-12943
Commit: 87e2c0f4258f2e46183f796d7d904c0b27030df0
Parents: c9fa081
Author: Nanda kumar 
Authored: Sat Sep 15 23:14:57 2018 +0530
Committer: Nanda kumar 
Committed: Sat Sep 15 23:14:57 2018 +0530

--
 hadoop-dist/src/main/compose/ozone/docker-config | 1 +
 hadoop-dist/src/main/compose/ozoneperf/docker-config | 1 +
 hadoop-hdds/common/src/main/conf/log4j.properties| 1 +
 3 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2c0f4/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 0def70e..0bf76a3 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,6 +31,7 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd 
HH:mm:ss} %-5p %c{1}:%L - %m%n
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2c0f4/hadoop-dist/src/main/compose/ozoneperf/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config 
b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index 309adee..acfdb86 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -31,4 +31,5 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd HH
 
HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2c0f4/hadoop-hdds/common/src/main/conf/log4j.properties
--
diff --git a/hadoop-hdds/common/src/main/conf/log4j.properties 
b/hadoop-hdds/common/src/main/conf/log4j.properties
index 87c8da8..663e254 100644
--- a/hadoop-hdds/common/src/main/conf/log4j.properties
+++ b/hadoop-hdds/common/src/main/conf/log4j.properties
@@ -154,3 +154,4 @@ log4j.logger.org.apache.commons.beanutils=WARN
 
 log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR





[13/50] [abbrv] hadoop git commit: YARN-8772. Annotation javax.annotation.Generated has moved

2018-09-17 Thread shv
YARN-8772. Annotation javax.annotation.Generated has moved

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/568ebecd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/568ebecd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/568ebecd

Branch: refs/heads/HDFS-12943
Commit: 568ebecdf49d0919db1a8d856043c10b76326c34
Parents: ef5c776
Author: Andrew Purtell 
Authored: Thu Sep 13 01:27:02 2018 +
Committer: Akira Ajisaka 
Committed: Fri Sep 14 15:02:52 2018 +0900

--
 .../java/org/apache/hadoop/yarn/service/api/records/Artifact.java | 1 -
 .../org/apache/hadoop/yarn/service/api/records/Component.java | 1 -
 .../org/apache/hadoop/yarn/service/api/records/ConfigFile.java| 1 -
 .../org/apache/hadoop/yarn/service/api/records/Configuration.java | 1 -
 .../org/apache/hadoop/yarn/service/api/records/Container.java | 1 -
 .../java/org/apache/hadoop/yarn/service/api/records/Error.java| 1 -
 .../apache/hadoop/yarn/service/api/records/KerberosPrincipal.java | 2 --
 .../hadoop/yarn/service/api/records/PlacementConstraint.java  | 3 ---
 .../apache/hadoop/yarn/service/api/records/PlacementPolicy.java   | 3 ---
 .../apache/hadoop/yarn/service/api/records/PlacementScope.java| 3 ---
 .../org/apache/hadoop/yarn/service/api/records/PlacementType.java | 3 ---
 .../apache/hadoop/yarn/service/api/records/ReadinessCheck.java| 1 -
 .../java/org/apache/hadoop/yarn/service/api/records/Resource.java | 1 -
 .../hadoop/yarn/service/api/records/ResourceInformation.java  | 2 --
 .../java/org/apache/hadoop/yarn/service/api/records/Service.java  | 1 -
 .../org/apache/hadoop/yarn/service/api/records/ServiceState.java  | 1 -
 .../org/apache/hadoop/yarn/service/api/records/ServiceStatus.java | 1 -
 17 files changed, 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/568ebecd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java
index ce062cc..bba5791 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 @ApiModel(description = "Artifact of an service component")
-@javax.annotation.Generated(value = "class 
io.swagger.codegen.languages.JavaClientCodegen", date = 
"2016-06-02T08:15:05.615-07:00")
 @JsonInclude(JsonInclude.Include.NON_NULL)
 public class Artifact implements Serializable {
   private static final long serialVersionUID = 3608929500111099035L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/568ebecd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
index 0481123..9f64b29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 @ApiModel(description = "One or more components of the service. If the service 
is HBase say, then the component can be a simple role like master or 
regionserver. If the service is a complex business webapp then

[24/50] [abbrv] hadoop git commit: HDDS-419. ChunkInputStream bulk read api does not read from all the chunks. Contributed by Lokesh Jain and Mukul Kumar.

2018-09-17 Thread shv
HDDS-419. ChunkInputStream bulk read api does not read from all the chunks. 
Contributed by Lokesh Jain and Mukul Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f037468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f037468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f037468

Branch: refs/heads/HDFS-12943
Commit: 6f037468bce7bbda6b9fc01166f2c61ae40b690b
Parents: 488806b
Author: Xiaoyu Yao 
Authored: Fri Sep 14 13:34:29 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Sep 14 13:45:38 2018 -0700

--
 .../hdds/scm/storage/ChunkInputStream.java  | 30 +++-
 .../ozone/client/io/ChunkGroupInputStream.java  | 25 
 .../hadoop/ozone/freon/TestDataValidate.java|  4 +--
 .../hadoop/ozone/freon/RandomKeyGenerator.java  |  7 +++--
 4 files changed, 42 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f037468/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index a969b68..a483197 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -121,12 +121,17 @@ public class ChunkInputStream extends InputStream 
implements Seekable {
   return 0;
 }
 checkOpen();
-int available = prepareRead(len);
-if (available == EOF) {
-  return EOF;
+int total = 0;
+while (len > 0) {
+  int available = prepareRead(len);
+  if (available == EOF) {
+return total != 0 ? total : EOF;
+  }
+  buffers.get(bufferIndex).get(b, off + total, available);
+  len -= available;
+  total += available;
 }
-buffers.get(bufferIndex).get(b, off, available);
-return available;
+return total;
   }
 
   @Override
@@ -196,13 +201,20 @@ public class ChunkInputStream extends InputStream 
implements Seekable {
 // next chunk
 chunkIndex += 1;
 final ReadChunkResponseProto readChunkResponse;
+final ChunkInfo chunkInfo = chunks.get(chunkIndex);
 try {
-  readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
-  chunks.get(chunkIndex), blockID, traceID);
+  readChunkResponse = ContainerProtocolCalls
+  .readChunk(xceiverClient, chunkInfo, blockID, traceID);
 } catch (IOException e) {
   throw new IOException("Unexpected OzoneException: " + e.toString(), e);
 }
 ByteString byteString = readChunkResponse.getData();
+if (byteString.size() != chunkInfo.getLen()) {
+  // Bytes read from chunk should be equal to chunk size.
+  throw new IOException(String
+  .format("Inconsistent read for chunk=%s len=%d bytesRead=%d",
+  chunkInfo.getChunkName(), chunkInfo.getLen(), 
byteString.size()));
+}
 buffers = byteString.asReadOnlyByteBufferList();
 bufferIndex = 0;
   }
@@ -260,4 +272,8 @@ public class ChunkInputStream extends InputStream 
implements Seekable {
   public boolean seekToNewSource(long targetPos) throws IOException {
 return false;
   }
+
+  public BlockID getBlockID() {
+return blockID;
+  }
 }

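The key change is the read loop: instead of returning after a single
prepareRead(), the stream keeps draining chunks until the requested length
is satisfied, and reports EOF only when nothing at all was read. A
self-contained sketch of that loop over a plain ByteBuffer; BulkReadSketch
and readFully are illustrative names, not Ozone API:

import java.nio.ByteBuffer;

final class BulkReadSketch {
  static final int EOF = -1;

  static int readFully(ByteBuffer source, byte[] b, int off, int len) {
    int total = 0;
    while (len > 0) {
      int available = Math.min(len, source.remaining());
      if (available == 0) {
        // Nothing left: report a partial read if we got anything, else EOF.
        return total != 0 ? total : EOF;
      }
      source.get(b, off + total, available);
      len -= available;
      total += available;
    }
    return total;
  }
}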
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f037468/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 742cfcc..94966f6 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -115,19 +115,20 @@ public class ChunkGroupInputStream extends InputStream 
implements Seekable {
 return totalReadLen == 0 ? EOF : totalReadLen;
   }
   ChunkInputStreamEntry current = streamEntries.get(currentStreamIndex);
-  int readLen = Math.min(len, (int)current.getRemaining());
-  int actualLen = current.read(b, off, readLen);
-  // this means the underlying stream has nothing at all, return
-  if (actualLen == EOF) {
-return totalReadLen > 0 ? totalReadLen : EOF;
+  int numBytesToRead = Math.min(len, (int)current.getRemaining());
+  int numBytesRead = current.read(b, off, numBytesToRead);
+  if 

[10/50] [abbrv] hadoop git commit: HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot enabled" status. Contributed by Siyao Meng.

2018-09-17 Thread shv
HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot 
enabled" status. Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1de8cba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1de8cba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1de8cba

Branch: refs/heads/HDFS-12943
Commit: a1de8cbac5fb9af403db2a02814575f0940d5f39
Parents: 4441fe9
Author: Wei-Chiu Chuang 
Authored: Fri Sep 14 05:22:56 2018 +0800
Committer: Wei-Chiu Chuang 
Committed: Fri Sep 14 05:22:56 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/web/JsonUtilClient.java |  4 
 .../java/org/apache/hadoop/hdfs/web/TestWebHDFS.java| 12 
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1de8cba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 9bb1846..a685573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -133,6 +133,7 @@ class JsonUtilClient {
 Boolean aclBit = (Boolean) m.get("aclBit");
 Boolean encBit = (Boolean) m.get("encBit");
 Boolean erasureBit  = (Boolean) m.get("ecBit");
+Boolean snapshotEnabledBit  = (Boolean) m.get("snapshotEnabled");
 EnumSet<HdfsFileStatus.Flags> f =
 EnumSet.noneOf(HdfsFileStatus.Flags.class);
 if (aclBit != null && aclBit) {
@@ -144,6 +145,9 @@ class JsonUtilClient {
 if (erasureBit != null && erasureBit) {
   f.add(HdfsFileStatus.Flags.HAS_EC);
 }
+if (snapshotEnabledBit != null && snapshotEnabledBit) {
+  f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
+}
 
 Map<String, Object> ecPolicyObj = (Map<String, Object>) m.get("ecPolicyObj");
 ErasureCodingPolicy ecPolicy = null;

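The pattern above is optional-flag parsing: a Boolean entry that may be
absent from the parsed JSON map toggles a bit in an EnumSet only when it is
present and true, so older servers that omit the field behave the same as
"not snapshottable". A minimal standalone sketch; the enum and class names
are illustrative:

import java.util.EnumSet;
import java.util.Map;

final class FlagParseSketch {
  enum Flags { HAS_ACL, HAS_CRYPT, HAS_EC, SNAPSHOT_ENABLED }

  static EnumSet<Flags> parse(Map<String, Object> m) {
    EnumSet<Flags> f = EnumSet.noneOf(Flags.class);
    Boolean snapshotEnabled = (Boolean) m.get("snapshotEnabled");
    if (snapshotEnabled != null && snapshotEnabled) {
      // Absent or false leaves the bit clear.
      f.add(Flags.SNAPSHOT_ENABLED);
    }
    return f;
  }
}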
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1de8cba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index bb1c398..5d33220 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -482,6 +482,9 @@ public class TestWebHDFS {
 
   // allow snapshots on /bar using webhdfs
   webHdfs.allowSnapshot(bar);
+  // check if snapshot status is enabled
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   webHdfs.createSnapshot(bar, "s1");
   final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
   Assert.assertTrue(webHdfs.exists(s1path));
@@ -491,15 +494,24 @@ public class TestWebHDFS {
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   dfs.deleteSnapshot(bar, "s1");
   dfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
 
   // disallow snapshots on /bar using webhdfs
   dfs.allowSnapshot(bar);
+  // check if snapshot status is enabled, again
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertEquals(1, snapshottableDirs.length);
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   webHdfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled, again
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
   try {





[07/50] [abbrv] hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-17 Thread shv
MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2886024a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2886024a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2886024a

Branch: refs/heads/HDFS-12943
Commit: 2886024ac3a8613ecc27f1595b278ce6fc2d03ba
Parents: 250b500
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:41:38 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // The XmlElementRef annotation should be used to identify the exact
+  // type of a list element; otherwise type metadata is added as XML
+  // attributes, which can lead to incorrect JSON marshaling.
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

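For context on the fix: when a JAXB property holds a polymorphic list,
annotating it with @XmlElementRef makes each element marshal under its own
root element chosen by runtime type, instead of a generic element carrying
type metadata that JSON providers can turn into spurious attributes. A
small standalone sketch under that assumption; all class names below are
illustrative, not the MapReduce DAOs:

import java.util.ArrayList;
import javax.xml.bind.annotation.XmlElementRef;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSeeAlso;

@XmlRootElement(name = "attempts")
class Attempts {
  private ArrayList<Attempt> attempts = new ArrayList<>();

  // Marshal each element by its exact runtime type (its own root element)
  // rather than as a generic element with xsi:type metadata.
  @XmlElementRef
  public ArrayList<Attempt> getAttempts() {
    return attempts;
  }
}

@XmlRootElement
@XmlSeeAlso({ReduceAttempt.class})  // register the subtype with the context
class Attempt { }

@XmlRootElement
class ReduceAttempt extends Attempt { }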
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index d92c275..32d054f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -483,6 +484,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
--
diff --git 
a

[25/50] [abbrv] hadoop git commit: HDDS-466. Handle null in argv of StorageContainerManager#createSCM. Contributed by Nanda kumar

2018-09-17 Thread shv
HDDS-466. Handle null in argv of StorageContainerManager#createSCM. Contributed 
by Nanda kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0a659c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0a659c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0a659c8

Branch: refs/heads/HDFS-12943
Commit: b0a659c8c0500c47f974518ec98ae44c283db9f4
Parents: 6f03746
Author: Nanda kumar 
Authored: Sat Sep 15 02:34:33 2018 +0530
Committer: Nanda kumar 
Committed: Sat Sep 15 02:34:33 2018 +0530

--
 .../apache/hadoop/hdds/scm/server/StorageContainerManager.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0a659c8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index b3408a4..60796c7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -376,9 +376,10 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 out.println(USAGE + "\n");
   }
 
-  public static StorageContainerManager createSCM(String[] argv,
+  public static StorageContainerManager createSCM(String[] args,
   OzoneConfiguration conf)
   throws IOException {
+String[] argv = (args == null) ? new String[0] : args;
 if (!HddsUtils.isHddsEnabled(conf)) {
   System.err.println(
   "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" 
+





[23/50] [abbrv] hadoop git commit: MAPREDUCE-7140. Refactoring TaskAttemptInfo to separate Map and Reduce tasks. Contributed by Oleksandr Shevchenko

2018-09-17 Thread shv
MAPREDUCE-7140. Refactoring TaskAttemptInfo to separate Map and Reduce tasks. 
Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/488806ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/488806ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/488806ba

Branch: refs/heads/HDFS-12943
Commit: 488806baca6d84c12b24532ddeacf6d249c2136b
Parents: 5470de4
Author: Jason Lowe 
Authored: Fri Sep 14 15:04:18 2018 -0500
Committer: Jason Lowe 
Committed: Fri Sep 14 15:10:27 2018 -0500

--
 .../mapreduce/v2/app/webapp/AMWebServices.java  |  9 +++--
 .../v2/app/webapp/JAXBContextResolver.java  |  7 ++--
 .../mapreduce/v2/app/webapp/TaskPage.java   |  3 +-
 .../v2/app/webapp/dao/MapTaskAttemptInfo.java   | 39 
 .../app/webapp/dao/ReduceTaskAttemptInfo.java   | 11 +++---
 .../v2/app/webapp/dao/TaskAttemptInfo.java  | 14 +++
 .../v2/app/webapp/dao/TaskAttemptsInfo.java | 10 ++---
 .../mapreduce/v2/hs/webapp/HsTaskPage.java  |  3 +-
 .../mapreduce/v2/hs/webapp/HsTasksBlock.java|  5 ++-
 .../mapreduce/v2/hs/webapp/HsWebServices.java   |  9 +++--
 .../v2/hs/webapp/JAXBContextResolver.java   |  4 +-
 11 files changed, 77 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/488806ba/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
index f477d31..fe3ace8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.MapTaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
@@ -396,9 +397,9 @@ public class AMWebServices {
 for (TaskAttempt ta : task.getAttempts().values()) {
   if (ta != null) {
 if (task.getType() == TaskType.REDUCE) {
-  attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
+  attempts.add(new ReduceTaskAttemptInfo(ta));
 } else {
-  attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
+  attempts.add(new MapTaskAttemptInfo(ta, true));
 }
   }
 }
@@ -419,9 +420,9 @@ public class AMWebServices {
 Task task = getTaskFromTaskIdString(tid, job);
 TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
 if (task.getType() == TaskType.REDUCE) {
-  return new ReduceTaskAttemptInfo(ta, task.getType());
+  return new ReduceTaskAttemptInfo(ta);
 } else {
-  return new TaskAttemptInfo(ta, task.getType(), true);
+  return new MapTaskAttemptInfo(ta, true);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/488806ba/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
index 88c7d86..625eb4e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
@@ -42,8 +42,8 @@ import 
org.apache.hadoo

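The direction of the refactoring above: rather than constructing one
TaskAttemptInfo DAO with a TaskType flag, each task type gets its own
subclass, so type-specific fields live only where they apply and callers
dispatch once on the type. A compressed sketch of that shape; the fields
shown are illustrative, not the full DAOs:

final class TaskAttemptInfoSketch {
  static class TaskAttemptInfo {
    final String id;
    TaskAttemptInfo(String id) { this.id = id; }
  }

  static class MapTaskAttemptInfo extends TaskAttemptInfo {
    MapTaskAttemptInfo(String id) { super(id); }
  }

  static class ReduceTaskAttemptInfo extends TaskAttemptInfo {
    long shuffleFinishTime;  // reduce-only field lives on the reduce subtype
    ReduceTaskAttemptInfo(String id) { super(id); }
  }

  static TaskAttemptInfo forAttempt(boolean isReduce, String id) {
    // One dispatch on task type; serializers can then key off the
    // concrete class instead of a flag inside a shared DAO.
    return isReduce ? new ReduceTaskAttemptInfo(id) : new MapTaskAttemptInfo(id);
  }
}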
[08/50] [abbrv] hadoop git commit: HDDS-456. TestOzoneShell#init is breaking due to Null Pointer Exception. Contributed by Dinesh Chitlangia.

2018-09-17 Thread shv
HDDS-456. TestOzoneShell#init is breaking due to Null Pointer Exception. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76a0fdfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76a0fdfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76a0fdfe

Branch: refs/heads/HDFS-12943
Commit: 76a0fdfed9e3b972e0904503dcb0901c546c1d8c
Parents: 2886024
Author: Arpit Agarwal 
Authored: Thu Sep 13 13:00:35 2018 -0700
Committer: Arpit Agarwal 
Committed: Thu Sep 13 13:00:35 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java| 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76a0fdfe/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index b5bd361..71cc6ba 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -305,6 +305,9 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
   terminate(0);
   return null;
 default:
+  if (argv == null) {
+argv = new String[]{};
+  }
   StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
   return new OzoneManager(conf);
 }





[33/50] [abbrv] hadoop git commit: HDDS-362. Modify functions impacted by SCM chill mode in ScmBlockLocationProtocol. Contributed by Ajay Kumar.

2018-09-17 Thread shv
HDDS-362. Modify functions impacted by SCM chill mode in 
ScmBlockLocationProtocol. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95231f17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95231f17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95231f17

Branch: refs/heads/HDFS-12943
Commit: 95231f1749301b011fe48c9399953f774c40513d
Parents: 07385f8
Author: Xiaoyu Yao 
Authored: Sun Sep 16 17:55:46 2018 -0700
Committer: Xiaoyu Yao 
Committed: Sun Sep 16 17:55:46 2018 -0700

--
 hadoop-hdds/common/src/main/proto/hdds.proto|  7 ++
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 49 +--
 .../replication/ReplicationActivityStatus.java  | 55 +---
 .../hadoop/hdds/scm/events/SCMEvents.java   |  2 +
 .../hdds/scm/server/ChillModePrecheck.java  | 54 
 .../apache/hadoop/hdds/scm/server/Precheck.java | 29 +++
 .../hdds/scm/server/SCMChillModeManager.java| 49 ++-
 .../scm/server/StorageContainerManager.java |  7 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java | 89 +++-
 .../TestReplicationActivityStatus.java  | 63 ++
 10 files changed, 360 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95231f17/hadoop-hdds/common/src/main/proto/hdds.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto 
b/hadoop-hdds/common/src/main/proto/hdds.proto
index 89c928b..41f1851 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -171,6 +171,13 @@ enum ReplicationFactor {
 THREE = 3;
 }
 
+enum ScmOps {
+allocateBlock = 1;
+keyBlocksInfoList = 2;
+getScmInfo = 3;
+deleteBlock = 4;
+}
+
 /**
  * Block ID that uniquely identify a block by SCM.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95231f17/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index e4e33c7..8322b73 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@@ -28,6 +29,9 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;
+import org.apache.hadoop.hdds.scm.server.Precheck;
+import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -61,7 +65,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
 .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
 
 /** Block Manager manages the block access for SCM. */
-public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
+public class BlockManagerImpl implements EventHandler<Boolean>,
+BlockManager, BlockmanagerMXBean {
   private static final Logger LOG =
   LoggerFactory.getLogger(BlockManagerImpl.class);
   // TODO : FIX ME : Hard coding the owner.
@@ -80,6 +85,7 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   private final int containerProvisionBatchSize;
   private final Random rand;
   private ObjectName mxBean;
+  private ChillModePrecheck chillModePrecheck;
 
   /**
* Constructor.
@@ -125,6 +131,7 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
 blockDeletingService =
 new SCMBlockDeletingService(deletedBlockLog, containerManager,
 nodeManager, eventPublisher, svcInterval, serviceTimeout, conf);
+chillModePrecheck = new ChillModePrecheck();
   }
 
   /**
@@ -187,19 +194,13 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   ReplicationType type, Repl

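The shape of the new precheck, pieced together from the hunks above: a
Precheck implementation is consulted before block operations and rejects
mutating calls while SCM is still in chill mode. A rough standalone sketch;
the ScmOps values come from the hdds.proto change above, but everything
else is illustrative and not the exact HDDS code:

final class ChillModeSketch {
  enum ScmOps { allocateBlock, keyBlocksInfoList, getScmInfo, deleteBlock }

  interface Precheck<T> {
    boolean check(T op);
  }

  static final class ChillModePrecheck implements Precheck<ScmOps> {
    private volatile boolean inChillMode = true;

    @Override
    public boolean check(ScmOps op) {
      // Block-mutating operations are refused until chill mode lifts;
      // read-only calls such as getScmInfo pass through.
      if (inChillMode
          && (op == ScmOps.allocateBlock || op == ScmOps.deleteBlock)) {
        throw new IllegalStateException("SCM in chill mode, rejecting " + op);
      }
      return inChillMode;
    }

    void setInChillMode(boolean inChillMode) {
      this.inChillMode = inChillMode;
    }
  }
}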
[15/50] [abbrv] hadoop git commit: HDDS-438. 'ozone oz' should print usage when command or sub-command is missing. Contributed by Dinesh Chitlangia.

2018-09-17 Thread shv
HDDS-438. 'ozone oz' should print usage when command or sub-command is missing. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f12f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f12f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f12f1

Branch: refs/heads/HDFS-12943
Commit: a12f12f1af22a28bce2d361a506394761cf53aa7
Parents: f1a893f
Author: Arpit Agarwal 
Authored: Fri Sep 14 07:02:28 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Sep 14 07:02:28 2018 -0700

--
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  5 +-
 .../hdds/cli/MissingSubcommandException.java| 10 ++-
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 69 ++--
 .../web/ozShell/bucket/BucketCommands.java  |  3 +-
 .../ozone/web/ozShell/keys/KeyCommands.java |  3 +-
 .../web/ozShell/volume/VolumeCommands.java  |  3 +-
 6 files changed, 82 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f12f1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index 9a0be44..e56810c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -67,11 +67,14 @@ public class GenericCli implements Callable<Void>, GenericParentCommand {
 } else {
   System.err.println(error.getMessage().split("\n")[0]);
 }
+if(error instanceof MissingSubcommandException){
+  System.err.println(((MissingSubcommandException) error).getUsage());
+}
   }
 
   @Override
   public Void call() throws Exception {
-throw new MissingSubcommandException();
+throw new MissingSubcommandException(cmd.getUsageMessage());
   }
 
   public OzoneConfiguration createOzoneConfiguration() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f12f1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
index bf3818f..9f0c494 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
@@ -22,8 +22,14 @@ package org.apache.hadoop.hdds.cli;
  */
 public class MissingSubcommandException extends RuntimeException {
 
-  public MissingSubcommandException() {
-super("Please select a subcommand");
+  private String usage;
+
+  public MissingSubcommandException(String usage) {
+super("Incomplete command");
+this.usage = usage;
   }
 
+  public String getUsage() {
+return usage;
+  }
 }

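The pattern in the GenericCli and MissingSubcommandException hunks: the
exception carries the usage text as a payload, so the top-level error
handler can print it without recomputing it. A standalone sketch of the
same pattern; the usage string below is a placeholder, not the real
'ozone oz' help text that picocli generates:

final class MissingSubcommandDemo {
  static class MissingSubcommandException extends RuntimeException {
    private final String usage;

    MissingSubcommandException(String usage) {
      super("Incomplete command");
      this.usage = usage;  // stash the usage text with the error
    }

    String getUsage() {
      return usage;
    }
  }

  public static void main(String[] args) {
    try {
      throw new MissingSubcommandException("Usage: ozone oz [COMMAND] ...");
    } catch (MissingSubcommandException e) {
      System.err.println(e.getMessage());
      System.err.println(e.getUsage());  // print full usage, as the patch does
    }
  }
}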
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f12f1/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index f872865..c80030e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -33,6 +33,7 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.cli.MissingSubcommandException;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -236,10 +237,10 @@ public class TestOzoneShell {
 assertEquals(userName, volumeInfo.getOwner());
   }
 
-  private void execute(Shell shell, String[] args) {
+  private void execute(Shell ozoneShell, String[] args) {
 List<String> arguments = new ArrayList<>(Arrays.asList(args));
 LOG.info("Executing shell command with args {}", arguments);
-CommandLine cmd = shell.getCmd();
+CommandLine cmd = ozoneShell.getCmd();
 
 IExceptionHandler2<List<Object>> exceptionHandler =
 new IExceptionHandler2<List<Object>>() {
@@ -310,6 +311,29 @@ public class TestOzoneShell {
   }
 
   @Test
+  public void testShellIncompleteCom

[05/50] [abbrv] hadoop git commit: HDFS-13914. Fix DN UI logs link broken when https is enabled after HDFS-13902. Contributed by Jianfei Jiang.

2018-09-17 Thread shv
HDFS-13914. Fix DN UI logs link broken when https is enabled after HDFS-13902. 
Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1b242a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1b242a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1b242a9

Branch: refs/heads/HDFS-12943
Commit: e1b242a9844d6438c93cdc2ab2443d042b4d5aea
Parents: 39c1ea1
Author: Inigo Goiri 
Authored: Thu Sep 13 09:16:58 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Sep 13 09:16:58 2018 -0700

--
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b242a9/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index ba3eadc..0fdf552 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -36,7 +36,7 @@
 
   Utilities 
   
-Logs
+Logs
 Metrics
 Configuration
 Process Thread Dump





[19/50] [abbrv] hadoop git commit: HDDS-429. StorageContainerManager lock optimization. Contributed by Nanda Kumar.

2018-09-17 Thread shv
HDDS-429. StorageContainerManager lock optimization.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c8a43b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c8a43b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c8a43b9

Branch: refs/heads/HDFS-12943
Commit: 0c8a43b9ec77b3ba7b2bb4c8aa863b1deba3bc7b
Parents: 144a55f
Author: Anu Engineer 
Authored: Fri Sep 14 10:08:06 2018 -0700
Committer: Anu Engineer 
Committed: Fri Sep 14 10:08:06 2018 -0700

--
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 229 ++-
 .../scm/container/states/ContainerStateMap.java | 177 --
 2 files changed, 226 insertions(+), 180 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c8a43b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 82d9a28..e4e33c7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdds.scm.block;
 
-import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -45,8 +44,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
 .CHILL_MODE_EXCEPTION;
@@ -72,7 +71,7 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   private final NodeManager nodeManager;
   private final Mapping containerManager;
 
-  private final Lock lock;
+  private final ReadWriteLock lock;
   private final long containerSize;
 
   private final DeletedBlockLog deletedBlockLog;
@@ -108,7 +107,7 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
 ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
 ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT);
 rand = new Random();
-this.lock = new ReentrantLock();
+this.lock = new ReentrantReadWriteLock();
 
 mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
 
@@ -155,29 +154,22 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   * @param factor - how many copies needed for this container.
   * @throws IOException
   */
-  private void preAllocateContainers(int count, ReplicationType type,
-  ReplicationFactor factor, String owner)
+  private synchronized void preAllocateContainers(int count,
+  ReplicationType type, ReplicationFactor factor, String owner)
   throws IOException {
-lock.lock();
-try {
-  for (int i = 0; i < count; i++) {
-ContainerWithPipeline containerWithPipeline;
-try {
-  // TODO: Fix this later when Ratis is made the Default.
-  containerWithPipeline = containerManager.allocateContainer(
-  type, factor, owner);
+for (int i = 0; i < count; i++) {
+  ContainerWithPipeline containerWithPipeline;
+  try {
+// TODO: Fix this later when Ratis is made the Default.
+containerWithPipeline = containerManager.allocateContainer(
+type, factor, owner);
 
-  if (containerWithPipeline == null) {
-LOG.warn("Unable to allocate container.");
-continue;
-  }
-} catch (IOException ex) {
-  LOG.warn("Unable to allocate container: {}", ex);
-  continue;
+if (containerWithPipeline == null) {
+  LOG.warn("Unable to allocate container.");
 }
+  } catch (IOException ex) {
+LOG.warn("Unable to allocate container: {}", ex);
   }
-} finally {
-  lock.unlock();
 }
   }
 
@@ -208,46 +200,61 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   CHILL_MODE_EXCEPTION);
 }
 
-lock.lock();
-try {
-  /*
-   Here is the high level logic.
-
-   1. First we check if there are containers in ALLOCATED state,
-   that is
-SCM has allocated them in the SCM namespace but the
- 

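The core of the optimization is visible in the first hunk: the single
ReentrantLock becomes a ReentrantReadWriteLock, so read-only queries can
proceed concurrently and only mutations serialize. A minimal sketch of that
locking discipline; the Registry class is illustrative, not SCM code:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class Registry {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<Long, String> containers = new HashMap<>();

  String get(long id) {
    lock.readLock().lock();  // many readers may hold this at once
    try {
      return containers.get(id);
    } finally {
      lock.readLock().unlock();
    }
  }

  void put(long id, String state) {
    lock.writeLock().lock();  // writers are exclusive
    try {
      containers.put(id, state);
    } finally {
      lock.writeLock().unlock();
    }
  }
}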
[50/50] [abbrv] hadoop git commit: HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed by Konstantin

2018-09-17 Thread shv
conf, uri, xface, factory);
-
-  // Create but DON'T set in HAProxyFactory.
-  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
-
-  AC_LIST.add(alignmentContext);
+  AC_LIST.add((ClientGSIContext) getAlignmentContext());
 }
   }
 
@@ -121,23 +93,21 @@ public class TestStateAlignmentContextWithHA {
 CONF.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
 CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
 
-MiniDFSNNTopology.NSConf nsConf = new 
MiniDFSNNTopology.NSConf(NAMESERVICE);
-nsConf.addNN(new MiniDFSNNTopology.NNConf("nn1"));
-nsConf.addNN(new MiniDFSNNTopology.NNConf("nn2"));
-
 cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(NUMDATANODES)
-
.nnTopology(MiniDFSNNTopology.simpleHATopology().addNameservice(nsConf))
+.nnTopology(MiniDFSNNTopology.simpleHATopology(3))
 .build();
 cluster.waitActive();
 cluster.transitionToActive(0);
+cluster.transitionToObserver(2);
+
+String nameservice = HATestUtil.getLogicalHostname(cluster);
+HATestUtil.setFailoverConfigurations(cluster, CONF, nameservice, 0);
+CONF.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
+"." + nameservice, ORPPwithAlignmentContexts.class.getName());
   }
 
   @Before
   public void before() throws IOException, URISyntaxException {
-killWorkers();
-HATestUtil.setFailoverConfigurations(cluster, CONF, NAMESERVICE, 0);
-CONF.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
-"." + NAMESERVICE, AlignmentContextProxyProvider.class.getName());
 dfs = (DistributedFileSystem) FileSystem.get(CONF);
   }
 
@@ -151,6 +121,7 @@ public class TestStateAlignmentContextWithHA {
 
   @After
   public void after() throws IOException {
+killWorkers();
 cluster.transitionToStandby(1);
 cluster.transitionToActive(0);
 active = 0;
@@ -160,26 +131,6 @@ public class TestStateAlignmentContextWithHA {
   dfs = null;
 }
 AC_LIST.clear();
-spy = null;
-  }
-
-  /**
-   * This test checks if after a client writes we can see the state id in
-   * updated via the response.
-   */
-  @Test
-  public void testNoStateOnConfiguredProxyProvider() throws Exception {
-Configuration confCopy = new Configuration(CONF);
-confCopy.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
-"." + NAMESERVICE, SpyConfiguredContextProxyProvider.class.getName());
-
-try (DistributedFileSystem clearDfs =
- (DistributedFileSystem) FileSystem.get(confCopy)) {
-  ClientGSIContext clientState = getContext(1);
-  assertThat(clientState.getLastSeenStateId(), is(Long.MIN_VALUE));
-  DFSTestUtil.writeFile(clearDfs, new Path("/testFileNoState"), 
"no_state");
-  assertThat(clientState.getLastSeenStateId(), is(Long.MIN_VALUE));
-}
   }
 
   /**
@@ -234,48 +185,6 @@ public class TestStateAlignmentContextWithHA {
   }
 
   /**
-   * This test mocks an AlignmentContext and ensures that DFSClient
-   * writes its lastSeenStateId into RPC requests.
-   */
-  @Test
-  public void testClientSendsState() throws Exception {
-ClientGSIContext alignmentContext = new ClientGSIContext();
-ClientGSIContext spiedAlignContext = Mockito.spy(alignmentContext);
-spy = spiedAlignContext;
-
-try (DistributedFileSystem clearDfs =
- (DistributedFileSystem) FileSystem.get(CONF)) {
-
-  // Collect RpcRequestHeaders for verification later.
-  final List<RpcHeaderProtos.RpcRequestHeaderProto.Builder> headers =
-  new ArrayList<>();
-  Mockito.doAnswer(a -> {
-Object[] arguments = a.getArguments();
-RpcHeaderProtos.RpcRequestHeaderProto.Builder header =
-(RpcHeaderProtos.RpcRequestHeaderProto.Builder) arguments[0];
-headers.add(header);
-return a.callRealMethod();
-  }).when(spiedAlignContext).updateRequestState(Mockito.any());
-
-  DFSTestUtil.writeFile(clearDfs, new Path("/testFile4"), "shv");
-
-  // Ensure first header and last header have different state.
-  assertThat(headers.size() > 1, is(true));
-  assertThat(headers.get(0).getStateId(),
-  is(not(headers.get(headers.size() - 1).getStateId())));
-
-  // Ensure collected RpcRequestHeaders are in increasing order.
-  long lastHeader = headers.get(0).getStateId();
-  for (RpcHeaderProtos.RpcRequestHeaderProto.Builder header :
-  headers.subList(1, headers.size())) {
-long currentHeader = header.getStateId();
-assertThat(currentHeader >= lastHeader, is(true));
-lastHeader = header.getStateId();
-  }
-}
-  }
-
-  /**
   * This test checks that after a client writes we can see the state id
   * updated via the response.
   */
@@ -310,14 +219,22 @@ public class TestStateAlignmentContextWithHA {
 
   @Test(timeout=30)
   public void testMultiClientStatesWithRandom

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-09-17 Thread shv
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30d1cd9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30d1cd9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30d1cd9f

Branch: refs/heads/HDFS-12943
Commit: 30d1cd9f9338456f2a7cb80c9b8270ab0fe46ca1
Parents: 9ade422 0a26c52
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 18:59:40 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Sep 17 18:59:40 2018 -0700

--
 dev-support/bin/create-release  |4 +-
 dev-support/bin/ozone-dist-layout-stitching |   27 +-
 dev-support/bin/yetus-wrapper   |2 +-
 dev-support/docker/Dockerfile   |  212 +-
 .../assemblies/hadoop-src-with-hdds.xml |   56 +
 .../assemblies/hadoop-src-with-hdsl.xml |   56 -
 hadoop-common-project/hadoop-common/pom.xml |2 +
 .../src/main/conf/log4j.properties  |   23 -
 .../apache/hadoop/crypto/CryptoStreamUtils.java |   21 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|2 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |7 +-
 .../org/apache/hadoop/io/nativeio/NativeIO.java |   15 +-
 .../org/apache/hadoop/util/CleanerUtil.java |  199 +
 .../hadoop/util/curator/ZKCuratorManager.java   |   10 +-
 .../src/main/resources/core-default.xml |   24 +-
 .../markdown/release/0.1.0/CHANGELOG.0.1.0.md   |  101 +
 .../markdown/release/0.1.0/CHANGES.0.1.0.md |  101 -
 .../markdown/release/0.1.1/CHANGELOG.0.1.1.md   |   39 +
 .../markdown/release/0.1.1/CHANGES.0.1.1.md |   39 -
 .../markdown/release/0.10.0/CHANGELOG.0.10.0.md |  101 +
 .../markdown/release/0.10.0/CHANGES.0.10.0.md   |  101 -
 .../markdown/release/0.10.1/CHANGELOG.0.10.1.md |   49 +
 .../markdown/release/0.10.1/CHANGES.0.10.1.md   |   49 -
 .../markdown/release/0.11.0/CHANGELOG.0.11.0.md |   96 +
 .../markdown/release/0.11.0/CHANGES.0.11.0.md   |   96 -
 .../markdown/release/0.11.1/CHANGELOG.0.11.1.md |   34 +
 .../markdown/release/0.11.1/CHANGES.0.11.1.md   |   34 -
 .../markdown/release/0.11.2/CHANGELOG.0.11.2.md |   33 +
 .../markdown/release/0.11.2/CHANGES.0.11.2.md   |   33 -
 .../markdown/release/0.12.0/CHANGELOG.0.12.0.md |  113 +
 .../markdown/release/0.12.0/CHANGES.0.12.0.md   |  113 -
 .../markdown/release/0.12.1/CHANGELOG.0.12.1.md |   59 +
 .../markdown/release/0.12.1/CHANGES.0.12.1.md   |   59 -
 .../markdown/release/0.12.2/CHANGELOG.0.12.2.md |   34 +
 .../markdown/release/0.12.2/CHANGES.0.12.2.md   |   34 -
 .../markdown/release/0.12.3/CHANGELOG.0.12.3.md |   38 +
 .../markdown/release/0.12.3/CHANGES.0.12.3.md   |   38 -
 .../markdown/release/0.13.0/CHANGELOG.0.13.0.md |  173 +
 .../markdown/release/0.13.0/CHANGES.0.13.0.md   |  173 -
 .../markdown/release/0.14.0/CHANGELOG.0.14.0.md |  214 +
 .../markdown/release/0.14.0/CHANGES.0.14.0.md   |  214 -
 .../markdown/release/0.14.1/CHANGELOG.0.14.1.md |   33 +
 .../markdown/release/0.14.1/CHANGES.0.14.1.md   |   33 -
 .../markdown/release/0.14.2/CHANGELOG.0.14.2.md |   40 +
 .../markdown/release/0.14.2/CHANGES.0.14.2.md   |   40 -
 .../markdown/release/0.14.3/CHANGELOG.0.14.3.md |   34 +
 .../markdown/release/0.14.3/CHANGES.0.14.3.md   |   34 -
 .../markdown/release/0.14.4/CHANGELOG.0.14.4.md |   39 +
 .../markdown/release/0.14.4/CHANGES.0.14.4.md   |   39 -
 .../markdown/release/0.15.0/CHANGELOG.0.15.0.md |  190 +
 .../markdown/release/0.15.0/CHANGES.0.15.0.md   |  190 -
 .../markdown/release/0.15.1/CHANGELOG.0.15.1.md |   49 +
 .../markdown/release/0.15.1/CHANGES.0.15.1.md   |   49 -
 .../markdown/release/0.15.2/CHANGELOG.0.15.2.md |   51 +
 .../markdown/release/0.15.2/CHANGES.0.15.2.md   |   51 -
 .../markdown/release/0.15.3/CHANGELOG.0.15.3.md |   35 +
 .../markdown/release/0.15.3/CHANGES.0.15.3.md   |   35 -
 .../markdown/release/0.15.4/CHANGELOG.0.15.4.md |   31 +
 .../markdown/release/0.15.4/CHANGES.0.15.4.md   |   31 -
 .../markdown/release/0.16.0/CHANGELOG.0.16.0.md |  225 ++
 .../markdown/release/0.16.0/CHANGES.0.16.0.md   |  225 --
 .../markdown/release/0.16.1/CHANGELOG.0.16.1.md |   94 +
 .../markdown/release/0.16.1/CHANGES.0.16.1.md   |   94 -
 .../markdown/release/0.16.2/CHANGELOG.0.16.2.md |   59 +
 .../markdown/release/0.16.2/CHANGES.0.16.2.md   |   59 -
 .../markdown/release/0.16.3/CHANGELOG.0.16.3.md |   37 +
 .../markdown/release/0.16.3/CHANGES.0.16.3.md   |   37 -
 .../markdown/release/0.16.4/CHANGELOG.0.16.4.md |   34 +
 .../markdown/release/0.16.4/CHANGES.0.16.4.md   |   34 -
 .../markdown/release/0.17.0/CHANGELOG.0.17.0.md |  259 ++
 .../markdown/release/0.17.0/CHANGES.0.17.0.md   |  259 --
 .../markdown/release/0.17.1/CHANGELOG.0.17.1.md |   44 +
 .../markdown/release/0.17.1/CHANGES.0.17.1.md   |   44 -
 .../markdown/release/0.17.2/CHANGELOG.0.17.2.md |   43 +
 .../markdown/release/0.17.2/CHANGES.0.17.2.md   |   43 -
 .../mark

[26/50] [abbrv] hadoop git commit: HDDS-446. Provide shaded artifact to start ozone service as a datanode plugin. Contributed by Elek Marton

2018-09-17 Thread shv
HDDS-446. Provide shaded artifact to start ozone service as a datanode plugin. 
Contributed by Elek Marton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b95aa560
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b95aa560
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b95aa560

Branch: refs/heads/HDFS-12943
Commit: b95aa560628b527b1534a7f148bd61605deb90b3
Parents: b0a659c
Author: Bharat Viswanadham 
Authored: Fri Sep 14 14:11:53 2018 -0700
Committer: Bharat Viswanadham 
Committed: Fri Sep 14 14:12:03 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |  8 +++
 hadoop-dist/src/main/compose/ozone-hdfs/.env| 17 +
 .../main/compose/ozone-hdfs/docker-compose.yaml | 60 
 .../src/main/compose/ozone-hdfs/docker-config   | 75 
 hadoop-ozone/objectstore-service/pom.xml| 52 ++
 5 files changed, 212 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95aa560/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index d91c7ef..128ce10 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -143,8 +143,16 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+
+#shaded ozonefs
 mkdir -p "./share/hadoop/ozonefs"
 cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
+
+#shaded datanode service
+mkdir -p "./share/hadoop/ozoneplugin"
+cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar"
+
+
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95aa560/hadoop-dist/src/main/compose/ozone-hdfs/.env
--
diff --git a/hadoop-dist/src/main/compose/ozone-hdfs/.env 
b/hadoop-dist/src/main/compose/ozone-hdfs/.env
new file mode 100644
index 000..c437513
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozone-hdfs/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HADOOP_VERSION=3.1.0
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95aa560/hadoop-dist/src/main/compose/ozone-hdfs/docker-compose.yaml
--
diff --git a/hadoop-dist/src/main/compose/ozone-hdfs/docker-compose.yaml 
b/hadoop-dist/src/main/compose/ozone-hdfs/docker-compose.yaml
new file mode 100644
index 000..ed18e5c
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozone-hdfs/docker-compose.yaml
@@ -0,0 +1,60 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF AN

[38/50] [abbrv] hadoop git commit: HDDS-435. Enhance the existing ozone documentation. Contributed by Elek, Marton.

2018-09-17 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af84535/hadoop-ozone/docs/content/Settings.md
--
diff --git a/hadoop-ozone/docs/content/Settings.md 
b/hadoop-ozone/docs/content/Settings.md
new file mode 100644
index 000..bfa644e
--- /dev/null
+++ b/hadoop-ozone/docs/content/Settings.md
@@ -0,0 +1,139 @@
+---
+title: Configuration
+weight: 1
+menu:
+   main:
+  parent: Starting
+  weight: 2
+---
+
+
+
+
+
+If you are feeling adventurous, you can set up Ozone in a real cluster.
+Setting up a real cluster requires us to understand the components of Ozone.
+Ozone is designed to work concurrently with HDFS. However, Ozone is also
+capable of running independently. The components of Ozone are the same in
+both approaches.
+
+## Ozone Components
+
+1. Ozone Manager - The server in charge of the namespace of Ozone. Ozone
+Manager is responsible for all volume, bucket and key operations.
+2. Storage Container Manager - Acts as the block manager. Ozone Manager
+requests blocks from SCM, to which clients can write data.
+3. Datanodes - The Ozone datanode code runs inside the HDFS datanode or, in
+the independent deployment case, as a standalone ozone datanode daemon (a
+minimal client-side sketch exercising these components follows below).
+
+
+
+
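
Putting the three components together from the client's point of view: namespace calls go to the Ozone Manager, the Ozone Manager obtains blocks from SCM, and the bytes are written to datanodes. A minimal sketch, assuming the org.apache.hadoop.ozone.client API of this release (volume, bucket and key names are illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public class OzoneComponentsSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration(); // reads ozone-site.xml
    OzoneClient client = OzoneClientFactory.getClient(conf);
    ObjectStore store = client.getObjectStore();

    // Volume, bucket and key operations are namespace calls served by OM.
    store.createVolume("vol1");
    OzoneVolume volume = store.getVolume("vol1");
    volume.createBucket("bucket1");
    OzoneBucket bucket = volume.getBucket("bucket1");

    // Writing a key makes OM request blocks from SCM; the bytes themselves
    // are streamed to datanodes.
    byte[] data = "hello ozone".getBytes("UTF-8");
    try (OzoneOutputStream out = bucket.createKey("key1", data.length)) {
      out.write(data);
    }
    client.close();
  }
}
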
+## Setting up an Ozone only cluster
+
+* Please untar the ozone-0.2.1-SNAPSHOT to the directory where you are going
+to run Ozone from. Ozone jars are needed on all machines in the cluster, so
+you need to do this on every machine in the cluster.
+
+* Ozone relies on a configuration file called ```ozone-site.xml```. To
+generate a template that you can replace with proper values, please run the
+following command. This will generate a template called ```ozone-site.xml``` at
+the specified path (directory).
+
+{{< highlight bash >}}
+ozone genconf -output <path>
+{{< /highlight >}}
+
+Let us look at the settings inside the generated file (ozone-site.xml) and
+how they control Ozone. Once the right values are defined, this file
+needs to be copied to ```ozone directory/etc/hadoop```.
+
+
+* **ozone.enabled** This is the most critical setting for Ozone.
+Ozone is a work in progress and users have to enable this service explicitly.
+By default, Ozone is disabled. Setting this flag to `true` enables ozone in the
+HDFS or Ozone cluster.
+
+Here is an example,
+
+{{< highlight xml >}}
+<property>
+   <name>ozone.enabled</name>
+   <value>true</value>
+</property>
+{{< /highlight >}}
+
+* **ozone.metadata.dirs** Allows administrators to specify where the
+ metadata must reside. Usually you pick your fastest disk (SSD if
+ you have them on your nodes). OzoneManager, SCM and datanode will write the
+ metadata to this path. This is a required setting; if it is missing, Ozone
+ will fail to come up.
+
+  Here is an example,
+
+{{< highlight xml >}}
+   <property>
+      <name>ozone.metadata.dirs</name>
+      <value>/data/disk1/meta</value>
+   </property>
+{{< /highlight >}}
+
+*  **ozone.scm.names**  Storage Container Manager (SCM) is a distributed block
+  service which is used by Ozone. This property allows data nodes to discover
+   SCM's address. Data nodes send heartbeats to SCM.
+   Until the HA feature is complete, we configure ozone.scm.names to be a
+   single machine.
+
+  Here is an example,
+
+  {{< highlight xml >}}
+  <property>
+     <name>ozone.scm.names</name>
+     <value>scm.hadoop.apache.org</value>
+  </property>
+  {{< /highlight >}}
+
+ * **ozone.scm.datanode.id** Data nodes generate a unique ID called the
+ Datanode ID. This identity is written to the file specified by this path.
+ *Data nodes will create this path if it doesn't exist already.*
+
+Here is an example,
+{{< highlight xml >}}
+   <property>
+      <name>ozone.scm.datanode.id</name>
+      <value>/data/disk1/meta/node/datanode.id</value>
+   </property>
+{{< /highlight >}}
+
+* **ozone.om.address** OM server address. This is used by OzoneClient and
+Ozone File System.
+
+Here is an example,
+{{< highlight xml >}}
+<property>
+   <name>ozone.om.address</name>
+   <value>ozonemanager.hadoop.apache.org</value>
+</property>
+{{< /highlight >}}
+
+
+### Ozone Settings Summary
+
+| Setting                        | Value                    | Comment |
+|--------------------------------|--------------------------|---------|
+| ozone.enabled                  | true                     | This enables SCM and containers in an HDFS cluster. |
+| ozone.metadata.dirs            | file path                | The metadata will be stored here. |
+| ozone.scm.names                | SCM server name          | Hostname:port or IP:port address of SCM. |
+| ozone.scm.block.client.address | SCM server name and port | Used by services like OM. |
+| ozone.scm.client.address       | SCM server name and port | Used by client-side tools. |
+| ozone.scm.datanode.address     | SCM server name and port | Used by datanodes to talk to SCM. |
+| ozone.om.address               | OM server name and port  | Used by OzoneClient and Ozone File System. |
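
The same settings can also be wired up programmatically for quick experiments; a minimal sketch using OzoneConfiguration (the values are placeholders, and in a real deployment they belong in ozone-site.xml):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class OzoneSiteSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // The same keys the table above summarizes, set in code instead of XML.
    conf.setBoolean("ozone.enabled", true);
    conf.set("ozone.metadata.dirs", "/data/disk1/meta");
    conf.set("ozone.scm.names", "scm.hadoop.apache.org");
    conf.set("ozone.om.address", "ozonemanager.hadoop.apache.org");

    System.out.println(conf.get("ozone.scm.names")); // scm.hadoop.apache.org
  }
}
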

[02/50] [abbrv] hadoop git commit: YARN-8630. ATSv2 REST APIs should honor filter-entity-list-by-user in non-secure cluster when ACls are enabled. Contributed by Rohith Sharma K S.

2018-09-17 Thread shv
YARN-8630. ATSv2 REST APIs should honor filter-entity-list-by-user in 
non-secure cluster when ACls are enabled. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4bda5e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4bda5e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4bda5e8

Branch: refs/heads/HDFS-12943
Commit: f4bda5e8e9fee6c5a0dda7c79ef14e73aec20e7e
Parents: e084627
Author: Sunil G 
Authored: Thu Sep 13 17:47:02 2018 +0530
Committer: Sunil G 
Committed: Thu Sep 13 17:47:21 2018 +0530

--
 .../reader/TimelineReaderWebServices.java|  4 ++--
 .../reader/TestTimelineReaderWebServicesBasicAcl.java| 11 ---
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4bda5e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index b10b705..3a4ea2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3532,9 +3532,9 @@ public class TimelineReaderWebServices {
   static boolean checkAccess(TimelineReaderManager readerManager,
   UserGroupInformation ugi, String entityUser) {
 if (isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) {
-  if (ugi != null && !validateAuthUserWithEntityUser(readerManager, ugi,
+  if (!validateAuthUserWithEntityUser(readerManager, ugi,
   entityUser)) {
-String userName = ugi.getShortUserName();
+String userName = ugi == null ? null : ugi.getShortUserName();
 String msg = "User " + userName
 + " is not allowed to read TimelineService V2 data.";
 throw new ForbiddenException(msg);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4bda5e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
index 6651457..6ad4427 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
@@ -88,9 +88,14 @@ public class TestTimelineReaderWebServicesBasicAcl {
 Assert.assertFalse(TimelineReaderWebServices
 .validateAuthUserWithEntityUser(manager, null, user1));
 
-// true because ugi is null
-Assert.assertTrue(
-TimelineReaderWebServices.checkAccess(manager, null, user1));
+// false because ugi is null in non-secure cluster. User must pass
+// ?user.name as query params in REST end points.
+try {
+  TimelineReaderWebServices.checkAccess(manager, null, user1);
+  Assert.fail("user1Ugi is not allowed to view user1");
+} catch (ForbiddenException e) {
+  // expected
+}
 
 // incoming ugi is admin asking for entity owner user1
 Assert.assertTrue(
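
Condensing the fixed behavior into a hypothetical sketch (names simplified; the real checkAccess also consults the reader manager's configuration and admin ACLs):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.webapp.ForbiddenException;

final class CheckAccessSketch {
  // Hypothetical stand-in for validateAuthUserWithEntityUser: a null UGI
  // can never match the entity owner.
  static boolean isAllowed(UserGroupInformation ugi, String entityUser) {
    return ugi != null && ugi.getShortUserName().equals(entityUser);
  }

  static boolean checkAccess(UserGroupInformation ugi, String entityUser) {
    if (!isAllowed(ugi, entityUser)) {
      // Before the fix a null ugi short-circuited the check and was let
      // through; now it reaches this branch and is rejected.
      String userName = ugi == null ? null : ugi.getShortUserName();
      throw new ForbiddenException("User " + userName
          + " is not allowed to read TimelineService V2 data.");
    }
    return true;
  }
}
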





[21/50] [abbrv] hadoop git commit: HDDS-389. Remove XceiverServer and XceiverClient and related classes. Contributed by chencan.

2018-09-17 Thread shv
HDDS-389. Remove XceiverServer and XceiverClient and related classes. 
Contributed by chencan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1df3084
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1df3084
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1df3084

Branch: refs/heads/HDFS-12943
Commit: c1df3084ffd062a41f05601b928430a0b1a0db47
Parents: 446cb83
Author: Nanda kumar 
Authored: Sat Sep 15 00:18:52 2018 +0530
Committer: Nanda kumar 
Committed: Sat Sep 15 00:20:19 2018 +0530

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   | 209 ---
 .../hadoop/hdds/scm/XceiverClientHandler.java   | 202 --
 .../hdds/scm/XceiverClientInitializer.java  |  74 ---
 .../common/transport/server/XceiverServer.java  | 140 -
 .../transport/server/XceiverServerHandler.java  |  82 
 .../server/XceiverServerInitializer.java|  64 --
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   4 +-
 .../container/metrics/TestContainerMetrics.java |  21 +-
 .../container/server/TestContainerServer.java   |  54 ++---
 9 files changed, 36 insertions(+), 814 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df3084/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
deleted file mode 100644
index 5f2fe26..000
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.ratis.shaded.io.netty.bootstrap.Bootstrap;
-import org.apache.ratis.shaded.io.netty.channel.Channel;
-import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
-import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
-import org.apache.ratis.shaded.io.netty.channel.socket.nio.NioSocketChannel;
-import org.apache.ratis.shaded.io.netty.handler.logging.LogLevel;
-import org.apache.ratis.shaded.io.netty.handler.logging.LoggingHandler;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public class XceiverClient extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
-  private final Pipeline pipeline;
-  private final Configuration config;
-  private Channel channel;
-  private Bootstrap b;
-  private EventLoopGroup group;
-  private final Semaphore semaphore;
-  private boolean closed = false;
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   *
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config -- Ozone Config
-   */
-  public XceiverClient(Pipeline pipeline, Configuration config) {
-super();
-Preconditions.checkNotNull(pipeline);
-Preconditions.checkNotNull(config);
-this.pipeline = pipeline;
-this.config = config;
-this.semaphore =
-new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
-  }
-
-  @Override
-  public void c

[35/50] [abbrv] hadoop git commit: YARN-8782. Fix exception message in Resource.throwExceptionWhenArrayOutOfBound. Contributed by Gergely Pollak.

2018-09-17 Thread shv
YARN-8782. Fix exception message in Resource.throwExceptionWhenArrayOutOfBound. 
Contributed by Gergely Pollak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a265fa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a265fa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a265fa6

Branch: refs/heads/HDFS-12943
Commit: 9a265fa673ef1b8774cfd69c76cdd29bf344e79d
Parents: 33d8327
Author: Weiwei Yang 
Authored: Mon Sep 17 22:15:24 2018 +0800
Committer: Weiwei Yang 
Committed: Mon Sep 17 22:15:24 2018 +0800

--
 .../src/main/java/org/apache/hadoop/yarn/api/records/Resource.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a265fa6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 1a7252d..7740354 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -397,7 +397,7 @@ public abstract class Resource implements Comparable<Resource> {
   protected void throwExceptionWhenArrayOutOfBound(int index) {
 String exceptionMsg = String.format(
 "Trying to access ResourceInformation for given index=%d. "
-+ "Acceptable index range is [0,%d), please check double check "
++ "Acceptable index range is [0,%d), please double check "
 + "configured resources in resource-types.xml",
 index, ResourceUtils.getNumberOfKnownResourceTypes());
 





[29/50] [abbrv] hadoop git commit: HDDS-462. Optimize ContainerStateMap#getMatchingContainerIDs in SCM. Contributed by Nanda kumar.

2018-09-17 Thread shv
HDDS-462. Optimize ContainerStateMap#getMatchingContainerIDs in SCM. 
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9fa0818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9fa0818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9fa0818

Branch: refs/heads/HDFS-12943
Commit: c9fa081897df34dba1c2989f597e67a1f384a4e3
Parents: a65c3ea
Author: Nanda kumar 
Authored: Sat Sep 15 23:11:39 2018 +0530
Committer: Nanda kumar 
Committed: Sat Sep 15 23:11:39 2018 +0530

--
 .../hadoop/hdds/scm/container/ContainerID.java  |  26 +++--
 .../container/common/helpers/ContainerInfo.java |  10 +-
 .../scm/container/ContainerStateManager.java|   4 +-
 .../scm/container/states/ContainerQueryKey.java | 110 +++
 .../scm/container/states/ContainerStateMap.java |  42 ++-
 .../scm/node/states/TestNode2ContainerMap.java  |   7 +-
 .../genesis/BenchMarkContainerStateMap.java |  24 +++-
 .../genesis/BenchMarkDatanodeDispatcher.java|  42 +++
 .../apache/hadoop/ozone/genesis/Genesis.java|   9 +-
 9 files changed, 224 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9fa0818/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index 9845c04..49af297 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.math3.util.MathUtils;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 
 /**
  * Container ID is an integer that is a value between 1..MAX_CONTAINER ID.
@@ -48,7 +50,6 @@ public class ContainerID implements Comparable {
* @return ContainerID.
*/
   public static ContainerID valueof(long containerID) {
-Preconditions.checkState(containerID > 0);
 return new ContainerID(containerID);
   }
 
@@ -66,28 +67,37 @@ public class ContainerID implements Comparable {
 if (this == o) {
   return true;
 }
+
 if (o == null || getClass() != o.getClass()) {
   return false;
 }
 
 ContainerID that = (ContainerID) o;
 
-return id == that.id;
+return new EqualsBuilder()
+.append(getId(), that.getId())
+.isEquals();
   }
 
   @Override
   public int hashCode() {
-return MathUtils.hash(id);
+return new HashCodeBuilder(61, 71)
+.append(getId())
+.toHashCode();
   }
 
   @Override
   public int compareTo(Object o) {
 Preconditions.checkNotNull(o);
-if (o instanceof ContainerID) {
-  return Long.compare(((ContainerID) o).getId(), this.getId());
+if(getClass() != o.getClass()) {
+  throw new ClassCastException("ContainerID class expected. found:" +
+  o.getClass().toString());
 }
-throw new IllegalArgumentException("Object O, should be an instance " +
-"of ContainerID");
+
+ContainerID that = (ContainerID) o;
+return new CompareToBuilder()
+.append(this.getId(), that.getId())
+.build();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9fa0818/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index ed0e0aa..5abcd14 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -106,6 +106,13 @@ public class ContainerInfo implements 
Comparator,
 this.replicationType = repType;
   }
 
+  public ContainerInfo(ContainerInfo info) {
+this(info.getContainerID(), info.getState(), info.getPipelineID(),
+info.getAllocatedBytes(), info.getUsedBytes(), info.getNumberOfKeys(),
+info.getStateEnterTime(), info.getOwner(),
+info.getDeleteTransactionId(), info.getReplicationFactor(),
+info.getReplicationType());
+  }
   /**
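
For reference, the commons-lang3 builders used in the new equals/hashCode/compareTo compose their result field by field; a self-contained sketch with a hypothetical LongId class mirroring the ContainerID pattern above:

import org.apache.commons.lang3.builder.CompareToBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

public final class LongId implements Comparable<LongId> {
  private final long id;

  public LongId(long id) {
    this.id = id;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    // EqualsBuilder compares each appended field pair in turn.
    return new EqualsBuilder().append(id, ((LongId) o).id).isEquals();
  }

  @Override
  public int hashCode() {
    // The two odd numbers seed the hash; any pair of non-zero odd numbers
    // works, they just need to stay constant for the class.
    return new HashCodeBuilder(61, 71).append(id).toHashCode();
  }

  @Override
  public int compareTo(LongId that) {
    // CompareToBuilder chains field comparisons in declaration order.
    return new CompareToBuilder().append(this.id, that.id).toComparison();
  }
}
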

[43/50] [abbrv] hadoop git commit: HDFS-13844. Fix the fmt_bytes function in the dfs-dust.js. Contributed by yanghuafeng.

2018-09-17 Thread shv
HDFS-13844. Fix the fmt_bytes function in the dfs-dust.js. Contributed by 
yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1541932
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1541932
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1541932

Branch: refs/heads/HDFS-12943
Commit: d1541932dbf2efd09da251b23c8825ce97f9c86c
Parents: 23a6137
Author: Inigo Goiri 
Authored: Mon Sep 17 14:37:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Sep 17 14:42:03 2018 -0700

--
 .../hadoop-hdfs/src/main/webapps/static/dfs-dust.js  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1541932/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
index a572282..316a994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
@@ -20,7 +20,7 @@
 
   var filters = {
 'fmt_bytes': function (v) {
-  var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB'];
+  var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'];
   var prev = 0, i = 0;
   while (Math.floor(v) > 0 && i < UNITS.length) {
 prev = v;
@@ -28,7 +28,7 @@
 i += 1;
   }
 
-  if (i > 0 && i < UNITS.length) {
+  if (i > 0) {
 v = prev;
 i -= 1;
   }
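
A Java transcription of the fixed function makes the two changes easier to see: 'EB' was missing from the unit table, and the old guard (i > 0 && i < UNITS.length) skipped the step-back for values that exhausted the table, indexing past its end. Class and method names below are ours:

public class FmtBytesSketch {
  private static final String[] UNITS =
      {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"};

  static String fmtBytes(double v) {
    double prev = 0;
    int i = 0;
    // Divide by 1024 until the integral part reaches zero or the unit
    // table is exhausted.
    while (Math.floor(v) > 0 && i < UNITS.length) {
      prev = v;
      v /= 1024;
      i += 1;
    }
    // Step back one unit. With the old guard (i > 0 && i < UNITS.length)
    // this branch was skipped once i reached UNITS.length, leaving an
    // out-of-range index; the fixed guard always steps back.
    if (i > 0) {
      v = prev;
      i -= 1;
    }
    return Math.round(v * 100) / 100.0 + " " + UNITS[i];
  }

  public static void main(String[] args) {
    System.out.println(fmtBytes(1536));              // 1.5 KB
    System.out.println(fmtBytes(Math.pow(1024, 6))); // 1.0 EB
  }
}
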





[27/50] [abbrv] hadoop git commit: HDDS-409. Ozone acceptance-test and integration-test packages have undefined hadoop component. Contributed by Dinesh Chitlangia.

2018-09-17 Thread shv
HDDS-409. Ozone acceptance-test and integration-test packages have undefined 
hadoop component. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/985f3bf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/985f3bf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/985f3bf3

Branch: refs/heads/HDFS-12943
Commit: 985f3bf3fb2e9ba6cccf0420cd91cb4b9394d750
Parents: b95aa56
Author: Márton Elek 
Authored: Sat Sep 15 13:16:59 2018 +0200
Committer: Márton Elek 
Committed: Sat Sep 15 13:21:32 2018 +0200

--
 hadoop-ozone/acceptance-test/pom.xml  | 6 ++
 hadoop-ozone/integration-test/pom.xml | 5 +
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/985f3bf3/hadoop-ozone/acceptance-test/pom.xml
--
diff --git a/hadoop-ozone/acceptance-test/pom.xml 
b/hadoop-ozone/acceptance-test/pom.xml
index fc11c07..a60d4b0 100644
--- a/hadoop-ozone/acceptance-test/pom.xml
+++ b/hadoop-ozone/acceptance-test/pom.xml
@@ -27,6 +27,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   Apache Hadoop Ozone Acceptance Tests
   Apache Hadoop Ozone Acceptance Tests
   pom
+
+  
+ozone
+true
+  
+
   
 
   ozone-acceptance-test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/985f3bf3/hadoop-ozone/integration-test/pom.xml
--
diff --git a/hadoop-ozone/integration-test/pom.xml 
b/hadoop-ozone/integration-test/pom.xml
index d7a3bc0..993e91f 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -28,6 +28,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   Apache Hadoop Ozone Integration Tests
   jar
 
+  
+ozone
+true
+  
+
   
 
 





[44/50] [abbrv] hadoop git commit: HDDS-487. Doc files are missing ASF license headers. Contributed by Namit Maheshwari.

2018-09-17 Thread shv
HDDS-487. Doc files are missing ASF license headers. Contributed by Namit 
Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a26c521
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a26c521
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a26c521

Branch: refs/heads/HDFS-12943
Commit: 0a26c521f0e5d3a2f7d40e07f11fe0a26765bc41
Parents: d154193
Author: Arpit Agarwal 
Authored: Mon Sep 17 16:21:10 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Sep 17 16:21:10 2018 -0700

--
 hadoop-ozone/docs/README.md | 13 +++-
 hadoop-ozone/docs/archetypes/default.md | 13 +++-
 hadoop-ozone/docs/content/BucketCommands.md | 16 +++
 hadoop-ozone/docs/content/BuildingSources.md| 17 
 hadoop-ozone/docs/content/CommandShell.md   | 13 +++-
 hadoop-ozone/docs/content/Concepts.md   | 21 +---
 hadoop-ozone/docs/content/Dozone.md | 13 +++-
 hadoop-ozone/docs/content/Freon.md  | 13 +++-
 hadoop-ozone/docs/content/Hdds.md   | 16 +++
 hadoop-ozone/docs/content/JavaApi.md| 16 +++
 hadoop-ozone/docs/content/KeyCommands.md| 16 +++
 hadoop-ozone/docs/content/OzoneFS.md| 16 +++
 hadoop-ozone/docs/content/OzoneManager.md   | 16 +++
 hadoop-ozone/docs/content/RealCluster.md| 13 +++-
 hadoop-ozone/docs/content/Rest.md   | 13 +++-
 hadoop-ozone/docs/content/RunningViaDocker.md   | 13 +++-
 hadoop-ozone/docs/content/RunningWithHDFS.md| 16 +++
 hadoop-ozone/docs/content/SCMCLI.md | 13 +++-
 hadoop-ozone/docs/content/Settings.md   | 13 +++-
 hadoop-ozone/docs/content/VolumeCommands.md | 16 +++
 hadoop-ozone/docs/content/_index.md | 13 +++-
 hadoop-ozone/docs/static/NOTES.md   | 13 +++-
 .../ozonedoc/layouts/_default/single.html   | 13 +++-
 .../docs/themes/ozonedoc/layouts/index.html | 16 +++
 .../ozonedoc/layouts/partials/footer.html   | 13 +++-
 .../ozonedoc/layouts/partials/header.html   | 13 +++-
 .../ozonedoc/layouts/partials/navbar.html   | 13 +++-
 .../ozonedoc/layouts/partials/sidebar.html  | 13 +++-
 28 files changed, 311 insertions(+), 92 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a26c521/hadoop-ozone/docs/README.md
--
diff --git a/hadoop-ozone/docs/README.md b/hadoop-ozone/docs/README.md
index 426789f..85817a7 100644
--- a/hadoop-ozone/docs/README.md
+++ b/hadoop-ozone/docs/README.md
@@ -1,15 +1,18 @@
 
 # Hadoop Ozone/HDDS docs
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a26c521/hadoop-ozone/docs/archetypes/default.md
--
diff --git a/hadoop-ozone/docs/archetypes/default.md 
b/hadoop-ozone/docs/archetypes/default.md
index e67e68a..f4cc999 100644
--- a/hadoop-ozone/docs/archetypes/default.md
+++ b/hadoop-ozone/docs/archetypes/default.md
@@ -3,15 +3,18 @@ title: "{{ replace .Name "-" " " | title }}"
 menu: main
 ---
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a26c521/hadoop-ozone/docs/content/BucketCommands.md
--
diff --git a/hadoop-ozone/docs/content/BucketCommands.md 
b/hadoop-ozone/docs/content/BucketCommands.md
index dad11e3..3ab3505 100644
--- a/hadoop-ozone/docs/content/BucketCommands.md
+++ b/hadoop-ozone/docs/content/BucketCommands.md
@@ -5,6 +5,22 @@ menu:
   parent: Client
   weight: 3
 ---
+
 
 Ozone shell supports the following bucket commands.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a26c521/hadoop-ozone/docs/content/BuildingSources.md
--
diff --git a/hadoop-ozone/docs/content/BuildingSources.md 
b/hadoop-ozone/docs/content/BuildingSources.md
index 2cad55b..1953f47 100644
--- a/hadoop-ozone/docs/content/BuildingSources.md
+++ b/hadoop-ozone/docs/content/BuildingSources.md
@@ -6,6 +6,23 @@ menu:
   parent: Starting
   weight: 5
 ---
+
+
 ***This is a guide on how to build the ozone sources.  If you are not
 planning to build sources yourself, you can safely skip this page.***

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a26c521/hadoop-ozone/docs/content/CommandShell.md
--
diff --git a/hadoop-ozone/docs/content/CommandShell.md 
b/hadoop-ozone/docs/content/CommandShell.md
index bfb2d39..74

[36/50] [abbrv] hadoop git commit: HDDS-399. Persist open pipeline information across SCM restart. Contributed by Mukul Kumar Singh.

2018-09-17 Thread shv
HDDS-399. Persist open pipeline information across SCM restart. Contributed by 
Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84693669
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84693669
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84693669

Branch: refs/heads/HDFS-12943
Commit: 846936698b2c8c50662e43534ac999df82066a8b
Parents: 9a265fa
Author: Nanda kumar 
Authored: Mon Sep 17 21:51:54 2018 +0530
Committer: Nanda kumar 
Committed: Mon Sep 17 21:51:54 2018 +0530

--
 .../scm/container/common/helpers/Pipeline.java  |  24 ++
 .../org/apache/hadoop/ozone/OzoneConsts.java|   2 +
 .../hdds/scm/container/ContainerMapping.java|  24 +-
 .../scm/container/ContainerStateManager.java|  25 +-
 .../scm/container/states/ContainerStateMap.java |  38 ---
 .../hdds/scm/pipelines/PipelineManager.java | 148 +--
 .../hdds/scm/pipelines/PipelineSelector.java| 249 +--
 .../scm/pipelines/PipelineStateManager.java | 136 ++
 .../scm/pipelines/ratis/RatisManagerImpl.java   |   8 +-
 .../standalone/StandaloneManagerImpl.java   |   8 +-
 .../container/TestContainerReportHandler.java   |   3 +-
 .../container/TestContainerStateManager.java|   4 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  |   4 +-
 .../TestContainerStateManagerIntegration.java   |  10 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  22 +-
 .../hdds/scm/pipeline/TestPipelineClose.java|  15 +-
 .../hdds/scm/pipeline/TestSCMRestart.java   | 101 
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   5 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   8 +-
 19 files changed, 510 insertions(+), 324 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84693669/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index 6757262..ef148e5 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -86,6 +86,30 @@ public class Pipeline {
 datanodes = new TreeMap<>();
   }
 
+  @Override
+  public int hashCode() {
+return id.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+Pipeline that = (Pipeline) o;
+
+return id.equals(that.id)
+&& factor.equals(that.factor)
+&& type.equals(that.type)
+&& lifeCycleState.equals(that.lifeCycleState)
+&& leaderID.equals(that.leaderID);
+
+  }
+
   /**
* Gets pipeline object from protobuf.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84693669/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index bf4508b..0a15ec8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -90,7 +90,9 @@ public final class OzoneConsts {
* level DB names used by SCM and data nodes.
*/
   public static final String CONTAINER_DB_SUFFIX = "container.db";
+  public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
+  public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84693669/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 5678205..11cc9ee 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.

[45/50] [abbrv] hadoop git commit: Merge commit 'e780556ae9229fe7a90817eb4e5449d7eed35dd8' into HDFS-12943

2018-09-17 Thread shv
Merge commit 'e780556ae9229fe7a90817eb4e5449d7eed35dd8' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94d7f90e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94d7f90e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94d7f90e

Branch: refs/heads/HDFS-12943
Commit: 94d7f90e93beca22476504b0cdb3e6bd05f05888
Parents: c2e0e9a e780556
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 17:26:28 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Sep 17 17:26:28 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java |  26 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  14 +
 .../org/apache/hadoop/http/TestHttpServer.java  |  14 +
 .../java/org/apache/hadoop/ipc/TestIPC.java |  45 ++
 .../src/main/resources/kms-default.xml  |   7 +
 .../src/main/compose/ozone/docker-config|  37 ++
 .../common/dev-support/findbugsExcludeFile.xml  |   4 +
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  82 +++
 .../hadoop/hdds/cli/HddsVersionProvider.java|  35 ++
 .../apache/hadoop/hdds/cli/package-info.java|  22 +
 .../helpers/BlockNotCommittedException.java |  36 ++
 .../scm/storage/ContainerProtocolCalls.java |   5 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  11 +
 .../org/apache/hadoop/ozone/OzoneConsts.java|  39 +-
 .../apache/hadoop/ozone/audit/package-info.java |   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   2 +-
 .../org/apache/hadoop/utils/db/DBStore.java |  22 +
 .../org/apache/hadoop/utils/db/RDBStore.java|  26 +-
 .../main/proto/DatanodeContainerProtocol.proto  |  12 +-
 .../common/src/main/resources/ozone-default.xml |  30 +-
 .../container/common/interfaces/Handler.java|  15 +-
 .../statemachine/DatanodeStateMachine.java  |  25 +-
 .../ReplicateContainerCommandHandler.java   | 124 -
 .../transport/server/XceiverServerGrpc.java |  12 +-
 .../container/keyvalue/KeyValueContainer.java   |   3 +-
 .../container/keyvalue/KeyValueHandler.java | 100 ++--
 .../container/ozoneimpl/OzoneContainer.java |   9 +-
 .../replication/ContainerDownloader.java|  40 ++
 .../replication/ContainerReplicationSource.java |  49 ++
 .../replication/ContainerStreamingOutput.java   |  45 ++
 .../replication/GrpcReplicationClient.java  | 169 ++
 .../replication/GrpcReplicationService.java | 130 +
 .../OnDemandContainerReplicationSource.java |  76 +++
 .../replication/SimpleContainerDownloader.java  | 121 
 .../container/replication/package-info.java |  21 +
 .../TestReplicateContainerCommandHandler.java   | 146 +
 .../commandhandler/package-info.java|  22 +
 .../apache/hadoop/hdds/server/ServerUtils.java  |   5 +
 .../hadoop/hdds/server/events/EventWatcher.java |  20 +-
 hadoop-hdds/pom.xml |   5 +
 .../scm/command/CommandStatusReportHandler.java |   6 +-
 .../container/CloseContainerEventHandler.java   |  26 +
 .../scm/container/CloseContainerWatcher.java| 100 
 .../hdds/scm/container/ContainerMapping.java|  13 +
 .../hadoop/hdds/scm/events/SCMEvents.java   |  11 +
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  41 +-
 .../hdds/scm/node/states/Node2ContainerMap.java |  29 +-
 .../scm/server/StorageContainerManager.java |  11 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  | 112 
 .../container/TestCloseContainerWatcher.java| 287 ++
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java   |  43 --
 .../hdds/scm/cli/OzoneCommandHandler.java   |  87 ---
 .../apache/hadoop/hdds/scm/cli/ResultCode.java  |  31 --
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  | 246 +++--
 .../cli/container/CloseContainerHandler.java|  85 ---
 .../hdds/scm/cli/container/CloseSubcommand.java |  54 ++
 .../cli/container/ContainerCommandHandler.java  | 128 -
 .../cli/container/CreateContainerHandler.java   |  67 ---
 .../scm/cli/container/CreateSubcommand.java |  65 +++
 .../cli/container/DeleteContainerHandler.java   |  95 
 .../scm/cli/container/DeleteSubcommand.java |  60 ++
 .../scm/cli/container/InfoContainerHandler.java | 114 
 .../hdds/scm/cli/container/InfoSubcommand.java  |  94 
 .../scm/cli/container/ListContainerHandler.java | 117 
 .../hdds/scm/cli/container/ListSubcommand.java  |  83 +++
 .../hdds/scm/cli/container/package-info.java|   3 +
 .../hadoop/hdds/scm/cli/package-info.java   |  12 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +
 .../src/main/resources/httpfs-default.xml   |   7 +
 .../federation/resolver/MountTableResolver.java |  39 +-
 .../server/federation/router/RBFConfigKeys.java |   4 +
 .../federation/router/RouterRpcServer.java  |   2 +-
 .../hdfs/tools/federation/RouterAdmin.java  |  55 +-
 .../src/main/resources/hdfs-rbf-default.xml |   8 +
 .../resolve

[41/50] [abbrv] hadoop git commit: HADOOP-15754. s3guard: testDynamoTableTagging should clear existing config. Contributed by Gabor Bota.

2018-09-17 Thread shv
HADOOP-15754. s3guard: testDynamoTableTagging should clear existing config.
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26d0c63a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26d0c63a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26d0c63a

Branch: refs/heads/HDFS-12943
Commit: 26d0c63a1e2eea6558fca2c55c134c02ecc93bf8
Parents: 3d89c3e
Author: Steve Loughran 
Authored: Mon Sep 17 22:40:08 2018 +0100
Committer: Steve Loughran 
Committed: Mon Sep 17 22:40:08 2018 +0100

--
 .../fs/s3a/s3guard/AbstractS3GuardToolTestBase.java |  8 +---
 .../fs/s3a/s3guard/ITestDynamoDBMetadataStore.java  |  6 ++
 .../fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java| 16 +++-
 3 files changed, 26 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d0c63a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 242f616..632676f 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -300,16 +300,18 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   @Test
   public void testSetCapacityFailFastOnReadWriteOfZero() throws Exception{
 Configuration conf = getConfiguration();
+String bucket = getFileSystem().getBucket();
 conf.set(S3GUARD_DDB_TABLE_NAME_KEY, getFileSystem().getBucket());
 
 S3GuardTool.SetCapacity cmdR = new S3GuardTool.SetCapacity(conf);
-String[] argsR = new String[]{cmdR.getName(), "-read", "0", 
"s3a://bucket"};
+String[] argsR =
+new String[]{cmdR.getName(), "-read", "0", "s3a://" + bucket};
 intercept(IllegalArgumentException.class,
 S3GuardTool.SetCapacity.READ_CAP_INVALID, () -> cmdR.run(argsR));
 
 S3GuardTool.SetCapacity cmdW = new S3GuardTool.SetCapacity(conf);
-String[] argsW = new String[]{cmdW.getName(), "-write", "0",
-"s3a://bucket"};
+String[] argsW =
+new String[]{cmdW.getName(), "-write", "0", "s3a://" + bucket};
 intercept(IllegalArgumentException.class,
 S3GuardTool.SetCapacity.WRITE_CAP_INVALID, () -> cmdW.run(argsW));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d0c63a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
index a8425bf..5355910 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
@@ -630,6 +630,12 @@ public class ITestDynamoDBMetadataStore extends 
MetadataStoreTestBase {
   @Test
   public void testTableTagging() throws IOException {
 final Configuration conf = getFileSystem().getConf();
+
+// clear all table tagging config before this test
+conf.getPropsWithPrefix(S3GUARD_DDB_TABLE_TAG).keySet().forEach(
+propKey -> conf.unset(S3GUARD_DDB_TABLE_TAG + propKey)
+);
+
 String tableName = "testTableTagging-" + UUID.randomUUID();
 conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 conf.set(S3GUARD_DDB_TABLE_CREATE_KEY, "true");
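
The clearing loop added above relies on Configuration.getPropsWithPrefix returning the matching keys with the prefix stripped, so the prefix has to be re-added when unsetting. A standalone sketch (the literal prefix string is our assumption of S3GUARD_DDB_TABLE_TAG's value):

import org.apache.hadoop.conf.Configuration;

public class ClearPrefixedKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    String prefix = "fs.s3a.s3guard.ddb.table.tag.";
    conf.set(prefix + "team", "hadoop");
    conf.set(prefix + "env", "test");

    // getPropsWithPrefix strips the prefix from the returned keys,
    // so it must be prepended again before unsetting.
    conf.getPropsWithPrefix(prefix).keySet()
        .forEach(key -> conf.unset(prefix + key));

    System.out.println(conf.getPropsWithPrefix(prefix)); // {}
  }
}
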

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d0c63a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index 66a8239..65e2619 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -44,7 +44,9 @@ import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.Destroy;
 import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.Init;
 import org.apache.hadoop.test.L

[42/50] [abbrv] hadoop git commit: HDDS-475. Block Allocation returns same BlockID on different keys creation. Contributed by Nanda Kumar.

2018-09-17 Thread shv
HDDS-475. Block Allocation returns same BlockID on different keys creation.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23a6137a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23a6137a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23a6137a

Branch: refs/heads/HDFS-12943
Commit: 23a6137a40b34742714e0140268c491e1da6db6d
Parents: 26d0c63
Author: Anu Engineer 
Authored: Mon Sep 17 14:08:39 2018 -0700
Committer: Anu Engineer 
Committed: Mon Sep 17 14:41:17 2018 -0700

--
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 202 +++
 1 file changed, 117 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23a6137a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 8322b73..3405b0d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -48,8 +48,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
 .CHILL_MODE_EXCEPTION;
@@ -76,7 +74,6 @@ public class BlockManagerImpl implements 
EventHandler,
   private final NodeManager nodeManager;
   private final Mapping containerManager;
 
-  private final ReadWriteLock lock;
   private final long containerSize;
 
   private final DeletedBlockLog deletedBlockLog;
@@ -113,7 +110,6 @@ public class BlockManagerImpl implements 
EventHandler,
 ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE,
 ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT);
 rand = new Random();
-this.lock = new ReentrantReadWriteLock();
 
 mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
 
@@ -223,74 +219,29 @@ public class BlockManagerImpl implements 
EventHandler,
 
 ContainerWithPipeline containerWithPipeline;
 
-lock.readLock().lock();
-try {
-  // This is to optimize performance, if the below condition is evaluated
-  // to false, then we can be sure that there are no containers in
-  // ALLOCATED state.
-  // This can result in false positive, but it will never be false 
negative.
-  // How can this result in false positive? We check if there are any
-  // containers in ALLOCATED state, this check doesn't care about the
-  // USER of the containers. So there might be cases where a different
-  // USER has few containers in ALLOCATED state, which will result in
-  // false positive.
-  if (!containerManager.getStateManager().getContainerStateMap()
-  .getContainerIDsByState(HddsProtos.LifeCycleState.ALLOCATED)
-  .isEmpty()) {
-// Since the above check can result in false positive, we have to do
-// the actual check and find out if there are containers in ALLOCATED
-// state matching our criteria.
-synchronized (this) {
-  // Using containers from ALLOCATED state should be done within
-  // synchronized block (or) write lock. Since we already hold a
-  // read lock, we will end up in deadlock situation if we take
-  // write lock here.
-  containerWithPipeline = containerManager
-  .getMatchingContainerWithPipeline(size, owner, type, factor,
-  HddsProtos.LifeCycleState.ALLOCATED);
-  if (containerWithPipeline != null) {
-containerManager.updateContainerState(
-containerWithPipeline.getContainerInfo().getContainerID(),
-HddsProtos.LifeCycleEvent.CREATE);
-return newBlock(containerWithPipeline,
-HddsProtos.LifeCycleState.ALLOCATED);
-  }
-}
-  }
-
-  // Since we found no allocated containers that match our criteria, let us
-  // look for OPEN containers that match the criteria.
-  containerWithPipeline = containerManager
-  .getMatchingContainerWithPipeline(size, owner, type, factor,
-  HddsProtos.LifeCycleState.OPEN);
-  if (containerWithPipeline != null) {
-return newBlock(containerWithPipeline, HddsProtos.LifeCycleState.OPEN);
-  }
-
-  // We found neither ALLOCATED or OPEN Containers. This gen
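
The removed comments above hinge on a property of ReentrantReadWriteLock: it does not support upgrading a read lock to a write lock, so a thread holding the read lock that requests the write lock waits for all readers, including itself, to release. A minimal sketch of the hazard (the fatal call is left commented out because executing it would hang):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadLockUpgradeSketch {
  public static void main(String[] args) {
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    lock.readLock().lock();
    try {
      // ReentrantReadWriteLock has no upgrade path: requesting the write
      // lock here would block until the read lock held by this very
      // thread is released, i.e. forever (self-deadlock).
      // lock.writeLock().lock(); // would block forever
      System.out.println("holding read lock; write lock not attempted");
    } finally {
      lock.readLock().unlock();
    }
  }
}
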

[37/50] [abbrv] hadoop git commit: HDFS-13919. Documentation: Improper formatting in Disk Balancer for Settings. Contributed by Ayush Saxena.

2018-09-17 Thread shv
HDFS-13919. Documentation: Improper formatting in Disk Balancer for Settings.
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdf5a3fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdf5a3fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdf5a3fd

Branch: refs/heads/HDFS-12943
Commit: fdf5a3fd63a24b2cb2acafbc30ae4f993ff33145
Parents: 8469366
Author: Anu Engineer 
Authored: Mon Sep 17 10:08:23 2018 -0700
Committer: Anu Engineer 
Committed: Mon Sep 17 10:08:23 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdf5a3fd/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index ed0233a..5dd6ffc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -127,6 +127,7 @@ There is a set of diskbalancer settings that can be controlled via hdfs-site.xml
 |`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies when we have reached a good enough value for any copy step. For example, if you specify 10% then getting close to 10% of the target value is good enough.|
 |`dfs.disk.balancer.plan.threshold.percent`| The percentage threshold value for volume Data Density in a plan. If the absolute value of a volume's Data Density exceeds this threshold on a node, the volumes corresponding to those disks should be balanced by the plan. The default value is 10.|
 |`dfs.disk.balancer.plan.valid.interval`| Maximum amount of time a disk balancer plan is valid. Supports the following suffixes (case insensitive): ms(millis), s(sec), m(min), h(hour), d(day) to specify the time (such as 2s, 2m, 1h, etc.). If no suffix is specified then milliseconds is assumed. Default value is 1d.|
+
  Debugging
 -
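
The suffix handling described for dfs.disk.balancer.plan.valid.interval in the table above is the standard Hadoop Configuration.getTimeDuration behavior; a small sketch:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

public class PlanValidIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.disk.balancer.plan.valid.interval", "2h");
    // getTimeDuration parses the suffixes listed in the table; a bare
    // number is interpreted as milliseconds, and the default here is 1 day.
    long millis = conf.getTimeDuration(
        "dfs.disk.balancer.plan.valid.interval",
        TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS);
    System.out.println(millis); // 7200000
  }
}
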
 




