[48/50] [abbrv] hadoop git commit: HDDS-192: Create new SCMCommand to request a replication of a container. Contributed by Elek Marton

2018-06-25 Thread xkrogen
HDDS-192: Create new SCMCommand to request a replication of a container. 
Contributed by Elek Marton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/238fe00a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/238fe00a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/238fe00a

Branch: refs/heads/HDFS-12943
Commit: 238fe00ad2692154f6a382f35735169ee5e4af2c
Parents: 35ec940
Author: Bharat Viswanadham 
Authored: Mon Jun 25 21:12:05 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Jun 25 21:12:05 2018 -0700

--
 .../statemachine/DatanodeStateMachine.java  |  3 +
 .../ReplicateContainerCommandHandler.java   | 67 ++
 .../states/endpoint/HeartbeatEndpointTask.java  | 12 +++
 .../commands/ReplicateContainerCommand.java | 94 
 .../StorageContainerDatanodeProtocol.proto  | 12 ++-
 .../scm/server/SCMDatanodeProtocolServer.java   | 11 +++
 .../TestReplicateContainerHandler.java  | 71 +++
 7 files changed, 269 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/238fe00a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index dc4e673..b073d7b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -32,6 +32,8 @@ import 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .CommandDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .DeleteBlocksCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
+.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
@@ -95,6 +97,7 @@ public class DatanodeStateMachine implements Closeable {
 .addHandler(new CloseContainerCommandHandler())
 .addHandler(new DeleteBlocksCommandHandler(
 container.getContainerManager(), conf))
+.addHandler(new ReplicateContainerCommandHandler())
 .setConnectionManager(connectionManager)
 .setContainer(container)
 .setContext(context)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/238fe00a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
new file mode 100644
index 000..b4e83b7
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import 

[34/50] [abbrv] hadoop git commit: YARN-8184. Too many metrics if containerLocalizer/ResourceLocalizationService uses ReadWriteDiskValidator. Contributed by Yufei Gu

2018-06-25 Thread xkrogen
YARN-8184. Too many metrics if containerLocalizer/ResourceLocalizationService 
uses ReadWriteDiskValidator. Contributed by Yufei Gu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cdce86d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cdce86d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cdce86d

Branch: refs/heads/HDFS-12943
Commit: 1cdce86d33d4b73ba6dd4136c966eb7e822b6f36
Parents: ae05562
Author: Yufei Gu 
Authored: Fri Jun 22 14:02:32 2018 -0700
Committer: Yufei Gu 
Committed: Fri Jun 22 14:03:55 2018 -0700

--
 .../containermanager/localizer/ContainerLocalizer.java  | 5 +
 .../containermanager/localizer/ResourceLocalizationService.java | 5 +
 2 files changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cdce86d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 6a384ae..c034369 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -132,10 +132,7 @@ public class ContainerLocalizer {
 this.recordFactory = recordFactory;
 this.conf = new YarnConfiguration();
 this.diskValidator = DiskValidatorFactory.getInstance(
-conf.get(YarnConfiguration.DISK_VALIDATOR,
-YarnConfiguration.DEFAULT_DISK_VALIDATOR));
-LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR +
-" is loaded.");
+YarnConfiguration.DEFAULT_DISK_VALIDATOR);
 this.appCacheDirContextName = String.format(APPCACHE_CTXT_FMT, appId);
 this.pendingResources = new HashMap>();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cdce86d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index ddae2ae..3f0a6fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -264,10 +264,7 @@ public class ResourceLocalizationService extends 
CompositeService
 }
 
 diskValidator = DiskValidatorFactory.getInstance(
-conf.get(YarnConfiguration.DISK_VALIDATOR,
-YarnConfiguration.DEFAULT_DISK_VALIDATOR));
-LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR +
-" is loaded.");
+YarnConfiguration.DEFAULT_DISK_VALIDATOR);
 cacheTargetSize =
   conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB, 
YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20;
 cacheCleanupPeriod =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HADOOP-15527. Improve delay check for stopping processes. Fixed script location. Contributed by Vinod Kumar Vavilapalli

2018-06-25 Thread xkrogen
HADOOP-15527.  Improve delay check for stopping processes.
   Fixed script location.
   Contributed by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d87592f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d87592f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d87592f

Branch: refs/heads/HDFS-12943
Commit: 2d87592fc6a56bfe77dd3c11953caea2b701c846
Parents: f386e78
Author: Eric Yang 
Authored: Tue Jun 19 13:38:13 2018 -0400
Committer: Eric Yang 
Committed: Tue Jun 19 13:38:13 2018 -0400

--
 .../test/scripts/process_with_sigterm_trap.sh   | 24 
 .../test/scripts/process_with_sigterm_trap.sh   | 24 
 2 files changed, 24 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d87592f/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
 
b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
new file mode 100644
index 000..d7c7427
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+trap "echo SIGTERM trapped!" SIGTERM
+trap "echo SIGINT  trapped!" SIGINT
+
+echo "$$" > "$1"
+
+while true; do
+  sleep 1.3
+done

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d87592f/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
--
diff --git a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh 
b/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
deleted file mode 100644
index d7c7427..000
--- a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-trap "echo SIGTERM trapped!" SIGTERM
-trap "echo SIGINT  trapped!" SIGINT
-
-echo "$$" > "$1"
-
-while true; do
-  sleep 1.3
-done


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: HDFS-13682. Cannot create encryption zone after KMS auth token expires.

2018-06-25 Thread xkrogen
HDFS-13682. Cannot create encryption zone after KMS auth token expires.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32f867a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32f867a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32f867a6

Branch: refs/heads/HDFS-12943
Commit: 32f867a6a907c05a312657139d295a92756d98ef
Parents: b089a06
Author: Xiao Chen 
Authored: Wed Jun 20 15:58:01 2018 -0700
Committer: Xiao Chen 
Committed: Wed Jun 20 15:58:26 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   |  9 --
 .../hadoop/security/UserGroupInformation.java   |  4 ++-
 .../hdfs/TestSecureEncryptionZoneWithKMS.java   | 34 ++--
 3 files changed, 41 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32f867a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 08787a5..edbf897 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -32,7 +32,9 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -543,7 +545,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 String requestMethod = conn.getRequestMethod();
 URL url = conn.getURL();
 conn = createConnection(url, requestMethod);
-conn.setRequestProperty(CONTENT_TYPE, contentType);
+if (contentType != null && !contentType.isEmpty()) {
+  conn.setRequestProperty(CONTENT_TYPE, contentType);
+}
 return call(conn, jsonOutput, expectedResponse, klass,
 authRetryCount - 1);
   }
@@ -1087,8 +1091,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   actualUgi = currentUgi.getRealUser();
 }
 if (UserGroupInformation.isSecurityEnabled() &&
-!containsKmsDt(actualUgi) &&
-!actualUgi.hasKerberosCredentials()) {
+!containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) {
   // Use login user is only necessary when Kerberos is enabled
   // but the actual user does not have either
   // Kerberos credential or KMS delegation token for KMS operations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32f867a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 3872810..29b9fea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -831,7 +831,9 @@ public class UserGroupInformation {
 return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
   }
 
-  private boolean shouldRelogin() {
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public boolean shouldRelogin() {
 return hasKerberosCredentials() && isHadoopLogin();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32f867a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
index 7c4763c..db97c02 100644
--- 

[37/50] [abbrv] hadoop git commit: HDDS-177. Create a releasable ozonefs artifact Contributed by Marton, Elek.

2018-06-25 Thread xkrogen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
deleted file mode 100644
index 3156eb2..000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import java.io.IOException;
-
-/**
- * Ozone contract test for ROOT directory operations.
- */
-public class ITestOzoneContractRootDir extends
-AbstractContractRootDirectoryTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-return new OzoneContract(conf);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
deleted file mode 100644
index c4bc0ff..000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering file seek.
- */
-public class ITestOzoneContractSeek extends AbstractContractSeekTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-return new OzoneContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
 

[45/50] [abbrv] hadoop git commit: HADOOP-15423. Merge fileCache and dirCache into one single cache in LocalMetadataStore. Contributed by Gabor Bota.

2018-06-25 Thread xkrogen
HADOOP-15423. Merge fileCache and dirCache into one single cache in 
LocalMetadataStore. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c687a661
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c687a661
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c687a661

Branch: refs/heads/HDFS-12943
Commit: c687a6617d73293019d8d91ac48bbfd2ccca3b40
Parents: a55d6bb
Author: Sean Mackrory 
Authored: Mon Jun 25 11:04:34 2018 -0600
Committer: Sean Mackrory 
Committed: Mon Jun 25 14:59:41 2018 -0600

--
 .../fs/s3a/s3guard/LocalMetadataEntry.java  |  81 ++
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 247 +++
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |   2 +-
 .../fs/s3a/s3guard/TestLocalMetadataStore.java  |  33 ++-
 4 files changed, 240 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c687a661/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
new file mode 100644
index 000..6040d67
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import javax.annotation.Nullable;
+
+/**
+ * LocalMetadataEntry is used to store entries in the cache of
+ * LocalMetadataStore. PathMetadata or dirListingMetadata can be null. The
+ * entry is not immutable.
+ */
+public final class LocalMetadataEntry {
+  @Nullable
+  private PathMetadata pathMetadata;
+  @Nullable
+  private DirListingMetadata dirListingMetadata;
+
+  LocalMetadataEntry(PathMetadata pmd){
+pathMetadata = pmd;
+dirListingMetadata = null;
+  }
+
+  LocalMetadataEntry(DirListingMetadata dlm){
+pathMetadata = null;
+dirListingMetadata = dlm;
+  }
+
+  public PathMetadata getFileMeta() {
+return pathMetadata;
+  }
+
+  public DirListingMetadata getDirListingMeta() {
+return dirListingMetadata;
+  }
+
+
+  public boolean hasPathMeta() {
+return this.pathMetadata != null;
+  }
+
+  public boolean hasDirMeta() {
+return this.dirListingMetadata != null;
+  }
+
+  public void setPathMetadata(PathMetadata pathMetadata) {
+this.pathMetadata = pathMetadata;
+  }
+
+  public void setDirListingMetadata(DirListingMetadata dirListingMetadata) {
+this.dirListingMetadata = dirListingMetadata;
+  }
+
+  @Override public String toString() {
+StringBuilder sb = new StringBuilder();
+sb.append("LocalMetadataEntry{");
+if(pathMetadata != null) {
+  sb.append("pathMetadata=" + pathMetadata.getFileStatus().getPath());
+}
+if(dirListingMetadata != null){
+  sb.append("; dirListingMetadata=" + dirListingMetadata.getPath());
+}
+sb.append("}");
+return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c687a661/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 95689e1..49981ed 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -37,13 +37,12 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Map;
 

[26/50] [abbrv] hadoop git commit: HADOOP-15551. Avoid use of Arrays.stream in Configuration.addTags

2018-06-25 Thread xkrogen
HADOOP-15551. Avoid use of Arrays.stream in Configuration.addTags


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43541a18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43541a18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43541a18

Branch: refs/heads/HDFS-12943
Commit: 43541a18907d2303b708ae27a9a2cb5df891da4f
Parents: 32f867a
Author: Todd Lipcon 
Authored: Wed Jun 20 12:38:59 2018 -0700
Committer: Todd Lipcon 
Committed: Wed Jun 20 16:43:10 2018 -0700

--
 .../src/main/java/org/apache/hadoop/conf/Configuration.java  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43541a18/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 19bd5da..b1125e5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3189,25 +3189,25 @@ public class Configuration implements 
Iterable>,
   if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) {
 String systemTags = prop.getProperty(CommonConfigurationKeys
 .HADOOP_TAGS_SYSTEM);
-Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+TAGS.addAll(Arrays.asList(systemTags.split(",")));
   }
   // Get all custom tags
   if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) {
 String customTags = prop.getProperty(CommonConfigurationKeys
 .HADOOP_TAGS_CUSTOM);
-Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+TAGS.addAll(Arrays.asList(customTags.split(",")));
   }
 
   if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) {
 String systemTags = prop.getProperty(CommonConfigurationKeys
 .HADOOP_SYSTEM_TAGS);
-Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+TAGS.addAll(Arrays.asList(systemTags.split(",")));
   }
   // Get all custom tags
   if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) {
 String customTags = prop.getProperty(CommonConfigurationKeys
 .HADOOP_CUSTOM_TAGS);
-Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+TAGS.addAll(Arrays.asList(customTags.split(",")));
   }
 
 } catch (Exception ex) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[10/50] [abbrv] hadoop git commit: HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii

2018-06-25 Thread xkrogen
HADOOP-15533. Make WASB listStatus messages consistent. Contributed by 
Esfandiar Manii


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3474460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3474460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3474460

Branch: refs/heads/HDFS-12943
Commit: f34744603ee93e082e7ba148df1400af5ac7c30c
Parents: 980031b
Author: Chris Douglas 
Authored: Sun Jun 17 23:12:18 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 23:12:18 2018 -0700

--
 .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3474460/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index e05327e..dfc881a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2886,7 +2886,7 @@ public class NativeAzureFileSystem extends FileSystem {
   // There is no metadata found for the path.
   LOG.debug("Did not find any metadata for path: {}", key);
 
-  throw new FileNotFoundException("File" + f + " does not exist.");
+  throw new FileNotFoundException(f + " is not found");
 }
 
 return status.toArray(new FileStatus[0]);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-8441. Typo in CSQueueUtils local variable names: queueGuranteedResource. Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-8441. Typo in CSQueueUtils local variable names: queueGuranteedResource. 
Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46f90581
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46f90581
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46f90581

Branch: refs/heads/HDFS-12943
Commit: 46f90581641feec37e285964df983d221bee5e1d
Parents: 388fafa
Author: Miklos Szegedi 
Authored: Wed Jun 20 11:58:18 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 11:58:18 2018 -0700

--
 .../scheduler/capacity/CSQueueUtils.java  | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f90581/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 0dfce83..b5edbf7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -184,7 +184,7 @@ public class CSQueueUtils {
 if (Resources.greaterThan(rc, totalPartitionResource,
 totalPartitionResource, Resources.none())) {
 
-  Resource queueGuranteedResource = childQueue
+  Resource queueGuaranteedResource = childQueue
   .getEffectiveCapacity(nodePartition);
 
   //TODO : Modify below code to support Absolute Resource configurations
@@ -204,14 +204,14 @@ public class CSQueueUtils {
 QueueCapacities leafQueueTemplateCapacities = parentQueue
 .getLeafQueueTemplate()
 .getQueueCapacities();
-queueGuranteedResource = Resources.multiply(totalPartitionResource,
+queueGuaranteedResource = Resources.multiply(totalPartitionResource,
 leafQueueTemplateCapacities.getAbsoluteCapacity
 (nodePartition));
   }
 
   // make queueGuranteed >= minimum_allocation to avoid divided by 0.
-  queueGuranteedResource =
-  Resources.max(rc, totalPartitionResource, queueGuranteedResource,
+  queueGuaranteedResource =
+  Resources.max(rc, totalPartitionResource, queueGuaranteedResource,
   minimumAllocation);
 
   Resource usedResource = queueResourceUsage.getUsed(nodePartition);
@@ -220,12 +220,12 @@ public class CSQueueUtils {
   totalPartitionResource);
   usedCapacity =
   Resources.divide(rc, totalPartitionResource, usedResource,
-  queueGuranteedResource);
+  queueGuaranteedResource);
 
   Resource resResource = queueResourceUsage.getReserved(nodePartition);
   reservedCapacity =
   Resources.divide(rc, totalPartitionResource, resResource,
-  queueGuranteedResource);
+  queueGuaranteedResource);
   absoluteReservedCapacity =
   Resources.divide(rc, totalPartitionResource, resResource,
   totalPartitionResource);
@@ -258,16 +258,16 @@ public class CSQueueUtils {
 for (String partition : nodeLabels) {
   // Calculate guaranteed resource for a label in a queue by below logic.
   // (total label resource) * (absolute capacity of label in that queue)
-  Resource queueGuranteedResource = queue.getEffectiveCapacity(partition);
+  Resource queueGuaranteedResource = queue.getEffectiveCapacity(partition);
 
   // Available resource in queue for a specific label will be calculated as
   // {(guaranteed resource for a label in a queue) -
   // (resource usage of that label in the queue)}
   // Finally accumulate this available resource to get total.
   Resource available = (Resources.greaterThan(rc, cluster,
-  queueGuranteedResource,
+  queueGuaranteedResource,
   queue.getQueueResourceUsage().getUsed(partition))) ? Resources
-  .componentwiseMax(Resources.subtractFrom(queueGuranteedResource,
+  .componentwiseMax(Resources.subtractFrom(queueGuaranteedResource,
   

[38/50] [abbrv] hadoop git commit: HDDS-177. Create a releasable ozonefs artifact Contributed by Marton, Elek.

2018-06-25 Thread xkrogen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
new file mode 100644
index 000..3156eb2
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+import java.io.IOException;
+
+/**
+ * Ozone contract test for ROOT directory operations.
+ */
+public class ITestOzoneContractRootDir extends
+AbstractContractRootDirectoryTest {
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+return new OzoneContract(conf);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
new file mode 100644
index 000..c4bc0ff
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Ozone contract tests covering file seek.
+ */
+public class ITestOzoneContractSeek extends AbstractContractSeekTest {
+  @BeforeClass
+  public static void createCluster() throws IOException {
+OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+return new OzoneContract(conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
 

[11/50] [abbrv] hadoop git commit: HDDS-141. Remove PipeLine Class from SCM and move the data field in the Pipeline to ContainerInfo. Contributed by Shashikant Banerjee.

2018-06-25 Thread xkrogen
HDDS-141. Remove PipeLine Class from SCM and move the data field in the 
Pipeline to ContainerInfo. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a4632d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a4632d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a4632d3

Branch: refs/heads/HDFS-12943
Commit: 2a4632d3d7b82980d10cc90cdfc52afd866cebb8
Parents: f347446
Author: Mukul Kumar Singh 
Authored: Sun Jun 17 23:48:33 2018 -0700
Committer: Mukul Kumar Singh 
Committed: Sun Jun 17 23:48:49 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |  32 +
 .../scm/container/common/helpers/Pipeline.java  | 142 +++
 .../common/helpers/PipelineChannel.java | 124 
 hadoop-hdds/common/src/main/proto/hdds.proto|   8 +-
 .../scm/container/closer/ContainerCloser.java   |   6 +-
 .../hdds/scm/pipelines/PipelineManager.java |  67 +
 .../hdds/scm/pipelines/PipelineSelector.java|  11 +-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  13 +-
 .../standalone/StandaloneManagerImpl.java   |   8 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |   8 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   8 +-
 .../ozone/container/ContainerTestHelper.java|  19 +--
 .../genesis/BenchMarkContainerStateMap.java |  11 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|   6 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   4 +-
 15 files changed, 191 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a4632d3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 2c38d45..ee05c87 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
 import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.PropertyAccessor;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Comparator;
 
 import static java.lang.Math.max;
@@ -63,6 +65,13 @@ public class ContainerInfo
   private String owner;
   private long containerID;
   private long deleteTransactionId;
+  /**
+   * Allows you to maintain private data on ContainerInfo. This is not
+   * serialized via protobuf, just allows us to maintain some private data.
+   */
+  @JsonIgnore
+  private byte[] data;
+
   ContainerInfo(
   long containerID,
   HddsProtos.LifeCycleState state,
@@ -296,6 +305,29 @@ public class ContainerInfo
   }
 
   /**
+   * Returns private data that is set on this containerInfo.
+   *
+   * @return blob, the user can interpret it any way they like.
+   */
+  public byte[] getData() {
+if (this.data != null) {
+  return Arrays.copyOf(this.data, this.data.length);
+} else {
+  return null;
+}
+  }
+
+  /**
+   * Set private data on ContainerInfo object.
+   *
+   * @param data -- private data.
+   */
+  public void setData(byte[] data) {
+if (data != null) {
+  this.data = Arrays.copyOf(data, data.length);
+}
+  }
+  /**
* Builder class for ContainerInfo.
*/
   public static class Builder {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a4632d3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index 8740838..c5794f4 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -27,14 +27,14 @@ import com.fasterxml.jackson.databind.ObjectWriter;
 import com.fasterxml.jackson.databind.ser.FilterProvider;
 import 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-06-25 Thread xkrogen
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da14307c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da14307c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da14307c

Branch: refs/heads/HDFS-12943
Commit: da14307c354f5901c60e8eed97c1a793a62a8371
Parents: 292ccdc 238fe00
Author: Erik Krogen 
Authored: Tue Jun 26 11:00:27 2018 +0530
Committer: Erik Krogen 
Committed: Tue Jun 26 11:00:27 2018 +0530

--
 dev-support/bin/ozone-dist-layout-stitching |   4 +-
 .../org/apache/hadoop/conf/Configuration.java   |   8 +-
 .../crypto/key/kms/KMSClientProvider.java   |  16 +-
 .../java/org/apache/hadoop/fs/BBPartHandle.java |  58 ++
 .../org/apache/hadoop/fs/BBUploadHandle.java|  57 ++
 .../fs/CommonConfigurationKeysPublic.java   |   4 +-
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  22 +
 .../java/org/apache/hadoop/fs/FileContext.java  |  66 ++
 .../hadoop/fs/FileSystemMultipartUploader.java  | 132 
 .../hadoop/fs/LocalFileSystemPathHandle.java| 100 +++
 .../org/apache/hadoop/fs/MultipartUploader.java |  90 +++
 .../hadoop/fs/MultipartUploaderFactory.java |  65 ++
 .../main/java/org/apache/hadoop/fs/Options.java |   3 +
 .../java/org/apache/hadoop/fs/PartHandle.java   |  45 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  61 +-
 .../UnsupportedMultipartUploaderException.java  |  41 ++
 .../java/org/apache/hadoop/fs/UploadHandle.java |  47 ++
 .../hadoop/metrics2/impl/MetricsConfig.java |  15 +-
 .../security/ShellBasedUnixGroupsMapping.java   |  10 +-
 .../hadoop/security/UserGroupInformation.java   |   4 +-
 .../web/DelegationTokenAuthenticator.java   |   8 +-
 .../apache/hadoop/util/HttpExceptionUtils.java  |  12 +-
 .../apache/hadoop/util/JsonSerialization.java   |  24 +
 .../main/java/org/apache/hadoop/util/Shell.java |   2 +-
 .../org/apache/hadoop/util/SysInfoLinux.java|  19 +-
 .../hadoop-common/src/main/proto/FSProtos.proto |   8 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  16 +
 .../fs/AbstractSystemMultipartUploaderTest.java | 143 
 .../fs/FileContextMainOperationsBaseTest.java   |  44 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  23 +-
 .../TestLocalFileSystemMultipartUploader.java   |  65 ++
 .../AbstractContractPathHandleTest.java |   6 +
 .../TestRawlocalContractPathHandle.java |  40 ++
 .../TestShellBasedUnixGroupsMapping.java|  39 +-
 .../apache/hadoop/util/TestSysInfoLinux.java|  60 ++
 .../src/test/resources/contract/rawlocal.xml|   5 +
 .../test/scripts/process_with_sigterm_trap.sh   |  24 +
 .../crypto/key/kms/server/KMSJSONWriter.java|   6 +-
 hadoop-dist/pom.xml |   5 +
 .../container/common/helpers/ContainerInfo.java |  32 +
 .../scm/container/common/helpers/Pipeline.java  | 142 ++--
 .../common/helpers/PipelineChannel.java | 124 
 hadoop-hdds/common/src/main/proto/hdds.proto|   8 +-
 .../statemachine/DatanodeStateMachine.java  |   3 +
 .../ReplicateContainerCommandHandler.java   |  67 ++
 .../states/endpoint/HeartbeatEndpointTask.java  |  12 +
 .../protocol/commands/CommandForDatanode.java   |  45 ++
 .../commands/ReplicateContainerCommand.java |  94 +++
 .../StorageContainerDatanodeProtocol.proto  |  12 +-
 .../scm/container/closer/ContainerCloser.java   |   6 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  20 +-
 .../hdds/scm/pipelines/PipelineManager.java |  67 +-
 .../hdds/scm/pipelines/PipelineSelector.java|  11 +-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  13 +-
 .../standalone/StandaloneManagerImpl.java   |   8 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |  11 +
 .../scm/server/StorageContainerManager.java |   8 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |   8 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  39 ++
 .../main/java/org/apache/hadoop/fs/XAttr.java   |   4 +-
 .../hdfs/DFSMultipartUploaderFactory.java   |  40 ++
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |   4 +-
 .../hdfs/client/impl/BlockReaderFactory.java|   2 +-
 .../AddErasureCodingPolicyResponse.java |   4 +-
 .../hdfs/protocol/CacheDirectiveInfo.java   |   4 +-
 .../hadoop/hdfs/protocol/CachePoolInfo.java |   4 +-
 .../hadoop/hdfs/protocol/EncryptionZone.java|   4 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |   4 +-
 .../hdfs/protocol/ErasureCodingPolicyInfo.java  |   4 +-
 .../hadoop/hdfs/protocol/ExtendedBlock.java |   2 +-
 .../hdfs/shortcircuit/DfsClientShmManager.java  |   2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java|   2 +-
 .../hdfs/shortcircuit/ShortCircuitShm.java  |   4 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   7 +-
 ...onfRefreshTokenBasedAccessTokenProvider.java |   8 +-
 .../CredentialBasedAccessTokenProvider.java 

[33/50] [abbrv] hadoop git commit: MAPREDUCE-7114. Make FrameworkUploader symlink ignore improvement. Contributed by Gergo Repas.

2018-06-25 Thread xkrogen
MAPREDUCE-7114. Make FrameworkUploader symlink ignore improvement. Contributed 
by Gergo Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae055622
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae055622
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae055622

Branch: refs/heads/HDFS-12943
Commit: ae055622edeb3cbf82baa6ed952fc2abc84c021e
Parents: 55fad6a
Author: Miklos Szegedi 
Authored: Fri Jun 22 13:05:41 2018 -0700
Committer: Miklos Szegedi 
Committed: Fri Jun 22 13:05:41 2018 -0700

--
 .../hadoop/mapred/uploader/FrameworkUploader.java  |  2 +-
 .../hadoop/mapred/uploader/TestFrameworkUploader.java  | 13 +
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae055622/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index 5316f38..d2116c0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -409,7 +409,7 @@ public class FrameworkUploader implements Runnable {
 linkPath == null ? null : linkPath.getParent();
 java.nio.file.Path normalizedLinkPath =
 linkPathParent == null ? null : linkPathParent.normalize();
-if (normalizedLinkPath != null && jarParent.equals(
+if (normalizedLinkPath != null && jarParent.normalize().equals(
 normalizedLinkPath)) {
   LOG.info(String.format("Ignoring same directory link %s to %s",
   jarPath.toString(), link.toString()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae055622/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java
index c12902c..9c72f72 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java
@@ -440,6 +440,19 @@ public class TestFrameworkUploader {
   }
   Assert.assertTrue(uploader.checkSymlink(symlinkToTarget));
 
+  // Create a symlink to the target with /./ in the path
+  symlinkToTarget = new File(parent.getAbsolutePath() +
+"/./symlinkToTarget2.txt");
+  try {
+Files.createSymbolicLink(
+Paths.get(symlinkToTarget.getAbsolutePath()),
+Paths.get(targetFile.getAbsolutePath()));
+  } catch (UnsupportedOperationException e) {
+// Symlinks are not supported, so ignore the test
+Assume.assumeTrue(false);
+  }
+  Assert.assertTrue(uploader.checkSymlink(symlinkToTarget));
+
   // Create a symlink outside the current directory
   File symlinkOutside = new File(parent, "symlinkToParent.txt");
   try {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: HDDS-177. Create a releasable ozonefs artifact Contributed by Marton, Elek.

2018-06-25 Thread xkrogen
HDDS-177. Create a releasable ozonefs artifact
Contributed by Marton, Elek.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e16e5b30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e16e5b30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e16e5b30

Branch: refs/heads/HDFS-12943
Commit: e16e5b307d6c4404db0698b9d128e5bf4aa16a8a
Parents: ca14fec
Author: Anu Engineer 
Authored: Sun Jun 24 01:05:04 2018 -0700
Committer: Anu Engineer 
Committed: Sun Jun 24 01:05:04 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |   4 +-
 hadoop-dist/pom.xml |   5 +
 .../test/acceptance/ozonefs/docker-compose.yaml |  71 ++
 .../src/test/acceptance/ozonefs/docker-config   |  39 ++
 .../src/test/acceptance/ozonefs/ozonefs.robot   |  39 ++
 hadoop-ozone/ozonefs/pom.xml| 211 ++
 .../org/apache/hadoop/fs/ozone/Constants.java   |  42 ++
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |  44 ++
 .../hadoop/fs/ozone/OzoneFSInputStream.java |  79 +++
 .../hadoop/fs/ozone/OzoneFSOutputStream.java|  59 ++
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 689 +++
 .../apache/hadoop/fs/ozone/package-info.java|  30 +
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 157 +
 .../fs/ozone/TestOzoneFileInterfaces.java   | 231 +++
 .../contract/ITestOzoneContractCreate.java  |  48 ++
 .../contract/ITestOzoneContractDelete.java  |  48 ++
 .../contract/ITestOzoneContractDistCp.java  |  50 ++
 .../ITestOzoneContractGetFileStatus.java|  61 ++
 .../ozone/contract/ITestOzoneContractMkdir.java |  48 ++
 .../ozone/contract/ITestOzoneContractOpen.java  |  47 ++
 .../contract/ITestOzoneContractRename.java  |  49 ++
 .../contract/ITestOzoneContractRootDir.java |  51 ++
 .../ozone/contract/ITestOzoneContractSeek.java  |  47 ++
 .../hadoop/fs/ozone/contract/OzoneContract.java | 123 
 .../src/test/resources/contract/ozone.xml   | 113 +++
 .../ozonefs/src/test/resources/log4j.properties |  23 +
 hadoop-ozone/pom.xml|   1 +
 hadoop-project/pom.xml  |   6 +-
 hadoop-tools/hadoop-ozone/pom.xml   | 174 -
 .../org/apache/hadoop/fs/ozone/Constants.java   |  42 --
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |  44 --
 .../hadoop/fs/ozone/OzoneFSInputStream.java |  79 ---
 .../hadoop/fs/ozone/OzoneFSOutputStream.java|  59 --
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 689 ---
 .../apache/hadoop/fs/ozone/package-info.java|  30 -
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 157 -
 .../fs/ozone/TestOzoneFileInterfaces.java   | 231 ---
 .../contract/ITestOzoneContractCreate.java  |  48 --
 .../contract/ITestOzoneContractDelete.java  |  48 --
 .../contract/ITestOzoneContractDistCp.java  |  50 --
 .../ITestOzoneContractGetFileStatus.java|  61 --
 .../ozone/contract/ITestOzoneContractMkdir.java |  48 --
 .../ozone/contract/ITestOzoneContractOpen.java  |  47 --
 .../contract/ITestOzoneContractRename.java  |  49 --
 .../contract/ITestOzoneContractRootDir.java |  51 --
 .../ozone/contract/ITestOzoneContractSeek.java  |  47 --
 .../hadoop/fs/ozone/contract/OzoneContract.java | 123 
 .../src/test/resources/contract/ozone.xml   | 113 ---
 .../src/test/resources/log4j.properties |  23 -
 hadoop-tools/hadoop-tools-dist/pom.xml  |  15 -
 hadoop-tools/pom.xml|  11 -
 51 files changed, 2413 insertions(+), 2241 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index ad8abe2..be330d5 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -145,6 +145,8 @@ run copy 
"${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy 
"${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}"
 .
 run copy 
"${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy 
"${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+mkdir -p "./share/hadoop/ozonefs"
+cp 
"${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar"
 "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ksm/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
@@ 

[50/50] [abbrv] hadoop git commit: HDFS-13609. [SBN read] Edit Tail Fast Path Part 3: NameNode-side changes to support tailing edits via RPC. Contributed by Erik Krogen.

2018-06-25 Thread xkrogen
HDFS-13609. [SBN read] Edit Tail Fast Path Part 3: NameNode-side changes to 
support tailing edits via RPC. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83109733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83109733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83109733

Branch: refs/heads/HDFS-12943
Commit: 831097335833681505af257e15362bfef66987ad
Parents: da14307
Author: Erik Krogen 
Authored: Tue May 22 16:45:26 2018 -0700
Committer: Erik Krogen 
Committed: Tue Jun 26 11:01:07 2018 +0530

--
 .../hdfs/qjournal/client/AsyncLogger.java   |   7 +
 .../hdfs/qjournal/client/AsyncLoggerSet.java|  14 ++
 .../hdfs/qjournal/client/IPCLoggerChannel.java  |  14 ++
 .../qjournal/client/QuorumJournalManager.java   | 111 +++-
 .../server/namenode/EditLogFileInputStream.java |  44 +++
 .../hdfs/server/namenode/ha/EditLogTailer.java  |   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../client/TestQuorumJournalManager.java| 130 +++
 .../client/TestQuorumJournalManagerUnit.java| 101 +-
 .../namenode/TestEditLogFileInputStream.java|  18 +++
 10 files changed, 439 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83109733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
index d2b48cc..7230ebc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
@@ -22,6 +22,7 @@ import java.net.URL;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
+import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -106,6 +107,12 @@ interface AsyncLogger {
* Begin a new epoch on the target node.
*/
   public ListenableFuture newEpoch(long epoch);
+
+  /**
+   * Fetch journaled edits from the cache.
+   */
+  public ListenableFuture getJournaledEdits(
+  long fromTxnId, int maxTransactions);
   
   /**
* Fetch the list of edit logs available on the remote node.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83109733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
index d46c2cf..0366466 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
@@ -25,6 +25,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -261,6 +262,19 @@ class AsyncLoggerSet {
 return QuorumCall.create(calls);
   }
 
+  public QuorumCall<AsyncLogger, GetJournaledEditsResponseProto>
+      getJournaledEdits(long fromTxnId, int maxTransactions) {
+    Map<AsyncLogger, ListenableFuture<GetJournaledEditsResponseProto>> calls
+        = Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<GetJournaledEditsResponseProto> future =
+          logger.getJournaledEdits(fromTxnId, maxTransactions);
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
   public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest(
   long fromTxnId, boolean inProgressOk) {
 

[06/50] [abbrv] hadoop git commit: HDFS-13686. Add overall metrics for FSNamesystemLock. Contributed by Lukas Majercak.

2018-06-25 Thread xkrogen
HDFS-13686. Add overall metrics for FSNamesystemLock. Contributed by Lukas 
Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d31a3ce7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d31a3ce7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d31a3ce7

Branch: refs/heads/HDFS-12943
Commit: d31a3ce767d3bb68bdbb4f36d45600eab9f4f8b7
Parents: 1da3b55
Author: Inigo Goiri 
Authored: Fri Jun 15 15:59:19 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Jun 15 15:59:19 2018 -0700

--
 .../hdfs/server/namenode/FSNamesystemLock.java | 17 -
 .../hdfs/server/namenode/TestFSNamesystemLock.java | 10 --
 2 files changed, 20 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a3ce7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 900f8a2..f8e69e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -107,6 +107,8 @@ class FSNamesystemLock {
   private static final String WRITE_LOCK_METRIC_PREFIX = "FSNWriteLock";
   private static final String LOCK_METRIC_SUFFIX = "Nanos";
 
+  private static final String OVERALL_METRIC_NAME = "Overall";
+
   FSNamesystemLock(Configuration conf,
   MutableRatesWithAggregation detailedHoldTimeMetrics) {
 this(conf, detailedHoldTimeMetrics, new Timer());
@@ -320,12 +322,17 @@ class FSNamesystemLock {
*/
   private void addMetric(String operationName, long value, boolean isWrite) {
 if (metricsEnabled) {
-  String metricName =
-  (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) +
-  org.apache.commons.lang.StringUtils.capitalize(operationName) +
-  LOCK_METRIC_SUFFIX;
-  detailedHoldTimeMetrics.add(metricName, value);
+  String opMetric = getMetricName(operationName, isWrite);
+  detailedHoldTimeMetrics.add(opMetric, value);
+
+  String overallMetric = getMetricName(OVERALL_METRIC_NAME, isWrite);
+  detailedHoldTimeMetrics.add(overallMetric, value);
 }
   }
 
+  private static String getMetricName(String operationName, boolean isWrite) {
+return (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) +
+org.apache.commons.lang.StringUtils.capitalize(operationName) +
+LOCK_METRIC_SUFFIX;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31a3ce7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index 2daf5c2..49506fe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -333,7 +333,7 @@ public class TestFSNamesystemLock {
 FSNamesystemLock fsLock = new FSNamesystemLock(conf, rates, timer);
 
 fsLock.readLock();
-timer.advanceNanos(120);
+timer.advanceNanos(130);
 fsLock.readUnlock("foo");
 fsLock.readLock();
 timer.advanceNanos(240);
@@ -353,12 +353,18 @@ public class TestFSNamesystemLock {
 MetricsRecordBuilder rb = MetricsAsserts.mockMetricsRecordBuilder();
 rates.snapshot(rb, true);
 
-assertGauge("FSNReadLockFooNanosAvgTime", 180.0, rb);
+assertGauge("FSNReadLockFooNanosAvgTime", 185.0, rb);
 assertCounter("FSNReadLockFooNanosNumOps", 2L, rb);
 assertGauge("FSNReadLockBarNanosAvgTime", 200.0, rb);
 assertCounter("FSNReadLockBarNanosNumOps", 1L, rb);
 assertGauge("FSNWriteLockBazNanosAvgTime", 100.0, rb);
 assertCounter("FSNWriteLockBazNanosNumOps", 1L, rb);
+
+// Overall
+assertGauge("FSNReadLockOverallNanosAvgTime", 190.0, rb);
+assertCounter("FSNReadLockOverallNanosNumOps", 3L, rb);
+assertGauge("FSNWriteLockOverallNanosAvgTime", 100.0, rb);
+assertCounter("FSNWriteLockOverallNanosNumOps", 1L, rb);
   }
 
   /**



[14/50] [abbrv] hadoop git commit: MAPREDUCE-7063. Fix log level inconsistency in CombineFileInputFormat.java

2018-06-25 Thread xkrogen
MAPREDUCE-7063. Fix log level inconsistency in CombineFileInputFormat.java

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e94e597
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e94e597
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e94e597

Branch: refs/heads/HDFS-12943
Commit: 1e94e5977f9075af2f74e30a3b8e52f7ded67863
Parents: 2c87ec5
Author: Vidura Mudalige 
Authored: Thu Jun 14 21:23:01 2018 +0530
Committer: Akira Ajisaka 
Committed: Mon Jun 18 14:25:11 2018 -0700

--
 .../hadoop/mapreduce/lib/input/CombineFileInputFormat.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e94e597/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
index c7a737c..b16e127 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
@@ -425,8 +425,8 @@ public abstract class CombineFileInputFormat
   if (completedNodes.size() == totalNodes || totalLength == 0) {
 // All nodes have been walked over and marked as completed or all 
blocks
 // have been assigned. The rest should be handled via rackLock 
assignment.
-LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: "
-+ completedNodes.size() + ", size left: " + totalLength);
+LOG.debug("Terminated node allocation with : CompletedNodes: {}, size 
left: {}",
+completedNodes.size(), totalLength);
 break;
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/50] [abbrv] hadoop git commit: HDDS-184. Upgrade common-langs version to 3.7 in hadoop-tools/hadoop-ozone. Contributed by Takanobu Asanuma.

2018-06-25 Thread xkrogen
HDDS-184. Upgrade common-langs version to 3.7 in hadoop-tools/hadoop-ozone.
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca14fec0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca14fec0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca14fec0

Branch: refs/heads/HDFS-12943
Commit: ca14fec02fb14e1b708f266bc715e84ae9784d6f
Parents: 8a32bc3
Author: Anu Engineer 
Authored: Sat Jun 23 15:49:44 2018 -0700
Committer: Anu Engineer 
Committed: Sat Jun 23 15:49:44 2018 -0700

--
 .../ozone/genconf/TestGenerateOzoneRequiredConfigurations.java | 2 +-
 .../src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java  | 2 +-
 .../java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java| 2 +-
 .../java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java   | 2 +-
 .../java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java| 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca14fec0/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
index cfd1159..c2f5eb7 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.genconf;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.hamcrest.CoreMatchers;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca14fec0/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 0ff1d50..6906a9d 100644
--- 
a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.http.client.utils.URIBuilder;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca14fec0/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index b82c4a1..ad21f28 100644
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca14fec0/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 5a7cb4f..a225702 100644
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ 

[44/50] [abbrv] hadoop git commit: HDDS-191. Queue SCMCommands via EventQueue in SCM. Contributed by Elek, Marton.

2018-06-25 Thread xkrogen
HDDS-191. Queue SCMCommands via EventQueue in SCM.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a55d6bba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a55d6bba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a55d6bba

Branch: refs/heads/HDFS-12943
Commit: a55d6bba71c81c1c4e9d8cd11f55c78f10a548b0
Parents: 4ffe68a
Author: Anu Engineer 
Authored: Mon Jun 25 13:05:22 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jun 25 13:05:22 2018 -0700

--
 .../protocol/commands/CommandForDatanode.java   | 45 
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 20 -
 .../scm/server/StorageContainerManager.java |  8 +++-
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 39 +
 4 files changed, 110 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55d6bba/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
new file mode 100644
index 000..0c4964a
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import java.util.UUID;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * Command for the datanode with the destination address.
+ */
+public class CommandForDatanode<T extends GeneratedMessage> {
+
+  private final UUID datanodeId;
+
+  private final SCMCommand<T> command;
+
+  public CommandForDatanode(UUID datanodeId, SCMCommand<T> command) {
+    this.datanodeId = datanodeId;
+    this.command = command;
+  }
+
+  public UUID getDatanodeId() {
+    return datanodeId;
+  }
+
+  public SCMCommand<T> getCommand() {
+    return command;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55d6bba/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index b339fb7..fc8b013 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,6 +25,10 @@ import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -42,11 +46,14 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 import 

[46/50] [abbrv] hadoop git commit: HADOOP-15550. Avoid static initialization of ObjectMappers

2018-06-25 Thread xkrogen
HADOOP-15550. Avoid static initialization of ObjectMappers


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a3c6e9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a3c6e9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a3c6e9c

Branch: refs/heads/HDFS-12943
Commit: 7a3c6e9c3cd9ffdc71946fd12f5c3d59718c4939
Parents: c687a66
Author: Todd Lipcon 
Authored: Mon Jun 25 15:36:45 2018 -0700
Committer: Todd Lipcon 
Committed: Mon Jun 25 15:36:45 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   |  7 ++
 .../web/DelegationTokenAuthenticator.java   |  8 ++-
 .../apache/hadoop/util/HttpExceptionUtils.java  | 12 ++
 .../apache/hadoop/util/JsonSerialization.java   | 24 
 .../crypto/key/kms/server/KMSJSONWriter.java|  6 ++---
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  7 ++
 ...onfRefreshTokenBasedAccessTokenProvider.java |  8 +++
 .../CredentialBasedAccessTokenProvider.java |  8 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   |  8 +++
 .../hadoop/fs/azure/security/JsonUtils.java |  4 ++--
 10 files changed, 45 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a3c6e9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index edbf897..7b46075 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.security.token.TokenRenewer;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
@@ -79,7 +80,6 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -132,9 +132,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private final ValueQueue encKeyVersionQueue;
 
-  private static final ObjectWriter WRITER =
-  new ObjectMapper().writerWithDefaultPrettyPrinter();
-
   private final Text dtService;
 
   // Allow fallback to default kms server port 9600 for certain tests that do
@@ -237,7 +234,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static void writeJson(Object obj, OutputStream os)
   throws IOException {
 Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
-WRITER.writeValue(writer, obj);
+JsonSerialization.writer().writeValue(writer, obj);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a3c6e9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 617773b..0ae2af3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.security.token.delegation.web;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.net.NetUtils;
@@ -31,6 +29,7 @@ import 

[24/50] [abbrv] hadoop git commit: HADOOP-14918. Remove the Local Dynamo DB test option. Contributed by Gabor Bota.

2018-06-25 Thread xkrogen
HADOOP-14918. Remove the Local Dynamo DB test option. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b089a067
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b089a067
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b089a067

Branch: refs/heads/HDFS-12943
Commit: b089a06793d94d42b7da1b7566e366ceb748e081
Parents: d6ee429
Author: Sean Mackrory 
Authored: Wed Jun 20 16:10:36 2018 -0600
Committer: Sean Mackrory 
Committed: Wed Jun 20 16:45:08 2018 -0600

--
 hadoop-project/pom.xml  |   5 -
 hadoop-tools/hadoop-aws/pom.xml |  42 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |  11 +
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |   1 +
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |  21 +-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |   1 -
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  34 +-
 .../fs/s3a/commit/staging/StagingTestBase.java  |  30 +-
 .../s3a/s3guard/DynamoDBLocalClientFactory.java | 160 -
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |   4 +-
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  | 589 ---
 11 files changed, 73 insertions(+), 825 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b089a067/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8cb5bfc..ed0187b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -2060,10 +2060,5 @@
   
 
   
-
-  dynamodb-local-oregon
-  DynamoDB Local Release Repository
-  https://s3-us-west-2.amazonaws.com/dynamodb-local/release
-
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b089a067/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 24ed11d..c6dddb0 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -36,7 +36,6 @@
 true
 ${project.build.directory}/test
 
-1.11.86
 
 unset
 
@@ -49,6 +48,8 @@
 false
 
false
 
local
+
+20
 
   
 
@@ -162,6 +163,7 @@
 
${fs.s3a.s3guard.test.authoritative}
 
${fs.s3a.s3guard.test.implementation}
 
+
${test.integration.timeout}
   
   
   
@@ -299,23 +301,10 @@
   
 
 
-
-
-  dynamodblocal
-  
-
-  dynamodblocal
-
-  
-  
-
dynamodblocal
-  
-
-
 
 
-  non-auth
+  auth
   
 
   auth
@@ -346,6 +335,9 @@
 maven-surefire-plugin
 
   3600
+  
+
${test.integration.timeout}
+  
 
   
   
@@ -418,26 +410,6 @@
   compile
 
 
-  com.amazonaws
-  DynamoDBLocal
-  ${dynamodb.local.version}
-  test
-  
-
-  org.hamcrest
-  hamcrest-core
-
-
-  org.eclipse.jetty
-  jetty-http
-
-
-  org.apache.commons
-  commons-lang3
-
-  
-
-
   junit
   junit
   test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b089a067/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 4c95843..c521936 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -402,6 +402,17 @@ public final class Constants {
   "fs.s3a.s3guard.ddb.table";
 
   /**
+   * Test table name to use during DynamoDB integration test.
+   *
+   * The table will be modified, and deleted in the end of the tests.
+   * If this value is not set, the integration tests that would be destructive
+   * won't run.
+   */
+  @InterfaceStability.Unstable
+  public static final String S3GUARD_DDB_TEST_TABLE_NAME_KEY =
+  "fs.s3a.s3guard.ddb.test.table";
+
+  /**
* Whether to create the DynamoDB table if the table does not exist.
*/
   @InterfaceStability.Unstable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b089a067/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 

[08/50] [abbrv] hadoop git commit: HADOOP-15523. Shell command timeout given is in seconds whereas it is taken as millisec while scheduling. Contributed by Bilwa S T.

2018-06-25 Thread xkrogen
HADOOP-15523. Shell command timeout given is in seconds whereas it is taken as 
millisec while scheduling. Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3905fdb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3905fdb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3905fdb7

Branch: refs/heads/HDFS-12943
Commit: 3905fdb793e6370243d05d0c3036ca69898fe3fb
Parents: 8762e9c
Author: Surendra Singh Lilhore 
Authored: Sun Jun 17 12:12:01 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Sun Jun 17 12:12:01 2018 +0530

--
 .../fs/CommonConfigurationKeysPublic.java   |  4 +-
 .../security/ShellBasedUnixGroupsMapping.java   | 10 ++---
 .../main/java/org/apache/hadoop/util/Shell.java |  2 +-
 .../TestShellBasedUnixGroupsMapping.java| 39 ++--
 4 files changed, 43 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3905fdb7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 9e0ba20..c7f32f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
-  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY =
   "hadoop.security.groups.shell.command.timeout";
   /**
* @see
@@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic {
* core-default.xml
*/
   public static final long
-  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT =
   0L;
   /**
* @see

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3905fdb7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 94698d8..976ddba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
@@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured
   protected static final Logger LOG =
   LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
 
-  private long timeout = 0L;
+  private long timeout = CommonConfigurationKeys.
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
   private static final List<String> EMPTY_GROUPS = new LinkedList<>();
 
   @Override
@@ -61,10 +61,10 @@ public class ShellBasedUnixGroupsMapping extends Configured
 if (conf != null) {
   timeout = conf.getTimeDuration(
   CommonConfigurationKeys.
-  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
   CommonConfigurationKeys.
-  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT,
-  TimeUnit.SECONDS);
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT,
+  TimeUnit.MILLISECONDS);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3905fdb7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 0b76f0d..46a0fcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1191,7 +1191,7 @@ public 

[47/50] [abbrv] hadoop git commit: YARN-8438. TestContainer.testKillOnNew flaky on trunk. Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-8438. TestContainer.testKillOnNew flaky on trunk. Contributed by Szilard 
Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35ec9401
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35ec9401
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35ec9401

Branch: refs/heads/HDFS-12943
Commit: 35ec9401e829bfa10994790659a26b0babacae35
Parents: 7a3c6e9
Author: Miklos Szegedi 
Authored: Mon Jun 25 09:23:11 2018 -0700
Committer: Miklos Szegedi 
Committed: Mon Jun 25 15:47:54 2018 -0700

--
 .../containermanager/container/TestContainer.java| 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35ec9401/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 1a263ee..edf26d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -116,7 +116,7 @@ public class TestContainer {
   final NodeManagerMetrics metrics = NodeManagerMetrics.create();
   final Configuration conf = new YarnConfiguration();
   final String FAKE_LOCALIZATION_ERROR = "Fake localization error";
-  
+
   /**
* Verify correct container request events sent to localizer.
*/
@@ -591,9 +591,8 @@ public class TestContainer {
   Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
   containerMetrics.exitCode.value());
   Assert.assertTrue(containerMetrics.startTime.value() > 0);
-  Assert.assertTrue(
-  containerMetrics.finishTime.value() > containerMetrics.startTime
-  .value());
+  Assert.assertTrue(containerMetrics.finishTime.value() >=
+  containerMetrics.startTime.value());
   Assert.assertEquals(ContainerEventType.KILL_CONTAINER,
   wc.initStateToEvent.get(ContainerState.NEW));
   Assert.assertEquals(ContainerState.DONE,
@@ -1612,4 +1611,5 @@ public class TestContainer {
   return ((ContainerImpl)c).getRetryPolicy();
 }
   }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: YARN-8326. Removed exit code file check for launched container. Contributed by Shane Kumpf

2018-06-25 Thread xkrogen
YARN-8326.  Removed exit code file check for launched container.
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a32bc39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a32bc39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a32bc39

Branch: refs/heads/HDFS-12943
Commit: 8a32bc39eb210fca8052c472601e24c2446b4cc2
Parents: 1cdce86
Author: Eric Yang 
Authored: Fri Jun 22 19:12:48 2018 -0400
Committer: Eric Yang 
Committed: Fri Jun 22 19:12:48 2018 -0400

--
 .../launcher/ContainerLaunch.java| 19 ---
 1 file changed, 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a32bc39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index bb842af..04295e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -808,25 +808,6 @@ public class ContainerLaunch implements Callable<Integer> {
   }
 }
 
-final int sleepMsec = 100;
-int msecLeft = 2000;
-if (pidFilePath != null) {
-  File file = new File(getExitCodeFile(pidFilePath.toString()));
-  while (!file.exists() && msecLeft >= 0) {
-try {
-  Thread.sleep(sleepMsec);
-} catch (InterruptedException e) {
-}
-msecLeft -= sleepMsec;
-  }
-  if (msecLeft < 0) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Timeout while waiting for the exit code file:  "
-  + file.getAbsolutePath());
-}
-  }
-}
-
 // Reap the container
 boolean result = exec.reapContainer(
 new ContainerReapContext.Builder()


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: YARN-8443. Total #VCores in cluster metrics is wrong when CapacityScheduler reserved some containers. Contributed by Tao Yang.

2018-06-25 Thread xkrogen
YARN-8443. Total #VCores in cluster metrics is wrong when CapacityScheduler 
reserved some containers. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/440140ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/440140ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/440140ce

Branch: refs/heads/HDFS-12943
Commit: 440140cea6718229094a3d2b97b9b9bd28b95d9b
Parents: e16e5b3
Author: Weiwei Yang 
Authored: Mon Jun 25 09:15:31 2018 +0800
Committer: Weiwei Yang 
Committed: Mon Jun 25 09:15:31 2018 +0800

--
 .../yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/440140ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index 84f70d9..69d88aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -101,7 +101,7 @@ public class ClusterMetricsInfo {
   CapacityScheduler cs = (CapacityScheduler) rs;
   this.totalMB = availableMB + allocatedMB + reservedMB;
   this.totalVirtualCores =
-  availableVirtualCores + allocatedVirtualCores + containersReserved;
+  availableVirtualCores + allocatedVirtualCores + reservedVirtualCores;
   // TODO, add support of other schedulers to get total used resources
   // across partition.
   if (cs.getRootQueue() != null


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: HADOOP-15416. Clear error message in S3Guard diff if source not found. Contributed by Gabor Bota.

2018-06-25 Thread xkrogen
HADOOP-15416. Clear error message in S3Guard diff if source not found. 
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55fad6a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55fad6a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55fad6a3

Branch: refs/heads/HDFS-12943
Commit: 55fad6a3de3125d9e7e2e9a5f8fa5b1b22a1de60
Parents: 6432128
Author: Sean Mackrory 
Authored: Fri Jun 22 11:36:36 2018 -0600
Committer: Sean Mackrory 
Committed: Fri Jun 22 11:36:56 2018 -0600

--
 .../main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55fad6a3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index fbffba9..ac10e08 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -805,7 +805,9 @@ public abstract class S3GuardTool extends Configured implements Tool {
  */
 private void compareDir(FileStatus msDir, FileStatus s3Dir,
 PrintStream out) throws IOException {
-  Preconditions.checkArgument(!(msDir == null && s3Dir == null));
+  Preconditions.checkArgument(!(msDir == null && s3Dir == null),
+  "The path does not exist in metadata store and on s3.");
+
   if (msDir != null && s3Dir != null) {
 Preconditions.checkArgument(msDir.getPath().equals(s3Dir.getPath()),
 String.format("The path from metadata store and s3 are different:" 
+


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: HADOOP-15527. Improve delay check for stopping processes. Contributed by Vinod Kumar Vavilapalli

2018-06-25 Thread xkrogen
HADOOP-15527.  Improve delay check for stopping processes.
   Contributed by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c87ec5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c87ec5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c87ec5a

Branch: refs/heads/HDFS-12943
Commit: 2c87ec5affefeb1dc794c4eaae685a4e544f1841
Parents: fba9d7c
Author: Eric Yang 
Authored: Mon Jun 18 14:28:22 2018 -0400
Committer: Eric Yang 
Committed: Mon Jun 18 14:28:22 2018 -0400

--
 .../test/scripts/process_with_sigterm_trap.sh   | 24 
 1 file changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c87ec5a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
--
diff --git a/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh 
b/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
new file mode 100644
index 000..d7c7427
--- /dev/null
+++ b/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+trap "echo SIGTERM trapped!" SIGTERM
+trap "echo SIGINT  trapped!" SIGINT
+
+echo "$$" > "$1"
+
+while true; do
+  sleep 1.3
+done


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: YARN-8412. Move ResourceRequest.clone logic everywhere into a proper API. Contributed by Botong Huang.

2018-06-25 Thread xkrogen
YARN-8412. Move ResourceRequest.clone logic everywhere into a proper API. 
Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99948565
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99948565
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99948565

Branch: refs/heads/HDFS-12943
Commit: 99948565cb5d5706241d7a8fc591e1617c499e03
Parents: 59de967
Author: Inigo Goiri 
Authored: Thu Jun 21 18:24:10 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Jun 21 18:24:10 2018 -0700

--
 .../yarn/api/records/ResourceRequest.java   | 20 
 .../yarn/client/api/impl/AMRMClientImpl.java| 11 +--
 .../api/impl/TestAMRMClientOnRMRestart.java |  6 +-
 .../hadoop/yarn/server/AMRMClientRelayer.java   |  8 +---
 .../LocalityMulticastAMRMProxyPolicy.java   | 10 +-
 .../server/scheduler/ResourceRequestSet.java| 14 ++
 .../hadoop/yarn/server/utils/BuilderUtils.java  | 12 
 .../LocalityAppPlacementAllocator.java  | 10 ++
 .../server/resourcemanager/Application.java | 10 --
 .../server/resourcemanager/TestAppManager.java  | 18 ++
 10 files changed, 34 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99948565/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index eea81fe..a863910 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -102,6 +102,26 @@ public abstract class ResourceRequest implements Comparable<ResourceRequest> {
 .build();
   }
 
+  /**
+   * Clone a ResourceRequest object (shallow copy). Please keep it loaded with
+   * all (new) fields
+   *
+   * @param rr the object to copy from
+   * @return the copied object
+   */
+  @Public
+  @Evolving
+  public static ResourceRequest clone(ResourceRequest rr) {
+// Please keep it loaded with all (new) fields
+return ResourceRequest.newBuilder().priority(rr.getPriority())
+.resourceName(rr.getResourceName()).capability(rr.getCapability())
+.numContainers(rr.getNumContainers())
+.relaxLocality(rr.getRelaxLocality())
+.nodeLabelExpression(rr.getNodeLabelExpression())
+.executionTypeRequest(rr.getExecutionTypeRequest())
+.allocationRequestId(rr.getAllocationRequestId()).build();
+  }
+
   @Public
   @Unstable
   public static ResourceRequestBuilder newBuilder() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99948565/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 36c3cf1..7265d24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -451,16 +451,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 for(ResourceRequest r : ask) {
   // create a copy of ResourceRequest as we might change it while the
   // RPC layer is using it to send info across
-  ResourceRequest rr =
-  ResourceRequest.newBuilder().priority(r.getPriority())
-  .resourceName(r.getResourceName()).capability(r.getCapability())
-  .numContainers(r.getNumContainers())
-  .relaxLocality(r.getRelaxLocality())
-  .nodeLabelExpression(r.getNodeLabelExpression())
-  .executionTypeRequest(r.getExecutionTypeRequest())
-  .allocationRequestId(r.getAllocationRequestId())
-  .build();
-  askList.add(rr);
+  askList.add(ResourceRequest.clone(r));
 }
 return askList;
   }


[43/50] [abbrv] hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-06-25 Thread xkrogen
YARN-8457. Compilation is broken with -Pyarn-ui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ffe68a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ffe68a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ffe68a6

Branch: refs/heads/HDFS-12943
Commit: 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f
Parents: abc3e4b
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:38:03 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ffe68a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: HDFS-13673. TestNameNodeMetrics fails on Windows. Contributed by Zuoming Zhang.

2018-06-25 Thread xkrogen
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 292ccdce8 -> 831097335


HDFS-13673. TestNameNodeMetrics fails on Windows. Contributed by Zuoming Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43d994e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43d994e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43d994e4

Branch: refs/heads/HDFS-12943
Commit: 43d994e4a6dfd1c24eafb909d6f8a0663b20769a
Parents: b272b71
Author: Inigo Goiri 
Authored: Fri Jun 15 10:33:28 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Jun 15 10:33:28 2018 -0700

--
 .../hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43d994e4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index e34deea..05cf2ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -71,6 +71,7 @@ import 
org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -271,7 +272,8 @@ public class TestNameNodeMetrics {
 File dataDir = new File(fsVolume.getBaseURI());
 long capacity = fsVolume.getCapacity();
 volumeReferences.close();
-DataNodeTestUtils.injectDataDirFailure(dataDir);
+File storageDir = new File(dataDir, Storage.STORAGE_DIR_CURRENT);
+DataNodeTestUtils.injectDataDirFailure(storageDir);
 DataNodeTestUtils.waitForDiskError(dn, fsVolume);
 DataNodeTestUtils.triggerHeartbeat(dn);
 BlockManagerTestUtil.checkHeartbeat(bm);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: YARN-8437. Build oom-listener fails on older versions. (Miklos Szegedi via Haibo Chen)

2018-06-25 Thread xkrogen
YARN-8437. Build oom-listener fails on older versions. (Miklos Szegedi via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4939ffed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4939ffed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4939ffed

Branch: refs/heads/HDFS-12943
Commit: 4939ffedb151ce1550fcdd7ac04c79d8d0195891
Parents: 2d87592
Author: Haibo Chen 
Authored: Wed Jun 20 10:35:52 2018 -0700
Committer: Haibo Chen 
Committed: Wed Jun 20 10:42:13 2018 -0700

--
 .../src/CMakeLists.txt  |  4 +--
 .../oom-listener/test/oom_listener_test_main.cc | 33 ++--
 2 files changed, 19 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4939ffed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index a614f80..300bb65 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -188,5 +188,5 @@ add_executable(test-oom-listener
 main/native/oom-listener/impl/oom_listener.h
 main/native/oom-listener/test/oom_listener_test_main.cc
 )
-target_link_libraries(test-oom-listener gtest)
-output_directory(test-oom-listener test)
\ No newline at end of file
+target_link_libraries(test-oom-listener gtest rt)
+output_directory(test-oom-listener test)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4939ffed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
index 9627632..421c21e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
@@ -20,6 +20,7 @@
 
 extern "C" {
 #include "oom_listener.h"
+#include 
 }
 
 #include 
@@ -49,10 +50,10 @@ int main(int argc, char **argv) {
 
 class OOMListenerTest : public ::testing::Test {
 private:
-  char cgroup[PATH_MAX] = {};
-  const char* cgroup_root = nullptr;
+  char cgroup[PATH_MAX];
+  const char* cgroup_root;
 public:
-  OOMListenerTest() = default;
+  OOMListenerTest() : cgroup_root(NULL) {}
 
   virtual ~OOMListenerTest() = default;
   virtual const char* GetCGroup() { return cgroup; }
@@ -99,7 +100,7 @@ public:
 if (cgroup[0] != '\0') {
   rmdir(cgroup);
 }
-if (cgroup_root != nullptr &&
+if (cgroup_root != NULL &&
 cgroup_root != cgroup_candidates[0]) {
   rmdir(cgroup_root);
 }
@@ -184,7 +185,7 @@ TEST_F(OOMListenerTest, test_oom) {
   std::cout << "Consuming too much memory" << std::endl;
   for (;;) {
 auto buffer = (char *) malloc(bufferSize);
-if (buffer != nullptr) {
+if (buffer != NULL) {
   for (int i = 0; i < bufferSize; ++i) {
 buffer[i] = (char) std::rand();
   }
@@ -213,15 +214,15 @@ TEST_F(OOMListenerTest, test_oom) {
 if (listener == 0) {
   // child listener forwarding cgroup events
   _oom_listener_descriptors descriptors = {
-  .command = "test",
-  .event_fd = mock_oom_event_as_user,
-  .event_control_fd = -1,
-  .oom_control_fd = -1,
-  .event_control_path = {0},
-  .oom_control_path = {0},
-  .oom_command = {0},
-  .oom_command_len = 0,
-  .watch_timeout = 100
+  "test",
+  mock_oom_event_as_user,
+  -1,
+  -1,
+  {0},
+  {0},
+  {0},
+  0,
+  100
   };
  int ret = oom_listener(&descriptors, GetCGroup(), test_pipe[1]);
   cleanup();
@@ -256,7 +257,7 @@ TEST_F(OOMListenerTest, test_oom) {
   __pid_t exited0 = wait(mem_hog_status);
   ASSERT_EQ(mem_hog_pid, exited0)
 << "Wrong process exited";
-  ASSERT_EQ(nullptr, mem_hog_status)
+  ASSERT_EQ(NULL, 

[27/50] [abbrv] hadoop git commit: YARN-8445. Improved error message for duplicated service and component names. Contributed by Chandni Singh

2018-06-25 Thread xkrogen
YARN-8445.  Improved error message for duplicated service and component names.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f15483c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f15483c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f15483c

Branch: refs/heads/HDFS-12943
Commit: 9f15483c5d7c94251f4c84e0155449188f202779
Parents: 43541a1
Author: Eric Yang 
Authored: Thu Jun 21 11:18:14 2018 -0400
Committer: Eric Yang 
Committed: Thu Jun 21 11:20:24 2018 -0400

--
 .../service/exceptions/RestApiErrorMessages.java  |  2 ++
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  5 +
 .../hadoop/yarn/service/TestServiceApiUtil.java   | 18 ++
 3 files changed, 25 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f15483c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 1d2d719..5b3c72c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -28,6 +28,8 @@ public interface RestApiErrorMessages {
   "than 63 characters";
   String ERROR_COMPONENT_NAME_INVALID =
   "Component name must be no more than %s characters: %s";
+  String ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME =
+  "Component name %s must not be same as service name %s";
   String ERROR_USER_NAME_INVALID =
   "User name must be no more than 63 characters";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f15483c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 5499273..705e040 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -143,6 +143,11 @@ public class ServiceApiUtil {
 throw new IllegalArgumentException(String.format(RestApiErrorMessages
 .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName()));
   }
+  if (service.getName().equals(comp.getName())) {
+throw new IllegalArgumentException(String.format(RestApiErrorMessages
+.ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME,
+comp.getName(), service.getName()));
+  }
   if (componentNames.contains(comp.getName())) {
 throw new IllegalArgumentException("Component name collision: " +
 comp.getName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f15483c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
index 243c6b3..ae031d4 100644
--- 

[31/50] [abbrv] hadoop git commit: YARN-8444: NodeResourceMonitor crashes on bad swapFree value. Contributed by Jim Brennan.

2018-06-25 Thread xkrogen
YARN-8444: NodeResourceMonitor crashes on bad swapFree value. Contributed by 
Jim Brennan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64321286
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64321286
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64321286

Branch: refs/heads/HDFS-12943
Commit: 6432128622d64f3f9dd638b9c254c77cdf5408aa
Parents: 30728ac
Author: Eric E Payne 
Authored: Fri Jun 22 17:15:29 2018 +
Committer: Eric E Payne 
Committed: Fri Jun 22 17:15:29 2018 +

--
 .../org/apache/hadoop/util/SysInfoLinux.java| 19 ++-
 .../apache/hadoop/util/TestSysInfoLinux.java| 60 
 2 files changed, 77 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64321286/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index 7fd1990..2c2aca3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -217,6 +217,21 @@ public class SysInfoLinux extends SysInfo {
   }
 
   /**
+   *
+   * Wrapper for Long.parseLong() that returns zero if the value is
+   * invalid. Under some circumstances, swapFree in /proc/meminfo can
+   * go negative, reported as a very large decimal value.
+   */
+  private long safeParseLong(String strVal) {
+long parsedVal;
+try {
+  parsedVal = Long.parseLong(strVal);
+} catch (NumberFormatException nfe) {
+  parsedVal = 0;
+}
+return parsedVal;
+  }
+  /**
* Read /proc/meminfo, parse and compute memory information.
* @param readAgain if false, read only on the first time
*/
@@ -252,9 +267,9 @@ public class SysInfoLinux extends SysInfo {
   } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
 swapSize = Long.parseLong(mat.group(2));
   } else if (mat.group(1).equals(MEMFREE_STRING)) {
-ramSizeFree = Long.parseLong(mat.group(2));
+ramSizeFree = safeParseLong(mat.group(2));
   } else if (mat.group(1).equals(SWAPFREE_STRING)) {
-swapSizeFree = Long.parseLong(mat.group(2));
+swapSizeFree = safeParseLong(mat.group(2));
   } else if (mat.group(1).equals(INACTIVE_STRING)) {
 inactiveSize = Long.parseLong(mat.group(2));
   } else if (mat.group(1).equals(INACTIVEFILE_STRING)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64321286/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
index a646a41..0ae5d3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -161,6 +161,36 @@ public class TestSysInfoLinux {
 "DirectMap2M: 2027520 kB\n" +
 "DirectMap1G:132120576 kB\n";
 
+  static final String MEMINFO_FORMAT3 =
+"MemTotal:  %d kB\n" +
+"MemFree: %s kB\n" +
+"Buffers:138244 kB\n" +
+"Cached: 947780 kB\n" +
+"SwapCached: 142880 kB\n" +
+"Active:3229888 kB\n" +
+"Inactive:   %d kB\n" +
+"SwapTotal: %d kB\n" +
+"SwapFree:  %s kB\n" +
+"Dirty:  122012 kB\n" +
+"Writeback:   0 kB\n" +
+"AnonPages: 2710792 kB\n" +
+"Mapped:  24740 kB\n" +
+"Slab:   132528 kB\n" +
+"SReclaimable:   105096 kB\n" +
+"SUnreclaim:  27432 kB\n" +
+"PageTables:  11448 kB\n" +
+"NFS_Unstable:0 kB\n" +
+"Bounce:  0 kB\n" +
+"CommitLimit:   4125904 kB\n" +
+"Committed_AS:  4143556 kB\n" +
+"VmallocTotal: 34359738367 kB\n" +
+"VmallocUsed:  1632 kB\n" +
+"VmallocChunk: 34359736375 kB\n" +
+"HugePages_Total: %d\n" +
+"HugePages_Free:  0\n" +
+"HugePages_Rsvd:  0\n" +
+"Hugepagesize: 2048 kB";
+
   static final String CPUINFO_FORMAT =
 "processor : %s\n" +
 "vendor_id : AuthenticAMD\n" +
@@ -384,6 +414,36 @@ public class TestSysInfoLinux {
   (nrHugePages * 2048) + 

[19/50] [abbrv] hadoop git commit: YARN-8440. Typo in YarnConfiguration javadoc: "Miniumum request grant-able..". Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-8440. Typo in YarnConfiguration javadoc: "Miniumum request grant-able..". 
Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55432b09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55432b09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55432b09

Branch: refs/heads/HDFS-12943
Commit: 55432b09810b59ee361d0d4a8958efabb49fab3c
Parents: 9a9e969
Author: Miklos Szegedi 
Authored: Wed Jun 20 11:36:12 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 11:36:12 2018 -0700

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55432b09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5292a25..5842d64 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -240,7 +240,7 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" +
 DEFAULT_RM_SCHEDULER_PORT;
 
-  /** Miniumum request grant-able by the RM scheduler. */
+  /** Minimum request grant-able by the RM scheduler. */
   public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB =
 YARN_PREFIX + "scheduler.minimum-allocation-mb";
   public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: HADOOP-14396. Add builder interface to FileContext. Contributed by Lei (Eddy) Xu.

2018-06-25 Thread xkrogen
HADOOP-14396. Add builder interface to FileContext.
Contributed by  Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ba4e623
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ba4e623
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ba4e623

Branch: refs/heads/HDFS-12943
Commit: 1ba4e62304a70d53f1a4f76995b6e1fac3107922
Parents: 440140c
Author: Steve Loughran 
Authored: Mon Jun 25 14:38:33 2018 +0100
Committer: Steve Loughran 
Committed: Mon Jun 25 14:38:57 2018 +0100

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java| 22 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 66 
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +
 .../fs/FileContextMainOperationsBaseTest.java   | 44 -
 4 files changed, 134 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba4e623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 86c284a..d431293 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -116,6 +116,27 @@ public abstract class FSDataOutputStreamBuilder
   protected abstract B getThisBuilder();
 
   /**
+   * Construct from a {@link FileContext}.
+   *
+   * @param fc FileContext
+   * @param p path.
+   * @throws IOException
+   */
+  FSDataOutputStreamBuilder(@Nonnull FileContext fc,
+  @Nonnull Path p) throws IOException {
+Preconditions.checkNotNull(fc);
+Preconditions.checkNotNull(p);
+this.fs = null;
+this.path = p;
+
+AbstractFileSystem afs = fc.getFSofPath(p);
+FsServerDefaults defaults = afs.getServerDefaults(p);
+bufferSize = defaults.getFileBufferSize();
+replication = defaults.getReplication();
+blockSize = defaults.getBlockSize();
+  }
+
+  /**
* Constructor.
*/
   protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem,
@@ -131,6 +152,7 @@ public abstract class FSDataOutputStreamBuilder
   }
 
   protected FileSystem getFS() {
+Preconditions.checkNotNull(fs);
 return fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba4e623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 6ea69d0..5215c3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -35,6 +36,8 @@ import java.util.Stack;
 import java.util.TreeSet;
 import java.util.Map.Entry;
 
+import javax.annotation.Nonnull;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -695,6 +698,69 @@ public class FileContext {
   }
 
   /**
+   * {@link FSDataOutputStreamBuilder} for {@link FileContext}.
+   */
+  private static final class FCDataOutputStreamBuilder extends
+  FSDataOutputStreamBuilder<
+FSDataOutputStream, FCDataOutputStreamBuilder> {
+private final FileContext fc;
+
+private FCDataOutputStreamBuilder(
+@Nonnull FileContext fc, @Nonnull Path p) throws IOException {
+  super(fc, p);
+  this.fc = fc;
+  Preconditions.checkNotNull(fc);
+}
+
+@Override
+protected FCDataOutputStreamBuilder getThisBuilder() {
+  return this;
+}
+
+@Override
+public FSDataOutputStream build() throws IOException {
+  final EnumSet flags = getFlags();
+  List createOpts = new ArrayList<>(Arrays.asList(
+  CreateOpts.blockSize(getBlockSize()),
+  CreateOpts.bufferSize(getBufferSize()),
+  CreateOpts.repFac(getReplication()),
+  CreateOpts.perms(getPermission())
+  ));
+  if (getChecksumOpt() != null) {
+

[02/50] [abbrv] hadoop git commit: HDFS-13676. TestEditLogRace fails on Windows. Contributed by Zuoming Zhang.

2018-06-25 Thread xkrogen
HDFS-13676. TestEditLogRace fails on Windows. Contributed by Zuoming Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eebeb603
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eebeb603
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eebeb603

Branch: refs/heads/HDFS-12943
Commit: eebeb6033fd09791fcbff626f128a98e393f0a88
Parents: 43d994e
Author: Inigo Goiri 
Authored: Fri Jun 15 13:07:54 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Jun 15 13:07:54 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/TestEditLogRace.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eebeb603/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 46010e0..10f571c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -84,6 +84,8 @@ public class TestEditLogRace {
 TestEditLogRace.useAsyncEditLog = useAsyncEditLog;
   }
 
+  private static final String NAME_DIR = MiniDFSCluster.getBaseDirectory() + 
"name-0-1";
+
   private static final Log LOG = LogFactory.getLog(TestEditLogRace.class);
 
   // This test creates NUM_THREADS threads and each thread continuously writes
@@ -363,8 +365,8 @@ public class TestEditLogRace {
 useAsyncEditLog);
 FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
 conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-//conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR);
-//conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
+conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR);
+conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
 conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 return conf;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: HADOOP-15549. Upgrade to commons-configuration 2.1 regresses task CPU consumption

2018-06-25 Thread xkrogen
HADOOP-15549. Upgrade to commons-configuration 2.1 regresses task CPU 
consumption


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59de9679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59de9679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59de9679

Branch: refs/heads/HDFS-12943
Commit: 59de9679540f6d0edfb34cf9f88e52b51d94b4f4
Parents: 9f15483
Author: Todd Lipcon 
Authored: Thu Jun 21 10:32:52 2018 -0700
Committer: Todd Lipcon 
Committed: Thu Jun 21 10:32:52 2018 -0700

--
 .../apache/hadoop/metrics2/impl/MetricsConfig.java   | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59de9679/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index 027450c..976f16b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -37,10 +37,8 @@ import com.google.common.collect.Maps;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.commons.configuration2.PropertiesConfiguration;
 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.configuration2.builder.fluent.Configurations;
-import org.apache.commons.configuration2.builder.fluent.Parameters;
-import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
 import org.apache.commons.configuration2.ex.ConfigurationException;
+import org.apache.commons.configuration2.io.FileHandler;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsPlugin;
 import org.apache.hadoop.metrics2.filter.GlobFilter;
@@ -112,12 +110,11 @@ class MetricsConfig extends SubsetConfiguration {
   static MetricsConfig loadFirst(String prefix, String... fileNames) {
 for (String fname : fileNames) {
   try {
-Configuration cf = new Configurations().propertiesBuilder(fname)
-.configure(new Parameters().properties()
-.setFileName(fname)
-.setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
-  .getConfiguration()
-  .interpolatedConfiguration();
+PropertiesConfiguration pcf = new PropertiesConfiguration();
+FileHandler fh = new FileHandler(pcf);
+fh.setFileName(fname);
+fh.load();
+Configuration cf = pcf.interpolatedConfiguration();
 LOG.info("Loaded properties from {}", fname);
 if (LOG.isDebugEnabled()) {
   LOG.debug("Properties: {}", toString(cf));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: MAPREDUCE-7113. Typos in test names in TestTaskAttempt: "testAppDiognostic". Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
MAPREDUCE-7113. Typos in test names in TestTaskAttempt: "testAppDiognostic". 
Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6ee4290
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6ee4290
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6ee4290

Branch: refs/heads/HDFS-12943
Commit: d6ee4290df10ad3e0b087c21accd602508e0a197
Parents: 46f9058
Author: Miklos Szegedi 
Authored: Wed Jun 20 12:04:44 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 12:04:44 2018 -0700

--
 .../apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6ee4290/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index 609923f..b1b7b8f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -853,7 +853,7 @@ public class TestTaskAttempt{
 
 
   @Test
-  public void testAppDiognosticEventOnUnassignedTask() throws Exception {
+  public void testAppDiagnosticEventOnUnassignedTask() {
 ApplicationId appId = ApplicationId.newInstance(1, 2);
 ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
 appId, 0);
@@ -978,7 +978,7 @@ public class TestTaskAttempt{
   }
 
   @Test
-  public void testAppDiognosticEventOnNewTask() throws Exception {
+  public void testAppDiagnosticEventOnNewTask() {
 ApplicationId appId = ApplicationId.newInstance(1, 2);
 ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
 appId, 0);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread xkrogen
HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abc3e4ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abc3e4ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abc3e4ba

Branch: refs/heads/HDFS-12943
Commit: abc3e4bad905efde5a4881e8a072c68f6e910ade
Parents: 1ba4e62
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:50:27 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abc3e4ba/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 0e337b4..d5622af 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -689,17 +689,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HDFS-13681. Fix TestStartup.testNNFailToStartOnReadOnlyNNDir test failure on Windows. Contributed by Xiao Liang.

2018-06-25 Thread xkrogen
HDFS-13681. Fix TestStartup.testNNFailToStartOnReadOnlyNNDir test failure on 
Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8762e9cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8762e9cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8762e9cf

Branch: refs/heads/HDFS-12943
Commit: 8762e9cf10fa100dd5f7fd695f5e52b75a94c5d4
Parents: d31a3ce
Author: Inigo Goiri 
Authored: Fri Jun 15 16:49:06 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Jun 15 16:49:06 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestStartup.java | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8762e9cf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index d5f5487..2401608 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -728,8 +728,8 @@ public class TestStartup {
   assertTrue(nnDirs.iterator().hasNext());
   assertEquals(
   "NN dir should be created after NN startup.",
-  nnDirStr,
-  nnDirs.iterator().next().getPath());
+  new File(nnDirStr),
+  new File(nnDirs.iterator().next().getPath()));
   final File nnDir = new File(nnDirStr);
   assertTrue(nnDir.exists());
   assertTrue(nnDir.isDirectory());
@@ -738,7 +738,7 @@ public class TestStartup {
 /* set read only */
 assertTrue(
 "Setting NN dir read only should succeed.",
-nnDir.setReadOnly());
+FileUtil.setWritable(nnDir, false));
 cluster.restartNameNodes();
 fail("Restarting NN should fail on read only NN dir.");
   } catch (InconsistentFSStateException e) {
@@ -750,7 +750,8 @@ public class TestStartup {
 "storage directory does not exist or is not accessible.";
   } finally {
 /* set back to writable in order to clean it */
-assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true));
+assertTrue("Setting NN dir should succeed.",
+FileUtil.setWritable(nnDir, true));
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: HDFS-13692. StorageInfoDefragmenter floods log when compacting StorageInfo TreeSet. Contributed by Bharat Viswanadham.

2018-06-25 Thread xkrogen
HDFS-13692. StorageInfoDefragmenter floods log when compacting StorageInfo 
TreeSet. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30728ace
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30728ace
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30728ace

Branch: refs/heads/HDFS-12943
Commit: 30728aced4a6b05394b3fc8c613f39fade9cf3c2
Parents: 9994856
Author: Yiqun Lin 
Authored: Fri Jun 22 10:50:54 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Jun 22 10:50:54 2018 +0800

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30728ace/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 76a7781..72ea1c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4576,7 +4576,7 @@ public class BlockManager implements BlockStatsMXBean {
   datanodesAndStorages.add(node.getDatanodeUuid());
   datanodesAndStorages.add(storage.getStorageID());
 }
-LOG.info("StorageInfo TreeSet fill ratio {} : {}{}",
+LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}",
  storage.getStorageID(), ratio,
  (ratio < storageInfoDefragmentRatio)
  ? " (queued for defragmentation)" : "");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: YARN-7449. Split up class TestYarnClient to TestYarnClient and TestYarnClientImpl. Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-7449. Split up class TestYarnClient to TestYarnClient and 
TestYarnClientImpl. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbbc7cc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbbc7cc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbbc7cc4

Branch: refs/heads/HDFS-12943
Commit: bbbc7cc426f71ad0fe4174efcd25e5ac3f62b501
Parents: 55432b0
Author: Miklos Szegedi 
Authored: Wed Jun 20 11:40:56 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 11:40:56 2018 -0700

--
 .../yarn/client/api/impl/TestYarnClient.java| 337 ---
 .../client/api/impl/TestYarnClientImpl.java | 254 ++
 2 files changed, 324 insertions(+), 267 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbbc7cc4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 70ff47b..17e43ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -18,41 +18,9 @@
 
 package org.apache.hadoop.yarn.client.api.impl;
 
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.lang.Thread.State;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputByteBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
@@ -74,7 +42,6 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -92,7 +59,6 @@ import 
org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.AHSClient;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.YarnClientApplication;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -100,7 +66,6 @@ import 
org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import 
org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import 

[04/50] [abbrv] hadoop git commit: HDDS-172. The numbers of operation should be integer in KSM UI. Contributed by Takanobu Asanuma.

2018-06-25 Thread xkrogen
HDDS-172. The numbers of operation should be integer in KSM UI.
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/308a1591
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/308a1591
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/308a1591

Branch: refs/heads/HDFS-12943
Commit: 308a1591f9f41597f4e7cc17bca06c66d6efc0a2
Parents: c966a38
Author: Anu Engineer 
Authored: Fri Jun 15 10:23:58 2018 -0700
Committer: Anu Engineer 
Committed: Fri Jun 15 14:10:17 2018 -0700

--
 hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/308a1591/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
--
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js 
b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
index 7fb52b1..ab6f73b 100644
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
@@ -48,7 +48,9 @@
 labelType: 'value',
 duration: 500,
 labelThreshold: 0.01,
-labelSunbeamLayout: true,
+valueFormat: function(d) {
+return d3.format('d')(d);
+},
 legend: {
 margin: {
 top: 5,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: HDFS-13621. Upgrade commons-lang version to 3.7 in hadoop-hdfs-project. Contributed by Takanobu Asanuma.

2018-06-25 Thread xkrogen
HDFS-13621. Upgrade commons-lang version to 3.7 in hadoop-hdfs-project. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fba9d7cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fba9d7cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fba9d7cd

Branch: refs/heads/HDFS-12943
Commit: fba9d7cd746cd7b659d2fd9d2bfa23266be9009b
Parents: 2a4632d
Author: Akira Ajisaka 
Authored: Mon Jun 18 10:17:07 2018 -0700
Committer: Akira Ajisaka 
Committed: Mon Jun 18 10:17:07 2018 -0700

--
 .../src/main/java/org/apache/hadoop/fs/XAttr.java   |  4 ++--
 .../java/org/apache/hadoop/hdfs/ExtendedBlockId.java|  4 ++--
 .../hadoop/hdfs/client/impl/BlockReaderFactory.java |  2 +-
 .../hdfs/protocol/AddErasureCodingPolicyResponse.java   |  4 ++--
 .../apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java |  4 ++--
 .../org/apache/hadoop/hdfs/protocol/CachePoolInfo.java  |  4 ++--
 .../org/apache/hadoop/hdfs/protocol/EncryptionZone.java |  4 ++--
 .../hadoop/hdfs/protocol/ErasureCodingPolicy.java   |  4 ++--
 .../hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java   |  4 ++--
 .../org/apache/hadoop/hdfs/protocol/ExtendedBlock.java  |  2 +-
 .../hadoop/hdfs/shortcircuit/DfsClientShmManager.java   |  2 +-
 .../hadoop/hdfs/shortcircuit/ShortCircuitCache.java |  2 +-
 .../hadoop/hdfs/shortcircuit/ShortCircuitShm.java   |  4 ++--
 .../namenode/ha/TestRequestHedgingProxyProvider.java|  2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |  5 -
 .../hdfs/server/federation/router/ConnectionPoolId.java |  2 +-
 .../server/federation/router/RemoteLocationContext.java |  2 +-
 .../store/driver/impl/StateStoreFileImpl.java   |  2 +-
 .../server/federation/store/records/MountTable.java |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  5 -
 .../org/apache/hadoop/hdfs/protocol/CacheDirective.java |  2 +-
 .../hdfs/qjournal/server/GetJournalEditServlet.java |  4 ++--
 .../org/apache/hadoop/hdfs/qjournal/server/Journal.java | 12 ++--
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   |  2 +-
 .../server/datanode/fsdataset/impl/FsDatasetCache.java  |  2 +-
 .../hdfs/server/diskbalancer/command/Command.java   |  4 ++--
 .../hdfs/server/diskbalancer/command/PlanCommand.java   |  4 ++--
 .../hdfs/server/diskbalancer/command/ReportCommand.java |  4 ++--
 .../hdfs/server/namenode/EncryptionZoneManager.java |  4 ++--
 .../hdfs/server/namenode/FSDirErasureCodingOp.java  |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  6 +++---
 .../hdfs/server/namenode/INodeAttributeProvider.java|  2 +-
 .../hdfs/server/namenode/startupprogress/Step.java  |  6 +++---
 .../hdfs/server/namenode/top/metrics/TopMetrics.java|  2 +-
 .../java/org/apache/hadoop/hdfs/tools/CacheAdmin.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/util/EnumCounters.java  |  2 +-
 .../apache/hadoop/fs/TestEnhancedByteBufferAccess.java  |  4 ++--
 .../test/java/org/apache/hadoop/fs/TestGlobPaths.java   |  2 +-
 .../hadoop/fs/TestWebHdfsFileContextMainOperations.java |  2 +-
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  4 ++--
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDecommission.java   |  2 +-
 .../org/apache/hadoop/hdfs/TestFsShellPermission.java   |  2 +-
 .../org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java  |  2 +-
 .../apache/hadoop/hdfs/TestLeaseRecoveryStriped.java|  2 +-
 .../datatransfer/sasl/SaslDataTransferTestCase.java |  2 +-
 .../hadoop/hdfs/server/balancer/TestBalancer.java   |  2 +-
 .../hadoop/hdfs/server/datanode/SimulatedFSDataset.java |  2 +-
 .../diskbalancer/command/TestDiskBalancerCommand.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSImageTestUtil.java|  2 +-
 .../hadoop/hdfs/server/namenode/NameNodeAdapter.java|  2 +-
 .../hadoop/hdfs/server/namenode/TestAuditLogger.java|  2 +-
 .../hdfs/server/namenode/TestCacheDirectives.java   |  2 +-
 .../server/namenode/TestEditLogJournalFailures.java |  2 +-
 .../hadoop/hdfs/shortcircuit/TestShortCircuitCache.java |  2 +-
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java |  2 +-
 .../java/org/apache/hadoop/tracing/TestTracing.java |  2 +-
 57 files changed, 80 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fba9d7cd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
index ad7b056..de9bbda 100644
--- 

[21/50] [abbrv] hadoop git commit: YARN-8442. Strange characters and missing spaces in FairScheduler documentation. Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-8442. Strange characters and missing spaces in FairScheduler 
documentation. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/388fafa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/388fafa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/388fafa0

Branch: refs/heads/HDFS-12943
Commit: 388fafa004dc405a4e10f4487cff7c5a714af32f
Parents: bbbc7cc
Author: Miklos Szegedi 
Authored: Wed Jun 20 11:55:43 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 11:55:43 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/388fafa0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index e253d0d..269f5b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -148,7 +148,7 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **secondaryGroupExistingQueue**: the app is placed into a queue with a 
name that matches a secondary group of the user who submitted it. The first 
secondary group that matches a configured queue will be selected. Periods in 
group names will be replaced with "\_dot\_", i.e. a user with "one.two" as one 
of their secondary groups would be placed into the "one\_dot\_two" queue, if 
such a queue exists.
 
-* **nestedUserQueue**: the app is placed into a queue with the name of the 
user under the queue suggested by the nested rule. This is similar to 
‘user’ rule,the difference being in 'nestedUserQueue' rule,user 
queues can be created under any parent queue, while 'user' rule creates user 
queues only under root queue. Note that nestedUserQueue rule would be applied 
only if the nested rule returns a parent queue.One can configure a parent queue 
either by setting 'type' attribute of queue to 'parent' or by configuring at 
least one leaf under that queue which makes it a parent. See example allocation 
for a sample use case.
+* **nestedUserQueue**: the app is placed into a queue with the name of the 
user under the queue suggested by the nested rule. This is similar to the 
'user' rule, the difference being in 'nestedUserQueue' rule, user queues can be 
created under any parent queue, while 'user' rule creates user queues only 
under root queue. Note that nestedUserQueue rule would be applied only if the 
nested rule returns a parent queue. One can configure a parent queue either by 
setting 'type' attribute of queue to 'parent' or by configuring at least one 
leaf under that queue which makes it a parent. See example allocation for a 
sample use case.
 
 * **default**: the app is placed into the queue specified in the 'queue' 
attribute of the default rule. If 'queue' attribute is not specified, the app 
is placed into 'root.default' queue.
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: YARN-8391. Investigate AllocationFileLoaderService.reloadListener locking issue. Contributed by Szilard Nemeth.

2018-06-25 Thread xkrogen
YARN-8391. Investigate AllocationFileLoaderService.reloadListener locking 
issue. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a9e9695
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a9e9695
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a9e9695

Branch: refs/heads/HDFS-12943
Commit: 9a9e969570f23b627f9571819f388916d8fd7ec9
Parents: 4939ffe
Author: Miklos Szegedi 
Authored: Wed Jun 20 10:29:12 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Jun 20 10:59:33 2018 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a9e9695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 56cc887..3300948 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -114,7 +114,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
   reloadThread = new Thread(() -> {
 while (running) {
   try {
-reloadListener.onCheck();
+synchronized (this) {
+  reloadListener.onCheck();
+}
 long time = clock.getTime();
 long lastModified =
 fs.getFileStatus(allocFile).getModificationTime();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: YARN-7668. Remove unused variables from ContainerLocalizer

2018-06-25 Thread xkrogen
YARN-7668. Remove unused variables from ContainerLocalizer

This closes #364

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f386e78a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f386e78a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f386e78a

Branch: refs/heads/HDFS-12943
Commit: f386e78a4bc74e7c247b179c7d4ec27310fda4d3
Parents: 1e94e59
Author: Dedunu Dhananjaya 
Authored: Mon Apr 23 09:34:57 2018 +
Committer: Akira Ajisaka 
Committed: Mon Jun 18 16:45:50 2018 -0700

--
 .../nodemanager/containermanager/localizer/ContainerLocalizer.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f386e78a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 639a69d..6a384ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -93,9 +93,7 @@ public class ContainerLocalizer {
   public static final String FILECACHE = "filecache";
   public static final String APPCACHE = "appcache";
   public static final String USERCACHE = "usercache";
-  public static final String OUTPUTDIR = "output";
   public static final String TOKEN_FILE_NAME_FMT = "%s.tokens";
-  public static final String WORKDIR = "work";
   private static final String APPCACHE_CTXT_FMT = "%s.app.cache.dirs";
   private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs";
   private static final FsPermission FILECACHE_PERMS =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: HADOOP-13186. Multipart Uploader API. Contributed by Ewan Higgs

2018-06-25 Thread xkrogen
HADOOP-13186. Multipart Uploader API. Contributed by Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/980031bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/980031bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/980031bb

Branch: refs/heads/HDFS-12943
Commit: 980031bb043dd026a6bf42b0e71d304ac89294a5
Parents: 3905fdb
Author: Chris Douglas 
Authored: Sun Jun 17 11:54:26 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 11:54:26 2018 -0700

--
 .../java/org/apache/hadoop/fs/BBPartHandle.java |  58 +++
 .../org/apache/hadoop/fs/BBUploadHandle.java|  57 +++
 .../hadoop/fs/FileSystemMultipartUploader.java  | 132 
 .../hadoop/fs/LocalFileSystemPathHandle.java| 100 +
 .../org/apache/hadoop/fs/MultipartUploader.java |  90 +++
 .../hadoop/fs/MultipartUploaderFactory.java |  65 
 .../java/org/apache/hadoop/fs/PartHandle.java   |  45 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  61 +++-
 .../UnsupportedMultipartUploaderException.java  |  41 +
 .../java/org/apache/hadoop/fs/UploadHandle.java |  47 ++
 .../hadoop-common/src/main/proto/FSProtos.proto |   8 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  16 ++
 .../fs/AbstractSystemMultipartUploaderTest.java | 143 ++
 .../TestLocalFileSystemMultipartUploader.java   |  65 
 .../AbstractContractPathHandleTest.java |   6 +
 .../TestRawlocalContractPathHandle.java |  40 +
 .../src/test/resources/contract/rawlocal.xml|   5 +
 .../hdfs/DFSMultipartUploaderFactory.java   |  40 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  16 ++
 .../hadoop/fs/TestHDFSMultipartUploader.java|  76 ++
 .../hadoop/fs/s3a/S3AMultipartUploader.java | 150 +++
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  15 ++
 .../org.apache.hadoop.fs.MultipartUploader  |  16 ++
 23 files changed, 1290 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/980031bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
new file mode 100644
index 000..e1336b8
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed part handle.
+ */
+public final class BBPartHandle implements PartHandle {
+
+  private static final long serialVersionUID = 0x23ce3eb1;
+
+  private final byte[] bytes;
+
+  private BBPartHandle(ByteBuffer byteBuffer){
+this.bytes = byteBuffer.array();
+  }
+
+  public static PartHandle from(ByteBuffer byteBuffer) {
+return new BBPartHandle(byteBuffer);
+  }
+
+  @Override
+  public ByteBuffer bytes() {
+return ByteBuffer.wrap(bytes);
+  }
+
+  @Override
+  public int hashCode() {
+return Arrays.hashCode(bytes);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (!(other instanceof PartHandle)) {
+  return false;
+
+}
+PartHandle o = (PartHandle) other;
+return bytes().equals(o.bytes());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/980031bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
new file mode 100644
index 000..6430c14
--- /dev/null
+++ 

[03/50] [abbrv] hadoop git commit: HDFS-13174. hdfs mover -p /path times out after 20 min. Contributed by Istvan Fajth.

2018-06-25 Thread xkrogen
HDFS-13174. hdfs mover -p /path times out after 20 min. Contributed by Istvan 
Fajth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c966a383
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c966a383
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c966a383

Branch: refs/heads/HDFS-12943
Commit: c966a3837af1c1a1c4a441f491b0d76d5c9e5d78
Parents: eebeb60
Author: Wei-Chiu Chuang 
Authored: Fri Jun 15 13:35:50 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jun 15 13:36:46 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  6 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 30 +---
 .../src/main/resources/hdfs-default.xml | 10 +++
 .../hdfs/server/balancer/TestBalancer.java  | 79 
 .../hadoop/hdfs/server/mover/TestMover.java | 46 
 6 files changed, 163 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c966a383/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bc8e81f..dde7eb7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -581,7 +581,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_BALANCER_BLOCK_MOVE_TIMEOUT = 
"dfs.balancer.block-move.timeout";
   public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0;
   public static final String  DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = 
"dfs.balancer.max-no-move-interval";
-  public static final intDFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 
60*1000; // One minute
+  public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 
60*1000; // One minute
+  public static final String  DFS_BALANCER_MAX_ITERATION_TIME_KEY = 
"dfs.balancer.max-iteration-time";
+  public static final longDFS_BALANCER_MAX_ITERATION_TIME_DEFAULT = 20 * 
60 * 1000L; // 20 mins
 
 
   public static final String  DFS_MOVER_MOVEDWINWIDTH_KEY = 
"dfs.mover.movedWinWidth";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c966a383/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 13d5846..426c7ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -289,13 +289,17 @@ public class Balancer {
 final int maxNoMoveInterval = conf.getInt(
 DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT);
+final long maxIterationTime = conf.getLong(
+DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
+DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
 p.getExcludedNodes(), movedWinWidth, moverThreads,
 dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
-getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, conf);
+getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval,
+maxIterationTime, conf);
 this.threshold = p.getThreshold();
 this.policy = p.getBalancingPolicy();
 this.sourceNodes = p.getSourceNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c966a383/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 349ced1..060c013 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 

[05/50] [abbrv] hadoop git commit: HADOOP-15504. Upgrade Maven Core and Maven Wagon dependencies.

2018-06-25 Thread xkrogen
HADOOP-15504. Upgrade Maven Core and Maven Wagon dependencies.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1da3b556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1da3b556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1da3b556

Branch: refs/heads/HDFS-12943
Commit: 1da3b556591fffaae0751c4f7fceda34d2314fda
Parents: 308a159
Author: Sean Mackrory 
Authored: Tue May 1 08:56:10 2018 -0600
Committer: Sean Mackrory 
Committed: Fri Jun 15 16:06:26 2018 -0600

--
 hadoop-maven-plugins/pom.xml| 38 +++-
 .../maven/plugin/resourcegz/ResourceGzMojo.java |  2 +-
 pom.xml |  2 +-
 3 files changed, 31 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1da3b556/hadoop-maven-plugins/pom.xml
--
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index b31d158..d6b18b4 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -26,7 +26,7 @@
   maven-plugin
   Apache Hadoop Maven Plugins
   
-3.0
+3.0.5
 3.5.1
   
   
@@ -45,6 +45,14 @@
   maven-plugin-annotations
   ${maven.plugin-tools.version}
   provided
+  
+
+
+  org.apache.maven
+  maven-artifact
+
+  
 
 
   commons-io
@@ -60,16 +68,28 @@
   ${maven-shade-plugin.version}
   provided
   
-
+
+
+  org.apache.maven
+  maven-artifact
+
+
+  org.apache.maven
+  maven-compat
+
+
+  org.apache.maven
+  maven-core
+
+
+  org.apache.maven
+  maven-model
+
 
-  org.apache.maven.shared
-  maven-dependency-tree
+  org.apache.maven
+  maven-plugin-api
 
-
 
   org.vafer
   jdependency

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1da3b556/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
--
diff --git 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
index e7ab663..5bf84c2 100644
--- 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
+++ 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java
@@ -13,7 +13,7 @@
  */
 package org.apache.hadoop.maven.plugin.resourcegz;
 
-import com.google.inject.internal.util.Lists;
+import com.google.common.collect.Lists;
 import org.apache.commons.io.IOUtils;
 import org.apache.maven.plugin.AbstractMojo;
 import org.apache.maven.plugin.MojoExecutionException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1da3b556/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 3695be0..a250e64 100644
--- a/pom.xml
+++ b/pom.xml
@@ -106,7 +106,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
1.5
 3.0.1
 0.12
-1.0
+2.4
 3.3.0
 2.5.0
 1.0.0


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-192:Create new SCMCommand to request a replication of a container. Contributed by Elek Marton

2018-06-25 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 35ec9401e -> 238fe00ad


HDDS-192:Create new SCMCommand to request a replication of a container. 
Contributed by Elek Marton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/238fe00a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/238fe00a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/238fe00a

Branch: refs/heads/trunk
Commit: 238fe00ad2692154f6a382f35735169ee5e4af2c
Parents: 35ec940
Author: Bharat Viswanadham 
Authored: Mon Jun 25 21:12:05 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Jun 25 21:12:05 2018 -0700

--
 .../statemachine/DatanodeStateMachine.java  |  3 +
 .../ReplicateContainerCommandHandler.java   | 67 ++
 .../states/endpoint/HeartbeatEndpointTask.java  | 12 +++
 .../commands/ReplicateContainerCommand.java | 94 
 .../StorageContainerDatanodeProtocol.proto  | 12 ++-
 .../scm/server/SCMDatanodeProtocolServer.java   | 11 +++
 .../TestReplicateContainerHandler.java  | 71 +++
 7 files changed, 269 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/238fe00a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index dc4e673..b073d7b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -32,6 +32,8 @@ import 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .CommandDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .DeleteBlocksCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
+.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
@@ -95,6 +97,7 @@ public class DatanodeStateMachine implements Closeable {
 .addHandler(new CloseContainerCommandHandler())
 .addHandler(new DeleteBlocksCommandHandler(
 container.getContainerManager(), conf))
+.addHandler(new ReplicateContainerCommandHandler())
 .setConnectionManager(connectionManager)
 .setContainer(container)
 .setContext(context)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/238fe00a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
new file mode 100644
index 000..b4e83b7
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+

hadoop git commit: YARN-8438. TestContainer.testKillOnNew flaky on trunk. Contributed by Szilard Nemeth.

2018-06-25 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7a3c6e9c3 -> 35ec9401e


YARN-8438. TestContainer.testKillOnNew flaky on trunk. Contributed by Szilard 
Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35ec9401
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35ec9401
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35ec9401

Branch: refs/heads/trunk
Commit: 35ec9401e829bfa10994790659a26b0babacae35
Parents: 7a3c6e9
Author: Miklos Szegedi 
Authored: Mon Jun 25 09:23:11 2018 -0700
Committer: Miklos Szegedi 
Committed: Mon Jun 25 15:47:54 2018 -0700

--
 .../containermanager/container/TestContainer.java| 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35ec9401/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 1a263ee..edf26d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -116,7 +116,7 @@ public class TestContainer {
   final NodeManagerMetrics metrics = NodeManagerMetrics.create();
   final Configuration conf = new YarnConfiguration();
   final String FAKE_LOCALIZATION_ERROR = "Fake localization error";
-  
+
   /**
* Verify correct container request events sent to localizer.
*/
@@ -591,9 +591,8 @@ public class TestContainer {
   Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
   containerMetrics.exitCode.value());
   Assert.assertTrue(containerMetrics.startTime.value() > 0);
-  Assert.assertTrue(
-  containerMetrics.finishTime.value() > containerMetrics.startTime
-  .value());
+  Assert.assertTrue(containerMetrics.finishTime.value() >=
+  containerMetrics.startTime.value());
   Assert.assertEquals(ContainerEventType.KILL_CONTAINER,
   wc.initStateToEvent.get(ContainerState.NEW));
   Assert.assertEquals(ContainerState.DONE,
@@ -1612,4 +1611,5 @@ public class TestContainer {
   return ((ContainerImpl)c).getRetryPolicy();
 }
   }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15550. Avoid static initialization of ObjectMappers

2018-06-25 Thread todd
Repository: hadoop
Updated Branches:
  refs/heads/trunk c687a6617 -> 7a3c6e9c3


HADOOP-15550. Avoid static initialization of ObjectMappers


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a3c6e9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a3c6e9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a3c6e9c

Branch: refs/heads/trunk
Commit: 7a3c6e9c3cd9ffdc71946fd12f5c3d59718c4939
Parents: c687a66
Author: Todd Lipcon 
Authored: Mon Jun 25 15:36:45 2018 -0700
Committer: Todd Lipcon 
Committed: Mon Jun 25 15:36:45 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   |  7 ++
 .../web/DelegationTokenAuthenticator.java   |  8 ++-
 .../apache/hadoop/util/HttpExceptionUtils.java  | 12 ++
 .../apache/hadoop/util/JsonSerialization.java   | 24 
 .../crypto/key/kms/server/KMSJSONWriter.java|  6 ++---
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  7 ++
 ...onfRefreshTokenBasedAccessTokenProvider.java |  8 +++
 .../CredentialBasedAccessTokenProvider.java |  8 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   |  8 +++
 .../hadoop/fs/azure/security/JsonUtils.java |  4 ++--
 10 files changed, 45 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a3c6e9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index edbf897..7b46075 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.security.token.TokenRenewer;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
@@ -79,7 +80,6 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -132,9 +132,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private final ValueQueue encKeyVersionQueue;
 
-  private static final ObjectWriter WRITER =
-  new ObjectMapper().writerWithDefaultPrettyPrinter();
-
   private final Text dtService;
 
   // Allow fallback to default kms server port 9600 for certain tests that do
@@ -237,7 +234,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static void writeJson(Object obj, OutputStream os)
   throws IOException {
 Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
-WRITER.writeValue(writer, obj);
+JsonSerialization.writer().writeValue(writer, obj);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a3c6e9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 617773b..0ae2af3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.security.token.delegation.web;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import 

hadoop git commit: HADOOP-15423. Merge fileCache and dirCache into one single cache in LocalMetadataStore. Contributed by Gabor Bota.

2018-06-25 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/trunk a55d6bba7 -> c687a6617


HADOOP-15423. Merge fileCache and dirCache into one single cache in 
LocalMetadataStore. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c687a661
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c687a661
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c687a661

Branch: refs/heads/trunk
Commit: c687a6617d73293019d8d91ac48bbfd2ccca3b40
Parents: a55d6bb
Author: Sean Mackrory 
Authored: Mon Jun 25 11:04:34 2018 -0600
Committer: Sean Mackrory 
Committed: Mon Jun 25 14:59:41 2018 -0600

--
 .../fs/s3a/s3guard/LocalMetadataEntry.java  |  81 ++
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 247 +++
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |   2 +-
 .../fs/s3a/s3guard/TestLocalMetadataStore.java  |  33 ++-
 4 files changed, 240 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c687a661/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
new file mode 100644
index 000..6040d67
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import javax.annotation.Nullable;
+
+/**
+ * LocalMetadataEntry is used to store entries in the cache of
+ * LocalMetadataStore. PathMetadata or dirListingMetadata can be null. The
+ * entry is not immutable.
+ */
+public final class LocalMetadataEntry {
+  @Nullable
+  private PathMetadata pathMetadata;
+  @Nullable
+  private DirListingMetadata dirListingMetadata;
+
+  LocalMetadataEntry(PathMetadata pmd){
+pathMetadata = pmd;
+dirListingMetadata = null;
+  }
+
+  LocalMetadataEntry(DirListingMetadata dlm){
+pathMetadata = null;
+dirListingMetadata = dlm;
+  }
+
+  public PathMetadata getFileMeta() {
+return pathMetadata;
+  }
+
+  public DirListingMetadata getDirListingMeta() {
+return dirListingMetadata;
+  }
+
+
+  public boolean hasPathMeta() {
+return this.pathMetadata != null;
+  }
+
+  public boolean hasDirMeta() {
+return this.dirListingMetadata != null;
+  }
+
+  public void setPathMetadata(PathMetadata pathMetadata) {
+this.pathMetadata = pathMetadata;
+  }
+
+  public void setDirListingMetadata(DirListingMetadata dirListingMetadata) {
+this.dirListingMetadata = dirListingMetadata;
+  }
+
+  @Override public String toString() {
+StringBuilder sb = new StringBuilder();
+sb.append("LocalMetadataEntry{");
+if(pathMetadata != null) {
+  sb.append("pathMetadata=" + pathMetadata.getFileStatus().getPath());
+}
+if(dirListingMetadata != null){
+  sb.append("; dirListingMetadata=" + dirListingMetadata.getPath());
+}
+sb.append("}");
+return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c687a661/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 95689e1..49981ed 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -37,13 +37,12 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
 import java.util.HashMap;
-import 

hadoop git commit: HDDS-191. Queue SCMCommands via EventQueue in SCM. Contributed by Elek, Marton.

2018-06-25 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4ffe68a6f -> a55d6bba7


HDDS-191. Queue SCMCommands via EventQueue in SCM.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a55d6bba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a55d6bba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a55d6bba

Branch: refs/heads/trunk
Commit: a55d6bba71c81c1c4e9d8cd11f55c78f10a548b0
Parents: 4ffe68a
Author: Anu Engineer 
Authored: Mon Jun 25 13:05:22 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jun 25 13:05:22 2018 -0700

--
 .../protocol/commands/CommandForDatanode.java   | 45 
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 20 -
 .../scm/server/StorageContainerManager.java |  8 +++-
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 39 +
 4 files changed, 110 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55d6bba/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
new file mode 100644
index 000..0c4964a
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import java.util.UUID;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * Command for the datanode with the destination address.
+ */
+public class CommandForDatanode {
+
+  private final UUID datanodeId;
+
+  private final SCMCommand command;
+
+  public CommandForDatanode(UUID datanodeId, SCMCommand command) {
+this.datanodeId = datanodeId;
+this.command = command;
+  }
+
+  public UUID getDatanodeId() {
+return datanodeId;
+  }
+
+  public SCMCommand getCommand() {
+return command;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55d6bba/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index b339fb7..fc8b013 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,6 +25,10 @@ import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -42,11 +46,14 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
 import 

hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui. Contributed by Sunil Govindan.

2018-06-25 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 d99dd1e79 -> a3d2ac683


YARN-8457. Compilation is broken with -Pyarn-ui. Contributed by Sunil Govindan.

(cherry picked from commit 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3d2ac68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3d2ac68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3d2ac68

Branch: refs/heads/branch-2.9
Commit: a3d2ac68383772ce7726d1858f6f7f4e7030cffc
Parents: d99dd1e
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:42:20 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d2ac68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-06-25 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1d457673c -> f359b7e5e


YARN-8457. Compilation is broken with -Pyarn-ui.

(cherry picked from commit 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f359b7e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f359b7e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f359b7e5

Branch: refs/heads/branch-2
Commit: f359b7e5e997a9e32f091140ccabfd45df15295d
Parents: 1d45767
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:40:40 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f359b7e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-06-25 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f3efeb899 -> 1fa79f006


YARN-8457. Compilation is broken with -Pyarn-ui.

(cherry picked from commit 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fa79f00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fa79f00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fa79f00

Branch: refs/heads/branch-3.0
Commit: 1fa79f00634eba3115ba8b5f435c1bc9b128eb3b
Parents: f3efeb8
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:40:02 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa79f00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-06-25 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 cbe15066c -> e925de648


YARN-8457. Compilation is broken with -Pyarn-ui.

(cherry picked from commit 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e925de64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e925de64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e925de64

Branch: refs/heads/branch-3.1
Commit: e925de64831e53401f4046d8664f2fef94f08267
Parents: cbe1506
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:39:26 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e925de64/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-06-25 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk abc3e4bad -> 4ffe68a6f


YARN-8457. Compilation is broken with -Pyarn-ui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ffe68a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ffe68a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ffe68a6

Branch: refs/heads/trunk
Commit: 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f
Parents: abc3e4b
Author: Rohith Sharma K S 
Authored: Mon Jun 25 10:38:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Mon Jun 25 10:38:03 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ffe68a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io"
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/5] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread inigoiri
HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.

(cherry picked from commit abc3e4bad905efde5a4881e8a072c68f6e910ade)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbe15066
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbe15066
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbe15066

Branch: refs/heads/branch-3.1
Commit: cbe15066c8acf86abd76da7165b274b3bf4bddaf
Parents: bfdbc9d
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:51:01 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbe15066/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 90eaa2a..23d2b5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -689,17 +689,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/5] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread inigoiri
HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.

(cherry picked from commit abc3e4bad905efde5a4881e8a072c68f6e910ade)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d457673
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d457673
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d457673

Branch: refs/heads/branch-2
Commit: 1d457673c8374bff7143d1f97bd065ca7a16dbc2
Parents: 11e7f2e
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:52:15 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d457673/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 5479e45..6f2e7cf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -676,17 +676,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/5] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 11e7f2e58 -> 1d457673c
  refs/heads/branch-2.9 a9da764e3 -> d99dd1e79
  refs/heads/branch-3.0 3151e95d2 -> f3efeb899
  refs/heads/branch-3.1 bfdbc9dea -> cbe15066c
  refs/heads/trunk 1ba4e6230 -> abc3e4bad


HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abc3e4ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abc3e4ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abc3e4ba

Branch: refs/heads/trunk
Commit: abc3e4bad905efde5a4881e8a072c68f6e910ade
Parents: 1ba4e62
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:50:27 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abc3e4ba/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 0e337b4..d5622af 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -689,17 +689,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[5/5] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread inigoiri
HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.

(cherry picked from commit abc3e4bad905efde5a4881e8a072c68f6e910ade)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d99dd1e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d99dd1e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d99dd1e7

Branch: refs/heads/branch-2.9
Commit: d99dd1e793d2ac732cb3cb6f9dcdd2483dc4f58a
Parents: a9da764
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:52:31 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d99dd1e7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 5479e45..6f2e7cf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -676,17 +676,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[3/5] hadoop git commit: HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. Contributed by Xiao Liang.

2018-06-25 Thread inigoiri
HADOOP-15458. TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows. 
Contributed by Xiao Liang.

(cherry picked from commit abc3e4bad905efde5a4881e8a072c68f6e910ade)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3efeb89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3efeb89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3efeb89

Branch: refs/heads/branch-3.0
Commit: f3efeb899a711bc282834e9b0c84e8e16830d0c8
Parents: 3151e95
Author: Inigo Goiri 
Authored: Mon Jun 25 09:50:27 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 25 09:51:25 2018 -0700

--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3efeb89/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 90eaa2a..23d2b5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -689,17 +689,18 @@ public class TestLocalFileSystem {
 // and permission
 FSDataOutputStreamBuilder builder =
 fileSys.createFile(path);
-builder.build();
-Assert.assertEquals("Should be default block size",
-builder.getBlockSize(), fileSys.getDefaultBlockSize());
-Assert.assertEquals("Should be default replication factor",
-builder.getReplication(), fileSys.getDefaultReplication());
-Assert.assertEquals("Should be default buffer size",
-builder.getBufferSize(),
-fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
-IO_FILE_BUFFER_SIZE_DEFAULT));
-Assert.assertEquals("Should be default permission",
-builder.getPermission(), FsPermission.getFileDefault());
+try (FSDataOutputStream stream = builder.build()) {
+  Assert.assertEquals("Should be default block size",
+  builder.getBlockSize(), fileSys.getDefaultBlockSize());
+  Assert.assertEquals("Should be default replication factor",
+  builder.getReplication(), fileSys.getDefaultReplication());
+  Assert.assertEquals("Should be default buffer size",
+  builder.getBufferSize(),
+  fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT));
+  Assert.assertEquals("Should be default permission",
+  builder.getPermission(), FsPermission.getFileDefault());
+}
 
 // Test set 0 to replication, block size and buffer size
 builder = fileSys.createFile(path);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15267. S3A multipart upload fails when SSE-C encryption is enabled. Contributed by Anis Elleuch.

2018-06-25 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d084c1a4b -> 11e7f2e58


HADOOP-15267. S3A multipart upload fails when SSE-C encryption is enabled.
Contributed by Anis Elleuch.

(cherry picked from commit 1dedc68f9d8d8544d715e67ee77cd3f017c21699)
(cherry picked from commit cc0f14c13c1699489777b71a5c21bad27f9020ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11e7f2e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11e7f2e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11e7f2e5

Branch: refs/heads/branch-2
Commit: 11e7f2e5842f1aeaf2121044f47adf53bde0d8c6
Parents: d084c1a
Author: Steve Loughran 
Authored: Mon Jun 25 17:27:49 2018 +0100
Committer: Steve Loughran 
Committed: Mon Jun 25 17:27:49 2018 +0100

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 17 ++
 .../scale/ITestS3AHugeFilesSSECDiskBlocks.java  | 58 
 2 files changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e7f2e5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f7068c4..c171a9e 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1300,6 +1300,7 @@ public class S3AFileSystem extends FileSystem {
 long len = request.getPartSize();
 incrementPutStartStatistics(len);
 try {
+  setOptionalUploadPartRequestParameters(request);
   UploadPartResult uploadPartResult = s3.uploadPart(request);
   incrementPutCompletedStatistics(true, len);
   return uploadPartResult;
@@ -2172,6 +2173,22 @@ public class S3AFileSystem extends FileSystem {
 }
   }
 
+  /**
+   * Sets server side encryption parameters to the part upload
+   * request when encryption is enabled.
+   * @param request upload part request
+   */
+  protected void setOptionalUploadPartRequestParameters(
+  UploadPartRequest request) {
+switch (serverSideEncryptionAlgorithm) {
+case SSE_C:
+  if (StringUtils.isNotBlank(getServerSideEncryptionKey(getConf( {
+request.setSSECustomerKey(generateSSECustomerKey());
+  }
+  break;
+default:
+}
+  }
 
   protected void setOptionalCopyObjectRequestParameters(
   CopyObjectRequest copyObjectRequest) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e7f2e5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java
new file mode 100644
index 000..2e5185b
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.fs.s3a.S3AEncryptionMethods;
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
+
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfEncryptionTestsDisabled;
+
+/**
+ * Concrete class that extends {@link ITestS3AHugeFilesDiskBlocks}
+ * and tests huge files operations with SSE-C encryption enabled.
+ * Skipped if the SSE tests are disabled.
+ */
+public class ITestS3AHugeFilesSSECDiskBlocks
+extends ITestS3AHugeFilesDiskBlocks {
+
+  private static final String KEY_1
+  = "4niV/jPK5VFRHY+KNb6wtqYd4xXyMgdJ9XQJpcQUVbs=";
+
+  

hadoop git commit: HDDS-189. Update HDDS to start OzoneManager. Contributed by Arpit Agarwal.

2018-06-25 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/docker-hadoop-runner 9b8eae852 -> a63c65476


HDDS-189. Update HDDS to start OzoneManager. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a63c6547
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a63c6547
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a63c6547

Branch: refs/heads/docker-hadoop-runner
Commit: a63c654761b657b82768f1fe0ee5911bee1092c2
Parents: 9b8eae8
Author: Arpit Agarwal 
Authored: Mon Jun 25 09:17:29 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Jun 25 09:17:29 2018 -0700

--
 README.md  | 15 +--
 scripts/starter.sh | 23 +++
 2 files changed, 32 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a63c6547/README.md
--
diff --git a/README.md b/README.md
index d43405d..eb6facd 100644
--- a/README.md
+++ b/README.md
@@ -29,10 +29,21 @@ docker build -t apache/hadoop-runner .
 
 ## Usage
 
-Do a full build on the HDFS-7240 branch of apache hadoop repositry
+Do a full build on Apache Hadoop trunk with the `hdds` profile enabled.
+```
+mvn clean install package -DskipTests -Pdist,hdds -Dtar 
-Dmaven.javadoc.skip=true
+```
+
+Then start HDDS services with `docker-compose`.
 
 ```
-cd dev-support/compose/ozone
+cd hadoop-dist/target/compose/ozone
 docker-compose up -d
 ```
 
+## Troubleshooting
+
+If `docker-compose` fails to work, check that the 
`hadoop-dist/target/compose/ozone/.env` file exists and has a line like the 
following (the exact version number may be different):
+```
+HDDS_VERSION=0.2.1-SNAPSHOT
+```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a63c6547/scripts/starter.sh
--
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 7792f1c..7c51826 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -55,12 +55,27 @@ if [ -n "$ENSURE_SCM_INITIALIZED" ]; then
fi
 fi
 
+
+if [ -n "$ENSURE_OM_INITIALIZED" ]; then
+   if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then
+  # To make sure SCM is running in dockerized environment we will sleep
+  # Could be removed after HDFS-13203
+  echo "Waiting 15 seconds for SCM startup"
+  sleep 15
+  /opt/hadoop/bin/ozone om -createObjectStore
+   fi
+fi
+
+
+# The KSM initialization block will go away eventually once
+# we have completed renaming KSM to OzoneManager (OM).
+#
 if [ -n "$ENSURE_KSM_INITIALIZED" ]; then
if [ ! -f "$ENSURE_KSM_INITIALIZED" ]; then
-  #To make sure SCM is running in dockerized environment we will sleep
-   # Could be removed after HDFS-13203
-   echo "Waiting 15 seconds for SCM startup"
-   sleep 15
+  # To make sure SCM is running in dockerized environment we will sleep
+  # Could be removed after HDFS-13203
+  echo "Waiting 15 seconds for SCM startup"
+  sleep 15
   /opt/hadoop/bin/ozone ksm -createObjectStore
fi
 fi


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14396. Add builder interface to FileContext. Contributed by Lei (Eddy) Xu.

2018-06-25 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 440140cea -> 1ba4e6230


HADOOP-14396. Add builder interface to FileContext.
Contributed by  Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ba4e623
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ba4e623
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ba4e623

Branch: refs/heads/trunk
Commit: 1ba4e62304a70d53f1a4f76995b6e1fac3107922
Parents: 440140c
Author: Steve Loughran 
Authored: Mon Jun 25 14:38:33 2018 +0100
Committer: Steve Loughran 
Committed: Mon Jun 25 14:38:57 2018 +0100

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java| 22 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 66 
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +
 .../fs/FileContextMainOperationsBaseTest.java   | 44 -
 4 files changed, 134 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba4e623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 86c284a..d431293 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -116,6 +116,27 @@ public abstract class FSDataOutputStreamBuilder
   protected abstract B getThisBuilder();
 
   /**
+   * Construct from a {@link FileContext}.
+   *
+   * @param fc FileContext
+   * @param p path.
+   * @throws IOException
+   */
+  FSDataOutputStreamBuilder(@Nonnull FileContext fc,
+  @Nonnull Path p) throws IOException {
+Preconditions.checkNotNull(fc);
+Preconditions.checkNotNull(p);
+this.fs = null;
+this.path = p;
+
+AbstractFileSystem afs = fc.getFSofPath(p);
+FsServerDefaults defaults = afs.getServerDefaults(p);
+bufferSize = defaults.getFileBufferSize();
+replication = defaults.getReplication();
+blockSize = defaults.getBlockSize();
+  }
+
+  /**
* Constructor.
*/
   protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem,
@@ -131,6 +152,7 @@ public abstract class FSDataOutputStreamBuilder
   }
 
   protected FileSystem getFS() {
+Preconditions.checkNotNull(fs);
 return fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba4e623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 6ea69d0..5215c3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -35,6 +36,8 @@ import java.util.Stack;
 import java.util.TreeSet;
 import java.util.Map.Entry;
 
+import javax.annotation.Nonnull;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -695,6 +698,69 @@ public class FileContext {
   }
 
   /**
+   * {@link FSDataOutputStreamBuilder} for {@liink FileContext}.
+   */
+  private static final class FCDataOutputStreamBuilder extends
+  FSDataOutputStreamBuilder<
+FSDataOutputStream, FCDataOutputStreamBuilder> {
+private final FileContext fc;
+
+private FCDataOutputStreamBuilder(
+@Nonnull FileContext fc, @Nonnull Path p) throws IOException {
+  super(fc, p);
+  this.fc = fc;
+  Preconditions.checkNotNull(fc);
+}
+
+@Override
+protected FCDataOutputStreamBuilder getThisBuilder() {
+  return this;
+}
+
+@Override
+public FSDataOutputStream build() throws IOException {
+  final EnumSet flags = getFlags();
+  List createOpts = new ArrayList<>(Arrays.asList(
+  CreateOpts.blockSize(getBlockSize()),
+  CreateOpts.bufferSize(getBufferSize()),
+  CreateOpts.repFac(getReplication()),
+