hadoop git commit: HADOOP-13529. Do some code refactoring. Contributed by Genmao Yu.

2016-08-25 Thread shimingfei
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12756 aff1841d0 -> 5a7e6f77c


HADOOP-13529. Do some code refactoring. Contributed by Genmao Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a7e6f77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a7e6f77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a7e6f77

Branch: refs/heads/HADOOP-12756
Commit: 5a7e6f77c868bf590d8a2f6bb9e58a0b6a86d1d6
Parents: aff1841
Author: Mingfei 
Authored: Fri Aug 26 11:00:03 2016 +0800
Committer: Mingfei 
Committed: Fri Aug 26 11:44:51 2016 +0800

--
 hadoop-tools/hadoop-aliyun/pom.xml  |  23 +-
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  | 455 +++--
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 486 +++
 .../fs/aliyun/oss/AliyunOSSInputStream.java |  60 +--
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 129 +
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 193 
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |   3 +-
 .../aliyun/oss/TestOSSFileSystemContract.java   |  10 -
 .../fs/aliyun/oss/TestOSSFileSystemStore.java   | 121 +
 .../fs/aliyun/oss/contract/OSSContract.java |   1 -
 .../oss/contract/TestOSSContractDispCp.java |  44 ++
 .../contract/TestOSSContractGetFileStatus.java  |  35 ++
 .../oss/contract/TestOSSContractRootDir.java|  69 +++
 13 files changed, 968 insertions(+), 661 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7e6f77/hadoop-tools/hadoop-aliyun/pom.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml 
b/hadoop-tools/hadoop-aliyun/pom.xml
index c87d13f..358b18b 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -128,6 +128,27 @@
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
-
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7e6f77/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index afe7242..ad321bd 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -20,18 +20,12 @@ package org.apache.hadoop.fs.aliyun.oss;
 
 import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
 
-import com.aliyun.oss.ClientException;
-import com.aliyun.oss.common.auth.CredentialsProvider;
-import com.aliyun.oss.common.auth.DefaultCredentialProvider;
-import com.aliyun.oss.common.auth.DefaultCredentials;
-import java.io.ByteArrayInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,30 +33,13 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.aliyun.oss.AliyunOSSUtils.UserInfo;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.Progressable;
 
-import com.aliyun.oss.ClientConfiguration;
-import com.aliyun.oss.OSSClient;
-import com.aliyun.oss.OSSException;
-import com.aliyun.oss.common.comm.Protocol;
-import com.aliyun.oss.model.AbortMultipartUploadRequest;
-import com.aliyun.oss.model.CannedAccessControlList;
-import com.aliyun.oss.model.CompleteMultipartUploadRequest;
-import com.aliyun.oss.model.CompleteMultipartUploadResult;
-import com.aliyun.oss.model.CopyObjectResult;
-import com.aliyun.oss.model.DeleteObjectsRequest;
-import com.aliyun.oss.model.InitiateMultipartUploadRequest;
-import com.aliyun.oss.model.InitiateMultipartUploadResult;
-import com.aliyun.oss.model.ListObjectsRequest;
 import com.aliyun.oss.model.OSSObjectSummary;
 import 

hadoop git commit: YARN-5564. Fix typo in RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE. Contributed by Ray Chiang

2016-08-25 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 13cbf1677 -> ee3358402


YARN-5564. Fix typo in RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE. 
Contributed by Ray Chiang

(cherry picked from commit 27c3b86252386c9c064a6420b3c650644cbb9ef3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee335840
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee335840
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee335840

Branch: refs/heads/branch-2
Commit: ee3358402a70d65f860289a5f171921083176a91
Parents: 13cbf16
Author: Naganarasimha 
Authored: Fri Aug 26 08:47:21 2016 +0530
Committer: Naganarasimha 
Committed: Fri Aug 26 08:53:42 2016 +0530

--
 .../scheduler/fair/FairSchedulerConfiguration.java   | 4 ++--
 .../resourcemanager/scheduler/fair/FairSchedulerTestBase.java| 4 ++--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 2 +-
 .../scheduler/fair/TestFairSchedulerPreemption.java  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee335840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 22cb10c..b18dd7d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -52,7 +52,7 @@ public class FairSchedulerConfiguration extends Configuration 
{
* multiple of increment allocation. Only container sizes above this are
* allowed to reserve a node */
   public static final String
-  RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE =
+  RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE =
   YarnConfiguration.YARN_PREFIX +
   "scheduler.reservation-threshold.increment-multiple";
   public static final float
@@ -190,7 +190,7 @@ public class FairSchedulerConfiguration extends 
Configuration {
 
   public float getReservationThresholdIncrementMultiple() {
 return getFloat(
-  RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE,
+  RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
   DEFAULT_RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee335840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index ec0e6aa..8e6272a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -84,7 +84,7 @@ public class FairSchedulerTestBase {
 
 conf.setFloat(
 FairSchedulerConfiguration
-   .RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE,
+   .RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
 TEST_RESERVATION_THRESHOLD);
 return conf;
   }
@@ -280,4 +280,4 @@ public class FairSchedulerTestBase {
 Assert.assertEquals(resource.getVirtualCores(),
 app.getCurrentConsumption().getVirtualCores());
   }
-}
\ No newline at end of file
+}
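For anyone consuming this rename downstream: the constant's value is unchanged (it still expands to the key "yarn.scheduler.reservation-threshold.increment-multiple"), so only Java code that referenced the old misspelled name needs updating. A minimal usage sketch of the corrected constant; the 2.0f value is just an illustrative threshold multiple:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;

public class ReservationThresholdExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Equivalent to conf.setFloat("yarn.scheduler.reservation-threshold.increment-multiple", 2.0f)
    conf.setFloat(
        FairSchedulerConfiguration.RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
        2.0f);
    System.out.println(conf.getFloat(
        FairSchedulerConfiguration.RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE, 0f));
  }
}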


hadoop git commit: YARN-5564. Fix typo in RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE. Contributed by Ray Chiang

2016-08-25 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/trunk f4a21d3ab -> 27c3b8625


YARN-5564. Fix typo in RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE. 
Contributed by Ray Chiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27c3b862
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27c3b862
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27c3b862

Branch: refs/heads/trunk
Commit: 27c3b86252386c9c064a6420b3c650644cbb9ef3
Parents: f4a21d3
Author: Naganarasimha 
Authored: Fri Aug 26 08:47:21 2016 +0530
Committer: Naganarasimha 
Committed: Fri Aug 26 08:47:21 2016 +0530

--
 .../scheduler/fair/FairSchedulerConfiguration.java   | 4 ++--
 .../resourcemanager/scheduler/fair/FairSchedulerTestBase.java| 4 ++--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 2 +-
 .../scheduler/fair/TestFairSchedulerPreemption.java  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c3b862/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 22cb10c..b18dd7d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -52,7 +52,7 @@ public class FairSchedulerConfiguration extends Configuration 
{
* multiple of increment allocation. Only container sizes above this are
* allowed to reserve a node */
   public static final String
-  RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE =
+  RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE =
   YarnConfiguration.YARN_PREFIX +
   "scheduler.reservation-threshold.increment-multiple";
   public static final float
@@ -190,7 +190,7 @@ public class FairSchedulerConfiguration extends 
Configuration {
 
   public float getReservationThresholdIncrementMultiple() {
 return getFloat(
-  RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE,
+  RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
   DEFAULT_RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c3b862/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index ec0e6aa..8e6272a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -84,7 +84,7 @@ public class FairSchedulerTestBase {
 
 conf.setFloat(
 FairSchedulerConfiguration
-   .RM_SCHEDULER_RESERVATION_THRESHOLD_INCERMENT_MULTIPLE,
+   .RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
 TEST_RESERVATION_THRESHOLD);
 return conf;
   }
@@ -280,4 +280,4 @@ public class FairSchedulerTestBase {
 Assert.assertEquals(resource.getVirtualCores(),
 app.getCurrentConsumption().getVirtualCores());
   }
-}
\ No newline at end of file
+}


hadoop git commit: HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by Sammi Chen

2016-08-25 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 4e9129b7e -> 743f418ae


HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/743f418a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/743f418a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/743f418a

Branch: refs/heads/branch-3.0.0-alpha1
Commit: 743f418ae05bd5b7583702773f1706ea7219c91a
Parents: 4e9129b
Author: Kai Zheng 
Authored: Sat Aug 27 10:56:12 2016 +0800
Committer: Kai Zheng 
Committed: Sat Aug 27 10:56:12 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/743f418a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
index d75a8ef..c984c3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
@@ -181,7 +181,7 @@ class ByteBufferStrategy implements ReaderStrategy {
int length) throws IOException {
 ByteBuffer tmpBuf = readBuf.duplicate();
 tmpBuf.limit(tmpBuf.position() + length);
-int nRead = blockReader.read(readBuf.slice());
+int nRead = blockReader.read(tmpBuf);
 // Only when data are read, update the position
 if (nRead > 0) {
   readBuf.position(readBuf.position() + nRead);
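The one-line fix matters because ByteBuffer.slice() produces a view running from the buffer's current position all the way to its limit, ignoring the length cap that was just applied to tmpBuf, so the block reader could fill more than the requested range. Reading into the duplicate, whose limit was explicitly trimmed, bounds the transfer. A standalone sketch of the duplicate-and-limit idiom the fix relies on (plain java.nio, not the HDFS code itself):

import java.nio.ByteBuffer;

public class BoundedReadDemo {
  public static void main(String[] args) {
    ByteBuffer readBuf = ByteBuffer.allocate(64);
    int length = 8;

    // duplicate() shares the backing storage but keeps independent
    // position/limit, so trimming the duplicate's limit caps how much a
    // reader may write without touching readBuf's own bookkeeping.
    ByteBuffer tmpBuf = readBuf.duplicate();
    tmpBuf.limit(tmpBuf.position() + length);

    // Stand-in for blockReader.read(tmpBuf): fill the bounded window.
    int nRead = 0;
    while (tmpBuf.hasRemaining()) {
      tmpBuf.put((byte) nRead++);
    }

    // Only after data are read is the original buffer's position advanced,
    // mirroring ReaderStrategy#ByteBufferStrategy.
    if (nRead > 0) {
      readBuf.position(readBuf.position() + nRead);
    }
    System.out.println("read " + nRead + " bytes, readBuf.position() = "
        + readBuf.position());
  }
}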





hadoop git commit: HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by Sammi Chen

2016-08-25 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk 81485dbfc -> f4a21d3ab


HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4a21d3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4a21d3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4a21d3a

Branch: refs/heads/trunk
Commit: f4a21d3abaa7c5a9f0a0d8417e81f7eaf3d1b29a
Parents: 81485db
Author: Kai Zheng 
Authored: Sat Aug 27 10:54:25 2016 +0800
Committer: Kai Zheng 
Committed: Sat Aug 27 10:54:25 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a21d3a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
index d75a8ef..c984c3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
@@ -181,7 +181,7 @@ class ByteBufferStrategy implements ReaderStrategy {
int length) throws IOException {
 ByteBuffer tmpBuf = readBuf.duplicate();
 tmpBuf.limit(tmpBuf.position() + length);
-int nRead = blockReader.read(readBuf.slice());
+int nRead = blockReader.read(tmpBuf);
 // Only when data are read, update the position
 if (nRead > 0) {
   readBuf.position(readBuf.position() + nRead);





hadoop git commit: Revert "HADOOP-13465. Design Server.Call to be extensible for unified call queue. Contributed by Daryn Sharp."

2016-08-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1360bd2d5 -> 81485dbfc


Revert "HADOOP-13465. Design Server.Call to be extensible for unified call 
queue. Contributed by Daryn Sharp."

This reverts commit d288a0ba8364d81aacda9f4a21022eecb6dc4e22.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81485dbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81485dbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81485dbf

Branch: refs/heads/trunk
Commit: 81485dbfc1ffb8daa609be8eb31094cc28646dd3
Parents: 1360bd2
Author: Kihwal Lee 
Authored: Thu Aug 25 16:04:54 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Aug 25 16:04:54 2016 -0500

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 336 ---
 1 file changed, 145 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81485dbf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 09fe889..4c73f6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -354,9 +354,10 @@ public abstract class Server {
*/
   public static InetAddress getRemoteIp() {
 Call call = CurCall.get();
-return (call != null ) ? call.getHostInetAddress() : null;
+return (call != null && call.connection != null) ? call.connection
+.getHostInetAddress() : null;
   }
-
+  
   /**
* Returns the clientId from the current RPC request
*/
@@ -379,9 +380,10 @@ public abstract class Server {
*/
   public static UserGroupInformation getRemoteUser() {
 Call call = CurCall.get();
-return (call != null) ? call.getRemoteUser() : null;
+return (call != null && call.connection != null) ? call.connection.user
+: null;
   }
-
+ 
   /** Return true if the invocation was through an RPC.
*/
   public static boolean isRpcInvocation() {
@@ -481,7 +483,7 @@ public abstract class Server {
 if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
 (processingTime > threeSigma)) {
   if(LOG.isWarnEnabled()) {
-String client = CurCall.get().toString();
+String client = CurCall.get().connection.toString();
 LOG.warn(
 "Slow RPC : " + methodName + " took " + processingTime +
 " milliseconds to process from client " + client);
@@ -655,65 +657,62 @@ public abstract class Server {
 CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT);
   }
 
-  /** A generic call queued for handling. */
-  public static class Call implements Schedulable,
-  PrivilegedExceptionAction {
-final int callId;// the client's call id
-final int retryCount;// the retry count of the call
-long timestamp;  // time received when response is null
- // time served when response is not null
+  /** A call queued for handling. */
+  public static class Call implements Schedulable {
+private final int callId; // the client's call id
+private final int retryCount;// the retry count of the call
+private final Writable rpcRequest;// Serialized Rpc request from client
+private final Connection connection;  // connection to client
+private long timestamp;   // time received when response is null
+  // time served when response is not null
+private ByteBuffer rpcResponse;   // the response for this call
 private AtomicInteger responseWaitCount = new AtomicInteger(1);
-final RPC.RpcKind rpcKind;
-final byte[] clientId;
+private final RPC.RpcKind rpcKind;
+private final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
 private final CallerContext callerContext; // the call context
 private int priorityLevel;
 // the priority level assigned by scheduler, 0 by default
 
-Call(Call call) {
-  this(call.callId, call.retryCount, call.rpcKind, call.clientId,
-  call.traceScope, call.callerContext);
+private Call(Call call) {
+  this(call.callId, call.retryCount, call.rpcRequest, call.connection,
+  call.rpcKind, call.clientId, call.traceScope, call.callerContext);
 }
 
-Call(int id, int retryCount, RPC.RpcKind kind, byte[] clientId) {
-  this(id, retryCount, kind, clientId, null, null);
+

svn commit: r14971 - in /release/hadoop/common: ./ hadoop-2.7.3/

2016-08-25 Thread vinodkv
Author: vinodkv
Date: Thu Aug 25 19:25:04 2016
New Revision: 14971

Log:
Publishing the bits for release 2.7.3

Added:
release/hadoop/common/hadoop-2.7.3/
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz   (with props)
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.asc   (with 
props)
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.mds
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz   (with props)
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.asc   (with props)
release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.mds
Modified:
release/hadoop/common/current
release/hadoop/common/stable2

Modified: release/hadoop/common/current
==
--- release/hadoop/common/current (original)
+++ release/hadoop/common/current Thu Aug 25 19:25:04 2016
@@ -1 +1 @@
-link hadoop-2.7.2
\ No newline at end of file
+link hadoop-2.7.3
\ No newline at end of file

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz
--
svn:mime-type = application/x-gzip

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.asc
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3-src.tar.gz.mds Thu Aug 25 
19:25:04 2016
@@ -0,0 +1,17 @@
+hadoop-2.7.3-src.tar.gz:MD5 = 95 5C EA EB 83 17 E6 EB  D6 DE 06 65 87 80 72
+  3F
+hadoop-2.7.3-src.tar.gz:   SHA1 = A067 EA2C 656D F6FF 064C  71DE CA01 9DC5 EA99
+  317F
+hadoop-2.7.3-src.tar.gz: RMD160 = CE3F 00C7 4C03 D5F1 0FCB  2AB1 302B FFFD FCAE
+  9A91
+hadoop-2.7.3-src.tar.gz: SHA224 = 41BA6C94 23E0CC92 200EFF93 29C059E4 E029700C
+  8A662845 7A232A38
+hadoop-2.7.3-src.tar.gz: SHA256 = 227785DC 6E3E6EF8 CFD64393 B305D090 78A20970
+  3C9C0191 0A1BDDCF 86BE3054
+hadoop-2.7.3-src.tar.gz: SHA384 = 9DEB84BB 7787D8FC A0E45936 25B93958 A52B064E
+  CAE6CB7D 2DFEFFDF 54F79CFE 5C451B85 FA287339
+  6DCD59C8 47141FD4
+hadoop-2.7.3-src.tar.gz: SHA512 = 8451F89D 3CBB6728 88ABC67C 76A53B2D 50F44B88
+  78127C3E 361CB354 CD1B5A3A 2BC7D531 C1BA67E9
+  BC3D17E5 C6AA496D 11969484 C12C86B5 6E8823CD
+  1AB6482A

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
--
svn:mime-type = application/x-gzip

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.asc
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz.mds Thu Aug 25 
19:25:04 2016
@@ -0,0 +1,14 @@
+hadoop-2.7.3.tar.gz:MD5 = 34 55 BB 57 E4 B4 90 6B  BE A6 7B 58 CC A7 8F A8
+hadoop-2.7.3.tar.gz:   SHA1 = B84B 8989 3426 9C68 753E  4E03 6D21 395E 5A4A B5B1
+hadoop-2.7.3.tar.gz: RMD160 = 8FE4 A91E 8C67 2A33 C4E9  61FB 607A DBBD 1AE5 E03A
+hadoop-2.7.3.tar.gz: SHA224 = 23AB1EAB B7648921 7101671C DCF9D774 7B84AD50
+  6A74E300 AE6617FA
+hadoop-2.7.3.tar.gz: SHA256 = D489DF38 08244B90 6EB38F4D 081BA49E 50C4603D
+  B03EFD5E 594A1E98 B09259C2
+hadoop-2.7.3.tar.gz: SHA384 = EFB42E60 3AF4FFB2 BA9F4CF4 1B56F71B D3F3BD8F
+  23331C25 27267762 FDEB67F0 F2B6F56D 797842DB
+  BB8C9F75 9DBA195D
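For completeness, verifying a download against these digests amounts to recomputing the hash locally and comparing it with the corresponding line in the .mds file (the .asc signatures are checked separately with gpg --verify against the Hadoop KEYS file). A small self-contained sketch; the file name and path are assumptions:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class PrintSha256 {
  public static void main(String[] args) throws Exception {
    String file = args.length > 0 ? args[0] : "hadoop-2.7.3.tar.gz";
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    try (InputStream in = Files.newInputStream(Paths.get(file))) {
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf)) != -1) {
        md.update(buf, 0, n);
      }
    }
    byte[] digest = md.digest();
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < digest.length; i++) {
      sb.append(String.format("%02X", digest[i]));
      // Group as 8 hex characters to match the .mds layout above.
      if (i % 4 == 3 && i != digest.length - 1) {
        sb.append(' ');
      }
    }
    System.out.println(file + ": SHA256 = " + sb);
  }
}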

[hadoop] Git Push Summary

2016-08-25 Thread vinodkv
Repository: hadoop
Updated Tags:  refs/tags/rel/release-2.7.3 [created] 269673cce




hadoop git commit: YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via Sunil G)

2016-08-25 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 e6afd277d -> 9b1532903


YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b153290
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b153290
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b153290

Branch: refs/heads/YARN-3368
Commit: 9b15329039ee6bc66deb4fcb5473bb09c95a12b7
Parents: e6afd27
Author: sunilg 
Authored: Thu Aug 25 23:21:29 2016 +0530
Committer: sunilg 
Committed: Thu Aug 25 23:21:29 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 59 +---
 .../src/main/webapp/ember-cli-build.js  |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  3 +-
 3 files changed, 17 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b153290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 2933a76..fca8d30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -35,7 +35,7 @@
 node
 v0.12.2
 2.10.0
-false
+false
   
 
   
@@ -60,19 +60,20 @@
   
 
   
- maven-clean-plugin
- 3.0.0
- 
-false
-
-   
-  
${basedir}/src/main/webapp/bower_components
-   
-   
-  
${basedir}/src/main/webapp/node_modules
-   
-
- 
+maven-clean-plugin
+3.0.0
+
+  ${keep-ui-build-cache}
+  false
+  
+
+  
${basedir}/src/main/webapp/bower_components
+
+
+  ${basedir}/src/main/webapp/node_modules
+
+  
+
   
 
   
@@ -126,21 +127,6 @@
 
   
   
-generate-sources
-bower --allow-root install
-
-  exec
-
-
-  ${webappDir}
-  bower
-  
---allow-root
-install
-  
-
-  
-  
 ember build
 generate-sources
 
@@ -158,21 +144,6 @@
 
   
   
-ember test
-generate-resources
-
-  exec
-
-
-  ${skipTests}
-  ${webappDir}
-  ember
-  
-test
-  
-
-  
-  
 cleanup tmp
 generate-sources
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b153290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index d21cc3e..7736c75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -22,7 +22,7 @@ var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
   var app = new EmberApp(defaults, {
-// Add options here
+hinting: false
   });
 
   
app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b153290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index baa473a..6a4eb16 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -9,8 +9,7 @@
   },
   "scripts": {
 "build": "ember build",
-"start": "ember server",
-"test": "ember test"
+"start": "ember server"
   },
   "repository": "",
   "engines": {



[1/2] hadoop git commit: HDFS-9145. Tracking methods that hold FSNamesystemLock for too long. Contributed by Mingliang Liu.

2016-08-25 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e5cb9d9e1 -> 8aa18534d


HDFS-9145. Tracking methods that hold FSNamesystemLock for too long. Contributed by Mingliang Liu.

(cherry picked from commit 5c5362980c429a5ad0b58e0dd1933de9cde7f369)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d8a1636
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d8a1636
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d8a1636

Branch: refs/heads/branch-2.7
Commit: 0d8a1636ef292c67d0537581552ce03024c2d04d
Parents: e5cb9d9
Author: Haohui Mai 
Authored: Tue Oct 13 13:20:11 2015 -0700
Committer: Zhe Zhang 
Committed: Thu Aug 25 10:18:50 2016 -0700

--
 .../apache/hadoop/test/GenericTestUtils.java|  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 21 
 .../hdfs/server/namenode/TestFSNamesystem.java  | 56 
 3 files changed, 81 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d8a1636/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 379f272..466afe0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -195,6 +195,10 @@ public abstract class GenericTestUtils {
   logger.removeAppender(appender);
 
 }
+
+public void clearOutput() {
+  sw.getBuffer().setLength(0);
+}
   }
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d8a1636/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bb99fb2..0971539 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1472,6 +1472,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return Util.stringCollectionAsURIs(dirNames);
   }
 
+  /** Threshold (ms) for long holding write lock report. */
+  static final short WRITELOCK_REPORTING_THRESHOLD = 1000;
+  /** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
+  private long writeLockHeldTimeStamp;
+
   @Override
   public void readLock() {
 this.fsLock.readLock().lock();
@@ -1483,14 +1488,30 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Override
   public void writeLock() {
 this.fsLock.writeLock().lock();
+if (fsLock.getWriteHoldCount() == 1) {
+  writeLockHeldTimeStamp = monotonicNow();
+}
   }
   @Override
   public void writeLockInterruptibly() throws InterruptedException {
 this.fsLock.writeLock().lockInterruptibly();
+if (fsLock.getWriteHoldCount() == 1) {
+  writeLockHeldTimeStamp = monotonicNow();
+}
   }
   @Override
   public void writeUnlock() {
+final boolean needReport = fsLock.getWriteHoldCount() == 1 &&
+fsLock.isWriteLockedByCurrentThread();
 this.fsLock.writeLock().unlock();
+
+if (needReport) {
+  long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
+  if (writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
+LOG.info("FSNamesystem write lock held for " + writeLockInterval +
+" ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
+  }
+}
   }
   @Override
   public boolean hasWriteLock() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d8a1636/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 2453542..5705922 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 

[2/2] hadoop git commit: HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. Contributed by Mingliang Liu.

2016-08-25 Thread zhz
HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. 
Contributed by Mingliang Liu.

(cherry picked from commit e556c35b0596700f9ec9d0a51cf5027259d531b5)
(cherry picked from commit 288cf8437b7e03f071e95eb05e83a26e58fff26b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa18534
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa18534
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa18534

Branch: refs/heads/branch-2.7
Commit: 8aa18534d9d1bee03e3a5b8db15d2b2034e3adc6
Parents: 0d8a163
Author: Jing Zhao 
Authored: Wed Nov 25 14:21:06 2015 -0800
Committer: Zhe Zhang 
Committed: Thu Aug 25 10:34:43 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 11 +--
 1 file changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa18534/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0971539..bd697e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1503,14 +1503,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   public void writeUnlock() {
 final boolean needReport = fsLock.getWriteHoldCount() == 1 &&
 fsLock.isWriteLockedByCurrentThread();
+final long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
+
 this.fsLock.writeLock().unlock();
 
-if (needReport) {
-  long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
-  if (writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
-LOG.info("FSNamesystem write lock held for " + writeLockInterval +
-" ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
-  }
+if (needReport && writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
+  LOG.info("FSNamesystem write lock held for " + writeLockInterval +
+  " ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
 }
   }
   @Override
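Read together with HDFS-9145 above, the corrected sequence is: stamp the clock when the outermost writeLock() acquisition happens, snapshot the elapsed time before releasing the lock (once the lock is dropped another thread may re-acquire it and overwrite the shared timestamp, which was the race), and only then log. A condensed sketch of the pattern, with a plain ReentrantReadWriteLock and System.currentTimeMillis standing in for FSNamesystemLock and the monotonic clock used in the real code:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TimedWriteLock {
  private static final long REPORT_THRESHOLD_MS = 1000;
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();
  private long writeLockHeldTimeStamp;

  public void writeLock() {
    fsLock.writeLock().lock();
    if (fsLock.getWriteHoldCount() == 1) {
      // Only the outermost acquisition of a re-entrant hold starts the clock.
      writeLockHeldTimeStamp = System.currentTimeMillis();
    }
  }

  public void writeUnlock() {
    final boolean needReport = fsLock.getWriteHoldCount() == 1
        && fsLock.isWriteLockedByCurrentThread();
    // Snapshot while still holding the lock; after unlock() another thread
    // could overwrite writeLockHeldTimeStamp (the HDFS-9467 race).
    final long heldMs = System.currentTimeMillis() - writeLockHeldTimeStamp;
    fsLock.writeLock().unlock();
    if (needReport && heldMs >= REPORT_THRESHOLD_MS) {
      System.err.println("write lock held for " + heldMs + " ms");
    }
  }
}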





[1/2] hadoop git commit: HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.

2016-08-25 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 5fe29062e -> e5cb9d9e1


HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.

(cherry picked from commit cb03768b1b2250b9b5a7944cf6ef918e8a974e20)
(cherry picked from commit 57d55d40dd41ba4449b91c4676131b6b840052c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1d8e421
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1d8e421
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1d8e421

Branch: refs/heads/branch-2.7
Commit: a1d8e421f698c496f80428ddb0b9be4e1733e2e8
Parents: 5fe2906
Author: cnauroth 
Authored: Tue Jul 21 13:55:58 2015 -0700
Committer: Zhe Zhang 
Committed: Thu Aug 25 09:39:27 2016 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md| 1 +
 .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java| 7 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 6 ++
 .../hdfs/server/namenode/metrics/FSNamesystemMBean.java   | 5 +
 .../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 6 ++
 .../hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java| 5 +
 6 files changed, 30 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d8e421/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 01a8d5b..695641e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -215,6 +215,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `TotalLoad` | Current number of connections |
 | `SnapshottableDirectories` | Current number of snapshottable directories |
 | `Snapshots` | Current number of snapshots |
+| `NumEncryptionZones` | Current number of encryption zones |
 | `BlocksTotal` | Current number of allocated blocks in the system |
 | `FilesTotal` | Current number of files and directories |
 | `PendingReplicationBlocks` | Current number of blocks pending to be 
replicated |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d8e421/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index bda7aba..b30b2f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -387,6 +387,13 @@ public class EncryptionZoneManager {
   }
 
   /**
+   * @return number of encryption zones.
+   */
+  public int getNumEncryptionZones() {
+return encryptionZones.size();
+  }
+
+  /**
* @return Whether there has been any attempt to create an encryption zone in
* the cluster at all. If not, it is safe to quickly return null when
* checking the encryption information of any file or directory in the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d8e421/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1031010..cd1fe4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4930,6 +4930,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return JSON.toString(info);
   }
 
+  @Override // FSNamesystemMBean
+  @Metric({ "NumEncryptionZones", "The number of encryption zones" })
+  public int getNumEncryptionZones() {
+return dir.ezManager.getNumEncryptionZones();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
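The NumEncryptionZones gauge above is exposed purely by annotating a getter on a registered metrics source with @Metric; the metrics2 framework discovers it by reflection. A minimal, illustrative source showing the same pattern (the class, source name and setter below are made up for the example, not part of the patch):

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

@Metrics(about = "Example encryption zone metrics", context = "dfs")
public class ExampleEZMetricsSource {
  private volatile int numEncryptionZones;

  // metrics2 publishes @Metric-annotated getters on registered sources as
  // gauges, which is how NumEncryptionZones surfaces for the NameNode.
  @Metric({"NumEncryptionZones", "The number of encryption zones"})
  public int getNumEncryptionZones() {
    return numEncryptionZones;
  }

  public void setNumEncryptionZones(int n) {
    this.numEncryptionZones = n;
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("example");
    ExampleEZMetricsSource source = DefaultMetricsSystem.instance()
        .register("ExampleEZSource", "Example source", new ExampleEZMetricsSource());
    source.setNumEncryptionZones(3);
  }
}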


[2/2] hadoop git commit: HDFS-8883. NameNode Metrics : Add FSNameSystem lock Queue Length. Contributed by Anu Engineer.

2016-08-25 Thread zhz
HDFS-8883. NameNode Metrics : Add FSNameSystem lock Queue Length. Contributed 
by Anu Engineer.

(cherry picked from commit a7862d5fe4c505f5d4b0c675438a971733f1f53a)
(cherry picked from commit 27ccbd51f6827d4e4a26c2026c024a3400ab96f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5cb9d9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5cb9d9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5cb9d9e

Branch: refs/heads/branch-2.7
Commit: e5cb9d9e186deba933099e5c1906dc3d80efe5c9
Parents: a1d8e42
Author: Xiaoyu Yao 
Authored: Mon Aug 17 10:15:56 2015 -0700
Committer: Zhe Zhang 
Committed: Thu Aug 25 10:10:18 2016 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++
 .../hdfs/server/namenode/FSNamesystemLock.java  | 11 
 .../namenode/metrics/FSNamesystemMBean.java |  9 +++
 .../hdfs/server/namenode/TestFSNamesystem.java  | 28 
 .../server/namenode/TestNameNodeMXBean.java | 22 ++-
 6 files changed, 85 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5cb9d9e/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 695641e..a29a7d9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -230,6 +230,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `TotalFiles` | Current number of files and directories (same as FilesTotal) |
+| `LockQueueLength` | Number of threads waiting to acquire FSNameSystem lock |
 
 JournalNode
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5cb9d9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index cd1fe4b..bb99fb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4936,6 +4936,21 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return dir.ezManager.getNumEncryptionZones();
   }
 
+  /**
+   * Returns the length of the wait Queue for the FSNameSystemLock.
+   *
+   * A larger number here indicates lots of threads are waiting for
+   * FSNameSystemLock.
+   *
+   * @return int - Number of Threads waiting to acquire FSNameSystemLock
+   */
+  @Override
+  @Metric({"LockQueueLength", "Number of threads waiting to " +
+  "acquire FSNameSystemLock"})
+  public int getFsLockQueueLength() {
+return fsLock.getQueueLength();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5cb9d9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 7e820d8..d239796 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -59,4 +59,15 @@ class FSNamesystemLock implements ReadWriteLock {
   public boolean isWriteLockedByCurrentThread() {
 return coarseLock.isWriteLockedByCurrentThread();
   }
+
+  /**
+   * Returns the QueueLength of waiting threads.
+   *
+   * A larger number indicates greater lock contention.
+   *
+   * @return int - Number of threads waiting on this lock
+   */
+  public int getQueueLength() {
+return coarseLock.getQueueLength();
+  }
 }


hadoop git commit: MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.

2016-08-25 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 db415ea16 -> 8d467037c


MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.

(cherry picked from commit 1360bd2d545134b582e70f2add33a105710dc80b)
(cherry picked from commit 13cbf1677ef5ba82b16d9a5ab1661333a492226d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d467037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d467037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d467037

Branch: refs/heads/branch-2.8
Commit: 8d467037cf6fba3af3ed14e08ad8e7e4d2f6a72a
Parents: db415ea
Author: Wei-Chiu Chuang 
Authored: Thu Aug 25 09:49:23 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 25 09:54:57 2016 -0700

--
 .../src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d467037/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index d7d751a..22fe344 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  * bin/hadoop jar hadoop-*-examples.jar teragen 100 in-dir
  */
 public class TeraGen extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(TeraSort.class);
+  private static final Log LOG = LogFactory.getLog(TeraGen.class);
 
   public static enum Counters {CHECKSUM}
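The reason a one-character class reference matters: commons-logging names the logger after the class passed to LogFactory.getLog(), and that name is what tags each log line and what log4j uses for per-class level control, so with the old copy-paste TeraGen's messages were filed under the TeraSort category. A tiny illustration of the idiom (the class name here is made up):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LoggerNamingDemo {
  // Named after the emitting class, so a setting such as
  // log4j.logger.LoggerNamingDemo=DEBUG targets exactly this class.
  private static final Log LOG = LogFactory.getLog(LoggerNamingDemo.class);

  public static void main(String[] args) {
    LOG.info("log lines carry the logger's class name");
  }
}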
 





hadoop git commit: MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.

2016-08-25 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fc6750139 -> 13cbf1677


MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.

(cherry picked from commit 1360bd2d545134b582e70f2add33a105710dc80b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13cbf167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13cbf167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13cbf167

Branch: refs/heads/branch-2
Commit: 13cbf1677ef5ba82b16d9a5ab1661333a492226d
Parents: fc67501
Author: Wei-Chiu Chuang 
Authored: Thu Aug 25 09:49:23 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 25 09:51:17 2016 -0700

--
 .../src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13cbf167/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index d7d751a..22fe344 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  * bin/hadoop jar hadoop-*-examples.jar teragen 100 in-dir
  */
 public class TeraGen extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(TeraSort.class);
+  private static final Log LOG = LogFactory.getLog(TeraGen.class);
 
   public static enum Counters {CHECKSUM}
 





hadoop git commit: MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.

2016-08-25 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk d288a0ba8 -> 1360bd2d5


MAPREDUCE-6764. Teragen LOG initialization bug. Contributed by Yufei Gu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1360bd2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1360bd2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1360bd2d

Branch: refs/heads/trunk
Commit: 1360bd2d545134b582e70f2add33a105710dc80b
Parents: d288a0b
Author: Wei-Chiu Chuang 
Authored: Thu Aug 25 09:49:23 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 25 09:50:12 2016 -0700

--
 .../src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1360bd2d/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index d7d751a..22fe344 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  * bin/hadoop jar hadoop-*-examples.jar teragen 100 in-dir
  */
 public class TeraGen extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(TeraSort.class);
+  private static final Log LOG = LogFactory.getLog(TeraGen.class);
 
   public static enum Counters {CHECKSUM}
 





hadoop git commit: HADOOP-13465. Design Server.Call to be extensible for unified call queue. Contributed by Daryn Sharp.

2016-08-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4da5000dd -> d288a0ba8


HADOOP-13465. Design Server.Call to be extensible for unified call queue. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d288a0ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d288a0ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d288a0ba

Branch: refs/heads/trunk
Commit: d288a0ba8364d81aacda9f4a21022eecb6dc4e22
Parents: 4da5000
Author: Kihwal Lee 
Authored: Thu Aug 25 11:43:39 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Aug 25 11:44:13 2016 -0500

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 336 +++
 1 file changed, 191 insertions(+), 145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d288a0ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 4c73f6a..09fe889 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -354,10 +354,9 @@ public abstract class Server {
*/
   public static InetAddress getRemoteIp() {
 Call call = CurCall.get();
-return (call != null && call.connection != null) ? call.connection
-.getHostInetAddress() : null;
+return (call != null ) ? call.getHostInetAddress() : null;
   }
-  
+
   /**
* Returns the clientId from the current RPC request
*/
@@ -380,10 +379,9 @@ public abstract class Server {
*/
   public static UserGroupInformation getRemoteUser() {
 Call call = CurCall.get();
-return (call != null && call.connection != null) ? call.connection.user
-: null;
+return (call != null) ? call.getRemoteUser() : null;
   }
- 
+
   /** Return true if the invocation was through an RPC.
*/
   public static boolean isRpcInvocation() {
@@ -483,7 +481,7 @@ public abstract class Server {
 if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
 (processingTime > threeSigma)) {
   if(LOG.isWarnEnabled()) {
-String client = CurCall.get().connection.toString();
+String client = CurCall.get().toString();
 LOG.warn(
 "Slow RPC : " + methodName + " took " + processingTime +
 " milliseconds to process from client " + client);
@@ -657,62 +655,65 @@ public abstract class Server {
 CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT);
   }
 
-  /** A call queued for handling. */
-  public static class Call implements Schedulable {
-private final int callId; // the client's call id
-private final int retryCount;// the retry count of the call
-private final Writable rpcRequest;// Serialized Rpc request from client
-private final Connection connection;  // connection to client
-private long timestamp;   // time received when response is null
-  // time served when response is not null
-private ByteBuffer rpcResponse;   // the response for this call
+  /** A generic call queued for handling. */
+  public static class Call implements Schedulable,
+  PrivilegedExceptionAction {
+final int callId;// the client's call id
+final int retryCount;// the retry count of the call
+long timestamp;  // time received when response is null
+ // time served when response is not null
 private AtomicInteger responseWaitCount = new AtomicInteger(1);
-private final RPC.RpcKind rpcKind;
-private final byte[] clientId;
+final RPC.RpcKind rpcKind;
+final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
 private final CallerContext callerContext; // the call context
 private int priorityLevel;
 // the priority level assigned by scheduler, 0 by default
 
-private Call(Call call) {
-  this(call.callId, call.retryCount, call.rpcRequest, call.connection,
-  call.rpcKind, call.clientId, call.traceScope, call.callerContext);
+Call(Call call) {
+  this(call.callId, call.retryCount, call.rpcKind, call.clientId,
+  call.traceScope, call.callerContext);
 }
 
-public Call(int id, int retryCount, Writable param, 
-Connection connection) {
-  this(id, retryCount, param, connection, RPC.RpcKind.RPC_BUILTIN,
-  
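
The hunks above remove the Connection field from the base Call class and have getRemoteIp()/getRemoteUser() go through accessors on the call object itself, so the static helpers only null-check the call. A minimal, hypothetical sketch of that delegation shape (the class and field names below are illustrative, not the types this commit actually introduces):

import java.net.InetAddress;

// Base call: safe defaults for calls that carry no client connection.
abstract class GenericCall {
  InetAddress getHostInetAddress() {
    return null;
  }
}

// Connection-backed call: supplies the real client address.
final class ConnectionBackedCall extends GenericCall {
  private final InetAddress clientAddress;

  ConnectionBackedCall(InetAddress clientAddress) {
    this.clientAddress = clientAddress;
  }

  @Override
  InetAddress getHostInetAddress() {
    return clientAddress;
  }
}

final class DelegationSketch {
  // Mirrors the simplified null check in getRemoteIp(): only the call itself
  // is checked, never call.connection.
  static InetAddress remoteIpOf(GenericCall call) {
    return (call != null) ? call.getHostInetAddress() : null;
  }
}
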

hadoop git commit: HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.

2016-08-25 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8657dece8 -> db415ea16


HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.

(cherry picked from commit 4da5000dd33cf013e7212848ed2c44f1e60e860e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db415ea1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db415ea1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db415ea1

Branch: refs/heads/branch-2.8
Commit: db415ea166221f3439efe2a5bc486be7a509f471
Parents: 8657dec
Author: Xiaoyu Yao 
Authored: Thu Aug 25 09:00:44 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Aug 25 09:22:16 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db415ea1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 6755c26..bc81987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -699,6 +699,9 @@ public class TestFileTruncate {
 assertEquals(newBlock.getBlock().getGenerationStamp(),
 oldBlock.getBlock().getGenerationStamp() + 1);
 
+Thread.sleep(2000);
+// trigger the second time BR to delete the corrupted replica if there's one
+cluster.triggerBlockReports();
 // Wait replicas come to 3
 DFSTestUtil.waitReplication(fs, p, REPLICATION);
 // Old replica is disregarded and replaced with the truncated one





hadoop git commit: HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.

2016-08-25 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4037b22b7 -> fc6750139


HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.

(cherry picked from commit 4da5000dd33cf013e7212848ed2c44f1e60e860e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc675013
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc675013
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc675013

Branch: refs/heads/branch-2
Commit: fc675013931c3490c8c35429210f7dc35c7ce02e
Parents: 4037b22
Author: Xiaoyu Yao 
Authored: Thu Aug 25 09:00:44 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Aug 25 09:22:02 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc675013/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 776bbe5..d8881a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -699,6 +699,9 @@ public class TestFileTruncate {
 assertEquals(newBlock.getBlock().getGenerationStamp(),
 oldBlock.getBlock().getGenerationStamp() + 1);
 
+Thread.sleep(2000);
+// trigger the second time BR to delete the corrupted replica if there's one
+cluster.triggerBlockReports();
 // Wait replicas come to 3
 DFSTestUtil.waitReplication(fs, p, REPLICATION);
 // Old replica is disregarded and replaced with the truncated one





hadoop git commit: HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.

2016-08-25 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3d86110a5 -> 4da5000dd


HDFS-10748. TestFileTruncate#testTruncateWithDataNodesRestart sometimes times out. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4da5000d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4da5000d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4da5000d

Branch: refs/heads/trunk
Commit: 4da5000dd33cf013e7212848ed2c44f1e60e860e
Parents: 3d86110
Author: Xiaoyu Yao 
Authored: Thu Aug 25 09:00:44 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Aug 25 09:00:44 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4da5000d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 1032107..dd4dfbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -700,6 +700,9 @@ public class TestFileTruncate {
 assertEquals(newBlock.getBlock().getGenerationStamp(),
 oldBlock.getBlock().getGenerationStamp() + 1);
 
+Thread.sleep(2000);
+// trigger the second time BR to delete the corrupted replica if there's one
+cluster.triggerBlockReports();
 // Wait replicas come to 3
 DFSTestUtil.waitReplication(fs, p, REPLICATION);
 // Old replica is disregarded and replaced with the truncated one
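
The added lines lean on two HDFS test helpers: MiniDFSCluster#triggerBlockReports() makes every DataNode send a block report right away instead of waiting for the periodic report interval, and DFSTestUtil.waitReplication() blocks until the file reaches the expected replication factor. A standalone sketch of that wait pattern (the path, file size and cluster settings are made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class BlockReportWaitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path p = new Path("/sketch/file");
      DFSTestUtil.createFile(fs, p, 4096L, (short) 3, 0L);

      cluster.triggerBlockReports();                  // ask every DataNode to report now
      DFSTestUtil.waitReplication(fs, p, (short) 3);  // block until 3 live replicas are seen
    } finally {
      cluster.shutdown();
    }
  }
}
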





hadoop git commit: YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po

2016-08-25 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6de262cdc -> 4037b22b7


YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4037b22b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4037b22b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4037b22b

Branch: refs/heads/branch-2
Commit: 4037b22b7cb2755df17a65b03f6c0b9f63a476cd
Parents: 6de262c
Author: Jason Lowe 
Authored: Thu Aug 25 14:42:06 2016 +
Committer: Jason Lowe 
Committed: Thu Aug 25 15:10:23 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 45 +---
 1 file changed, 20 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4037b22b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 360ff99..8d68acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -41,6 +41,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -52,6 +53,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
@@ -1193,35 +1195,27 @@ public class TestYarnClient {
 }
   }
 
-  private MiniYARNCluster setupMiniYARNCluster() {
+  private MiniYARNCluster setupMiniYARNCluster() throws Exception {
 CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
 ReservationSystemTestUtil.setupQueueConfiguration(conf);
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
 ResourceScheduler.class);
 conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-MiniYARNCluster cluster =
+final MiniYARNCluster cluster =
 new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
 
 cluster.init(conf);
 cluster.start();
 
-int attempts;
-for (attempts = 10; attempts > 0; attempts--) {
-  if (cluster.getResourceManager().getRMContext().getReservationSystem()
-  .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
-  .getMemorySize() > 6000) {
-break;
-  }
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-e.printStackTrace();
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getResourceManager().getRMContext()
+.getReservationSystem()
+.getPlan(ReservationSystemTestUtil.reservationQ)
+.getTotalCapacity().getMemorySize() > 6000;
   }
-}
-if (attempts <= 0) {
-  Assert.fail("Exhausted attempts in checking if node capacity was "
-  + "added to the plan");
-}
+}, 10, 1);
 
 return cluster;
   }
@@ -1253,7 +1247,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testCreateReservation() {
+  public void testCreateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1296,7 +1290,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testUpdateReservation() {
+  public void testUpdateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1338,7 +1332,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testListReservationsByReservationId() {
+  public void testListReservationsByReservationId() throws Exception{
 MiniYARNCluster cluster = 
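
The hand-rolled polling loop above is replaced with org.apache.hadoop.test.GenericTestUtils.waitFor, which evaluates a Supplier<Boolean> at a fixed interval and throws a TimeoutException if the condition never becomes true. A minimal usage sketch (the stand-in condition and the 10 ms / 1000 ms intervals below are illustrative, not the values used by the test):

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    final long readyAt = System.currentTimeMillis() + 200;

    // Poll every 10 ms; give up with a TimeoutException after 1000 ms.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // The test checks the reservation plan capacity here instead.
        return System.currentTimeMillis() >= readyAt;
      }
    }, 10, 1000);

    System.out.println("condition satisfied");
  }
}
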

hadoop git commit: YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po (cherry picked from commit 4037b22b7cb2755df17a65b03f6c0b9f63a476cd)

2016-08-25 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 bd729af8a -> 8657dece8


YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po
(cherry picked from commit 4037b22b7cb2755df17a65b03f6c0b9f63a476cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8657dece
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8657dece
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8657dece

Branch: refs/heads/branch-2.8
Commit: 8657dece8bc84034ae85131b6823e2e60860d78e
Parents: bd729af
Author: Jason Lowe 
Authored: Thu Aug 25 14:42:06 2016 +
Committer: Jason Lowe 
Committed: Thu Aug 25 15:11:47 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 45 +---
 1 file changed, 20 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8657dece/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index dcbad49..299e598 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -41,6 +41,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -52,6 +53,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
@@ -1190,35 +1192,27 @@ public class TestYarnClient {
 }
   }
 
-  private MiniYARNCluster setupMiniYARNCluster() {
+  private MiniYARNCluster setupMiniYARNCluster() throws Exception {
 CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
 ReservationSystemTestUtil.setupQueueConfiguration(conf);
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
 ResourceScheduler.class);
 conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-MiniYARNCluster cluster =
+final MiniYARNCluster cluster =
 new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
 
 cluster.init(conf);
 cluster.start();
 
-int attempts;
-for (attempts = 10; attempts > 0; attempts--) {
-  if (cluster.getResourceManager().getRMContext().getReservationSystem()
-  .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
-  .getMemorySize() > 6000) {
-break;
-  }
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-e.printStackTrace();
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getResourceManager().getRMContext()
+.getReservationSystem()
+.getPlan(ReservationSystemTestUtil.reservationQ)
+.getTotalCapacity().getMemorySize() > 6000;
   }
-}
-if (attempts <= 0) {
-  Assert.fail("Exhausted attempts in checking if node capacity was "
-  + "added to the plan");
-}
+}, 10, 1);
 
 return cluster;
   }
@@ -1250,7 +1244,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testCreateReservation() {
+  public void testCreateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1293,7 +1287,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testUpdateReservation() {
+  public void testUpdateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1335,7 +1329,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testListReservationsByReservationId() {
+  public void 

hadoop git commit: Revert "YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po"

2016-08-25 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 782aeabcd -> 6de262cdc


Revert "YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po"

This reverts commit 782aeabcd55b1e1e933f0b924633d68662b50765.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de262cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de262cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de262cd

Branch: refs/heads/branch-2
Commit: 6de262cdc4bec30359d2fc3b264d5b794960e174
Parents: 782aeab
Author: Jason Lowe 
Authored: Thu Aug 25 14:59:20 2016 +
Committer: Jason Lowe 
Committed: Thu Aug 25 14:59:20 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 43 +++-
 1 file changed, 24 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de262cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index dd19acb..360ff99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -41,7 +41,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -53,7 +52,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
@@ -1195,7 +1193,7 @@ public class TestYarnClient {
 }
   }
 
-  private MiniYARNCluster setupMiniYARNCluster() throws Exception {
+  private MiniYARNCluster setupMiniYARNCluster() {
 CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
 ReservationSystemTestUtil.setupQueueConfiguration(conf);
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
@@ -1207,15 +1205,23 @@ public class TestYarnClient {
 cluster.init(conf);
 cluster.start();
 
-GenericTestUtils.waitFor(new Supplier<Boolean>() {
-  @Override
-  public Boolean get() {
-return cluster.getResourceManager().getRMContext()
-.getReservationSystem()
-.getPlan(ReservationSystemTestUtil.reservationQ)
-.getTotalCapacity().getMemorySize() > 6000;
+int attempts;
+for (attempts = 10; attempts > 0; attempts--) {
+  if (cluster.getResourceManager().getRMContext().getReservationSystem()
+  .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
+  .getMemorySize() > 6000) {
+break;
+  }
+  try {
+Thread.sleep(100);
+  } catch (InterruptedException e) {
+e.printStackTrace();
   }
-}, 10, 1);
+}
+if (attempts <= 0) {
+  Assert.fail("Exhausted attempts in checking if node capacity was "
+  + "added to the plan");
+}
 
 return cluster;
   }
@@ -1247,7 +1253,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testCreateReservation() throws Exception {
+  public void testCreateReservation() {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1290,7 +1296,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testUpdateReservation() throws Exception {
+  public void testUpdateReservation() {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1332,7 +1338,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testListReservationsByReservationId() throws Exception{
+  public void testListReservationsByReservationId() {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1369,7 +1375,7 @@ public class 

hadoop git commit: YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po (cherry picked from commit 3d86110a5ccfdaff8671fb6ad8f67b4ab66f33da)

2016-08-25 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 952c309dd -> 782aeabcd


YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po
(cherry picked from commit 3d86110a5ccfdaff8671fb6ad8f67b4ab66f33da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/782aeabc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/782aeabc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/782aeabc

Branch: refs/heads/branch-2
Commit: 782aeabcd55b1e1e933f0b924633d68662b50765
Parents: 952c309
Author: Jason Lowe 
Authored: Thu Aug 25 14:42:06 2016 +
Committer: Jason Lowe 
Committed: Thu Aug 25 14:47:40 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 43 +---
 1 file changed, 19 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/782aeabc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 360ff99..dd19acb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -41,6 +41,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -52,6 +53,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
@@ -1193,7 +1195,7 @@ public class TestYarnClient {
 }
   }
 
-  private MiniYARNCluster setupMiniYARNCluster() {
+  private MiniYARNCluster setupMiniYARNCluster() throws Exception {
 CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
 ReservationSystemTestUtil.setupQueueConfiguration(conf);
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
@@ -1205,23 +1207,15 @@ public class TestYarnClient {
 cluster.init(conf);
 cluster.start();
 
-int attempts;
-for (attempts = 10; attempts > 0; attempts--) {
-  if (cluster.getResourceManager().getRMContext().getReservationSystem()
-  .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
-  .getMemorySize() > 6000) {
-break;
-  }
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-e.printStackTrace();
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getResourceManager().getRMContext()
+.getReservationSystem()
+.getPlan(ReservationSystemTestUtil.reservationQ)
+.getTotalCapacity().getMemorySize() > 6000;
   }
-}
-if (attempts <= 0) {
-  Assert.fail("Exhausted attempts in checking if node capacity was "
-  + "added to the plan");
-}
+}, 10, 1);
 
 return cluster;
   }
@@ -1253,7 +1247,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testCreateReservation() {
+  public void testCreateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1296,7 +1290,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testUpdateReservation() {
+  public void testUpdateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1338,7 +1332,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testListReservationsByReservationId() {
+  public void testListReservationsByReservationId() throws Exception{
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1375,7 +1369,7 @@ public class 

hadoop git commit: YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po

2016-08-25 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 52b25fe6a -> 3d86110a5


YARN-5389. TestYarnClient#testReservationDelete fails. Contributed by Sean Po


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d86110a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d86110a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d86110a

Branch: refs/heads/trunk
Commit: 3d86110a5ccfdaff8671fb6ad8f67b4ab66f33da
Parents: 52b25fe
Author: Jason Lowe 
Authored: Thu Aug 25 14:42:06 2016 +
Committer: Jason Lowe 
Committed: Thu Aug 25 14:42:06 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 43 +---
 1 file changed, 19 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d86110a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 360ff99..dd19acb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -41,6 +41,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -52,6 +53,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
@@ -1193,7 +1195,7 @@ public class TestYarnClient {
 }
   }
 
-  private MiniYARNCluster setupMiniYARNCluster() {
+  private MiniYARNCluster setupMiniYARNCluster() throws Exception {
 CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
 ReservationSystemTestUtil.setupQueueConfiguration(conf);
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
@@ -1205,23 +1207,15 @@ public class TestYarnClient {
 cluster.init(conf);
 cluster.start();
 
-int attempts;
-for (attempts = 10; attempts > 0; attempts--) {
-  if (cluster.getResourceManager().getRMContext().getReservationSystem()
-  .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
-  .getMemorySize() > 6000) {
-break;
-  }
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-e.printStackTrace();
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getResourceManager().getRMContext()
+.getReservationSystem()
+.getPlan(ReservationSystemTestUtil.reservationQ)
+.getTotalCapacity().getMemorySize() > 6000;
   }
-}
-if (attempts <= 0) {
-  Assert.fail("Exhausted attempts in checking if node capacity was "
-  + "added to the plan");
-}
+}, 10, 1);
 
 return cluster;
   }
@@ -1253,7 +1247,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testCreateReservation() {
+  public void testCreateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1296,7 +1290,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testUpdateReservation() {
+  public void testUpdateReservation() throws Exception {
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1338,7 +1332,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testListReservationsByReservationId() {
+  public void testListReservationsByReservationId() throws Exception{
 MiniYARNCluster cluster = setupMiniYARNCluster();
 YarnClient client = setupYarnClient(cluster);
 try {
@@ -1375,7 +1369,7 @@ public class TestYarnClient {
   }
 
   @Test
-  public void 

hadoop git commit: HADOOP-13533. Do not require user to set HADOOP_SSH_OPTS to a non-null string, allow setting of an empty string. (Albert Chu via aw) closes #121

2016-08-25 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 601599b03 -> 52b25fe6a


HADOOP-13533. Do not require user to set HADOOP_SSH_OPTS to a non-null string, allow setting of an empty string. (Albert Chu via aw) closes #121

Signed-off-by: Allen Wittenauer 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52b25fe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52b25fe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52b25fe6

Branch: refs/heads/trunk
Commit: 52b25fe6ae8f1fb720502032637419cf46fd0027
Parents: 601599b
Author: Albert Chu 
Authored: Mon Aug 22 17:41:41 2016 -0700
Committer: Allen Wittenauer 
Committed: Thu Aug 25 07:11:06 2016 -0700

--
 .../hadoop-common/src/main/bin/hadoop-functions.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52b25fe6/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index b3aa018..75554f0 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -596,7 +596,7 @@ function hadoop_basic_init
   HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
   HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
   HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
-  HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
+  HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
   HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
   HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
   HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}





hadoop git commit: HADOOP-13532. Fix typo in hadoop_connect_to_hosts error message (Albert Chu via aw) closes #120

2016-08-25 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 525d52bf7 -> 601599b03


HADOOP-13532. Fix typo in hadoop_connect_to_hosts error message (Albert Chu via aw) closes #120


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/601599b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/601599b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/601599b0

Branch: refs/heads/trunk
Commit: 601599b03685c66444d94902485c388f47878cd6
Parents: 525d52b
Author: Allen Wittenauer 
Authored: Thu Aug 25 06:58:42 2016 -0700
Committer: Allen Wittenauer 
Committed: Thu Aug 25 06:58:42 2016 -0700

--
 .../hadoop-common/src/main/bin/hadoop-functions.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/601599b0/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index c380599..b3aa018 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -695,7 +695,7 @@ function hadoop_connect_to_hosts
   #
   # User can specify hostnames or a file where the hostnames are (not both)
   if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
-hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAME were defined. Aborting."
+hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
 exit 1
   elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
 if [[ -n "${HADOOP_WORKERS}" ]]; then





[1/2] hadoop git commit: YARN-5042. Mount /sys/fs/cgroup into Docker containers as read only mount. Contributed by luhuichun.

2016-08-25 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 42feab243 -> 952c309dd
  refs/heads/trunk 79603f588 -> 525d52bf7


YARN-5042. Mount /sys/fs/cgroup into Docker containers as read only mount. Contributed by luhuichun.

(cherry picked from commit 42d9876471ff0bf0ea240cd48fe483cda8aa1ec7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/952c309d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/952c309d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/952c309d

Branch: refs/heads/branch-2
Commit: 952c309dd993fbb69820891c839961d6b5c7372f
Parents: 42feab2
Author: Varun Vasudev 
Authored: Thu Aug 25 12:56:11 2016 +0530
Committer: Varun Vasudev 
Committed: Thu Aug 25 14:18:12 2016 +0530

--
 .../linux/runtime/DockerLinuxContainerRuntime.java| 7 ---
 .../linux/runtime/docker/DockerRunCommand.java| 7 ++-
 .../linux/runtime/TestDockerContainerRuntime.java | 4 
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/952c309d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index be17af9..2cce1f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -447,7 +447,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 .detachOnRun()
 .setContainerWorkDir(containerWorkDir.toString())
 .setNetworkType(network)
-.setCapabilities(capabilities);
+.setCapabilities(capabilities)
+.addMountLocation("/sys/fs/cgroup", "/sys/fs/cgroup:ro", false);
 List<String> allDirs = new ArrayList<>(containerLocalDirs);
 
 allDirs.addAll(filecacheDirs);
@@ -455,7 +456,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 allDirs.addAll(containerLogDirs);
 allDirs.addAll(userLocalDirs);
 for (String dir: allDirs) {
-  runCommand.addMountLocation(dir, dir);
+  runCommand.addMountLocation(dir, dir, true);
 }
 
 if (environment.containsKey(ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS)) {
@@ -470,7 +471,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   }
   String src = validateMount(dir[0], localizedResources);
   String dst = dir[1];
-  runCommand.addMountLocation(src, dst + ":ro");
+  runCommand.addMountLocation(src, dst + ":ro", true);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/952c309d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index 7c49ef9..f79f4ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import org.apache.hadoop.util.StringUtils;
 

[2/2] hadoop git commit: YARN-5042. Mount /sys/fs/cgroup into Docker containers as read only mount. Contributed by luhuichun.

2016-08-25 Thread vvasudev
YARN-5042. Mount /sys/fs/cgroup into Docker containers as read only mount. Contributed by luhuichun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/525d52bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/525d52bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/525d52bf

Branch: refs/heads/trunk
Commit: 525d52bf7c18d1e334e02e6fb936400c9cf2b0bc
Parents: 79603f5
Author: Varun Vasudev 
Authored: Thu Aug 25 12:56:11 2016 +0530
Committer: Varun Vasudev 
Committed: Thu Aug 25 14:18:26 2016 +0530

--
 .../linux/runtime/DockerLinuxContainerRuntime.java| 7 ---
 .../linux/runtime/docker/DockerRunCommand.java| 7 ++-
 .../linux/runtime/TestDockerContainerRuntime.java | 4 
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/525d52bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index be17af9..2cce1f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -447,7 +447,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 .detachOnRun()
 .setContainerWorkDir(containerWorkDir.toString())
 .setNetworkType(network)
-.setCapabilities(capabilities);
+.setCapabilities(capabilities)
+.addMountLocation("/sys/fs/cgroup", "/sys/fs/cgroup:ro", false);
 List<String> allDirs = new ArrayList<>(containerLocalDirs);
 
 allDirs.addAll(filecacheDirs);
@@ -455,7 +456,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 allDirs.addAll(containerLogDirs);
 allDirs.addAll(userLocalDirs);
 for (String dir: allDirs) {
-  runCommand.addMountLocation(dir, dir);
+  runCommand.addMountLocation(dir, dir, true);
 }
 
 if (environment.containsKey(ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS)) {
@@ -470,7 +471,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   }
   String src = validateMount(dir[0], localizedResources);
   String dst = dir[1];
-  runCommand.addMountLocation(src, dst + ":ro");
+  runCommand.addMountLocation(src, dst + ":ro", true);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/525d52bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index 7c49ef9..f79f4ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import org.apache.hadoop.util.StringUtils;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -59,7 +60,11 @@ public class DockerRunCommand extends DockerCommand {
   }
 
   public 
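
The ":ro" suffix passed for the cgroup mount is ordinary Docker volume syntax for a read-only bind mount. The resulting container invocation looks roughly like the following (the image name and the writable NodeManager directory are hypothetical, not what the NodeManager literally generates):

# Read-only cgroup bind mount alongside a writable local dir (illustrative only).
docker run --rm \
  -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
  -v /tmp/hadoop-yarn/nm-local-dir:/tmp/hadoop-yarn/nm-local-dir \
  centos:7 \
  ls /sys/fs/cgroup
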

hadoop git commit: YARN-5537. Fix intermittent failure of TestAMRMClient#testAMRMClientWithContainerResourceChange (Bibin A Chundatt via Varun Saxena)

2016-08-25 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ba3257baf -> 42feab243


YARN-5537. Fix intermittent failure of TestAMRMClient#testAMRMClientWithContainerResourceChange (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42feab24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42feab24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42feab24

Branch: refs/heads/branch-2
Commit: 42feab243614979bc708cbb3fa46f807d7ddd344
Parents: ba3257b
Author: Varun Saxena 
Authored: Thu Aug 25 14:12:28 2016 +0530
Committer: Varun Saxena 
Committed: Thu Aug 25 14:12:28 2016 +0530

--
 .../yarn/client/api/impl/TestAMRMClient.java| 47 +++-
 1 file changed, 27 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42feab24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 57cdbfb..1eeeb78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -1007,26 +1007,33 @@ public class TestAMRMClient {
 Assert.assertEquals(2, amClientImpl.pendingChange.size());
 // as of now: container1 asks to decrease to (512, 1)
 //container2 asks to increase to (2048, 1)
-// send allocation requests
-AllocateResponse allocResponse = amClient.allocate(0.1f);
-Assert.assertEquals(0, amClientImpl.change.size());
-// we should get decrease confirmation right away
-List<Container> decreasedContainers =
-allocResponse.getDecreasedContainers();
-List<Container> increasedContainers =
-allocResponse.getIncreasedContainers();
-Assert.assertEquals(1, decreasedContainers.size());
-Assert.assertEquals(0, increasedContainers.size());
-// we should get increase allocation after the next NM's heartbeat to RM
-sleep(150);
-// get allocations
-allocResponse = amClient.allocate(0.1f);
-decreasedContainers =
-allocResponse.getDecreasedContainers();
-increasedContainers =
-allocResponse.getIncreasedContainers();
-Assert.assertEquals(1, increasedContainers.size());
-Assert.assertEquals(0, decreasedContainers.size());
+List<Container> decreasedContainers;
+List<Container> increasedContainers;
+int allocateAttempts = 0;
+int decreased = 0;
+int increased = 0;
+while (allocateAttempts < 30) {
+  // send allocation requests
+  AllocateResponse allocResponse = amClient.allocate(0.1f);
+  decreasedContainers = allocResponse.getDecreasedContainers();
+  increasedContainers = allocResponse.getIncreasedContainers();
+  decreased += decreasedContainers.size();
+  increased += increasedContainers.size();
+  if (allocateAttempts == 0) {
+// we should get decrease confirmation right away
+Assert.assertEquals(1, decreased);
+// After first allocate request check change size
+Assert.assertEquals(0, amClientImpl.change.size());
+  } else if (increased == 1) {
+break;
+  }
+  // increase request is served after next NM heart beat is received
+  // Sleeping and retrying allocate
+  sleep(20);
+  allocateAttempts++;
+}
+Assert.assertEquals(1, decreased);
+Assert.assertEquals(1, increased);
   }
 
   private void testAllocation(final AMRMClientImpl amClient)





hadoop git commit: YARN-5537. Fix intermittent failure of TestAMRMClient#testAMRMClientWithContainerResourceChange (Bibin A Chundatt via Varun Saxena)

2016-08-25 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab3b727b5 -> 79603f588


YARN-5537. Fix intermittent failure of TestAMRMClient#testAMRMClientWithContainerResourceChange (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79603f58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79603f58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79603f58

Branch: refs/heads/trunk
Commit: 79603f588238d47c77748682add0502fb89bbf48
Parents: ab3b727
Author: Varun Saxena 
Authored: Thu Aug 25 14:10:34 2016 +0530
Committer: Varun Saxena 
Committed: Thu Aug 25 14:10:34 2016 +0530

--
 .../yarn/client/api/impl/TestAMRMClient.java| 47 +++-
 1 file changed, 27 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79603f58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 57cdbfb..1eeeb78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -1007,26 +1007,33 @@ public class TestAMRMClient {
 Assert.assertEquals(2, amClientImpl.pendingChange.size());
 // as of now: container1 asks to decrease to (512, 1)
 //container2 asks to increase to (2048, 1)
-// send allocation requests
-AllocateResponse allocResponse = amClient.allocate(0.1f);
-Assert.assertEquals(0, amClientImpl.change.size());
-// we should get decrease confirmation right away
-List<Container> decreasedContainers =
-allocResponse.getDecreasedContainers();
-List<Container> increasedContainers =
-allocResponse.getIncreasedContainers();
-Assert.assertEquals(1, decreasedContainers.size());
-Assert.assertEquals(0, increasedContainers.size());
-// we should get increase allocation after the next NM's heartbeat to RM
-sleep(150);
-// get allocations
-allocResponse = amClient.allocate(0.1f);
-decreasedContainers =
-allocResponse.getDecreasedContainers();
-increasedContainers =
-allocResponse.getIncreasedContainers();
-Assert.assertEquals(1, increasedContainers.size());
-Assert.assertEquals(0, decreasedContainers.size());
+List<Container> decreasedContainers;
+List<Container> increasedContainers;
+int allocateAttempts = 0;
+int decreased = 0;
+int increased = 0;
+while (allocateAttempts < 30) {
+  // send allocation requests
+  AllocateResponse allocResponse = amClient.allocate(0.1f);
+  decreasedContainers = allocResponse.getDecreasedContainers();
+  increasedContainers = allocResponse.getIncreasedContainers();
+  decreased += decreasedContainers.size();
+  increased += increasedContainers.size();
+  if (allocateAttempts == 0) {
+// we should get decrease confirmation right away
+Assert.assertEquals(1, decreased);
+// After first allocate request check change size
+Assert.assertEquals(0, amClientImpl.change.size());
+  } else if (increased == 1) {
+break;
+  }
+  // increase request is served after next NM heart beat is received
+  // Sleeping and retrying allocate
+  sleep(20);
+  allocateAttempts++;
+}
+Assert.assertEquals(1, decreased);
+Assert.assertEquals(1, increased);
   }
 
   private void testAllocation(final AMRMClientImpl amClient)

