hadoop git commit: HDFS-13544. Improve logging for JournalNode in federated cluster.

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6653f4ba2 -> 6beb25ab7


HDFS-13544. Improve logging for JournalNode in federated cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6beb25ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6beb25ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6beb25ab

Branch: refs/heads/trunk
Commit: 6beb25ab7e4f5454dba0315a296081e61753f301
Parents: 6653f4b
Author: Hanisha Koneru 
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon May 14 10:12:08 2018 -0700

--
 .../hadoop/hdfs/qjournal/server/Journal.java| 115 +++
 1 file changed, 64 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6beb25ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 408ce76..452664a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -208,11 +208,12 @@ public class Journal implements Closeable {
 while (!files.isEmpty()) {
   EditLogFile latestLog = files.remove(files.size() - 1);
   latestLog.scanLog(Long.MAX_VALUE, false);
-  LOG.info("Latest log is " + latestLog);
+  LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
   if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
 // the log contains no transactions
 LOG.warn("Latest log " + latestLog + " has no transactions. " +
-"moving it aside and looking for previous log");
+"moving it aside and looking for previous log"
++ " ; journal id: " + journalId);
 latestLog.moveAsideEmptyFile();
   } else {
 return latestLog;
@@ -230,7 +231,7 @@ public class Journal implements Closeable {
 Preconditions.checkState(nsInfo.getNamespaceID() != 0,
 "can't format with uninitialized namespace info: %s",
 nsInfo);
-LOG.info("Formatting " + this + " with namespace info: " +
+LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
 nsInfo);
 storage.format(nsInfo);
 refreshCachedData();
@@ -323,7 +324,7 @@ public class Journal implements Closeable {
 // any other that we've promised. 
 if (epoch <= getLastPromisedEpoch()) {
   throw new IOException("Proposed epoch " + epoch + " <= last promise " +
-  getLastPromisedEpoch());
+  getLastPromisedEpoch() + " ; journal id: " + journalId);
 }
 
 updateLastPromisedEpoch(epoch);
@@ -343,7 +344,8 @@ public class Journal implements Closeable {
 
   private void updateLastPromisedEpoch(long newEpoch) throws IOException {
 LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
-" to " + newEpoch + " for client " + Server.getRemoteIp());
+" to " + newEpoch + " for client " + Server.getRemoteIp() +
+" ; journal id: " + journalId);
 lastPromisedEpoch.set(newEpoch);
 
 // Since we have a new writer, reset the IPC serial - it will start
@@ -378,7 +380,7 @@ public class Journal implements Closeable {
 }
 
 checkSync(curSegment != null,
-"Can't write, no segment open");
+"Can't write, no segment open" + " ; journal id: " + journalId);
 
 if (curSegmentTxId != segmentTxId) {
   // Sanity check: it is possible that the writer will fail IPCs
@@ -389,17 +391,20 @@ public class Journal implements Closeable {
   // and throw an exception.
   JournalOutOfSyncException e = new JournalOutOfSyncException(
   "Writer out of sync: it thinks it is writing segment " + segmentTxId
-  + " but current segment is " + curSegmentTxId);
+  + " but current segment is " + curSegmentTxId
+  + " ; journal id: " + journalId);
   abortCurSegment();
   throw e;
 }
   
 checkSync(nextTxId == firstTxnId,
-"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
++ " ; journal id: " + journalId);
 
 long lastTxnId = firstTxnId + numTxns - 1;
 if (LOG.isTraceEnabled()) {
-  LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
+  LOG.trace("Writing txid " + 
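
[Editor's note: every hunk in this patch follows the same pattern -- append the
journal's identifier to the existing log or exception message. A minimal sketch
of the idea, assuming the journalId field of Journal.java; the helper itself is
hypothetical and not part of the patch:

    // Hypothetical helper showing the suffix the patch appends everywhere.
    private String withJournalId(String msg) {
      return msg + " ; journal id: " + journalId;
    }

    // e.g. LOG.info(withJournalId("Latest log is " + latestLog));
    //      checkSync(curSegment != null,
    //          withJournalId("Can't write, no segment open"));
]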

hadoop git commit: HDFS-13544. Improve logging for JournalNode in federated cluster.

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eabc588d2 -> c998c126b


HDFS-13544. Improve logging for JournalNode in federated cluster.

(cherry picked from commit 6beb25ab7e4f5454dba0315a296081e61753f301)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c998c126
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c998c126
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c998c126

Branch: refs/heads/branch-2
Commit: c998c126b89b226ea71e8aa8bb57960e6208b253
Parents: eabc588
Author: Hanisha Koneru 
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon May 14 10:21:59 2018 -0700

--
 .../hadoop/hdfs/qjournal/server/Journal.java| 110 ++-
 1 file changed, 61 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c998c126/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 93d3c41..f63fea8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -193,11 +193,12 @@ public class Journal implements Closeable {
 while (!files.isEmpty()) {
   EditLogFile latestLog = files.remove(files.size() - 1);
   latestLog.scanLog(Long.MAX_VALUE, false);
-  LOG.info("Latest log is " + latestLog);
+  LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
   if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
 // the log contains no transactions
 LOG.warn("Latest log " + latestLog + " has no transactions. " +
-"moving it aside and looking for previous log");
+"moving it aside and looking for previous log"
++ " ; journal id: " + journalId);
 latestLog.moveAsideEmptyFile();
   } else {
 return latestLog;
@@ -215,7 +216,7 @@ public class Journal implements Closeable {
 Preconditions.checkState(nsInfo.getNamespaceID() != 0,
 "can't format with uninitialized namespace info: %s",
 nsInfo);
-LOG.info("Formatting " + this + " with namespace info: " +
+LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
 nsInfo);
 storage.format(nsInfo);
 refreshCachedData();
@@ -309,7 +310,7 @@ public class Journal implements Closeable {
 // any other that we've promised. 
 if (epoch <= getLastPromisedEpoch()) {
   throw new IOException("Proposed epoch " + epoch + " <= last promise " +
-  getLastPromisedEpoch());
+  getLastPromisedEpoch() + " ; journal id: " + journalId);
 }
 
 updateLastPromisedEpoch(epoch);
@@ -329,7 +330,8 @@ public class Journal implements Closeable {
 
   private void updateLastPromisedEpoch(long newEpoch) throws IOException {
 LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
-" to " + newEpoch + " for client " + Server.getRemoteIp());
+" to " + newEpoch + " for client " + Server.getRemoteIp() +
+" ; journal id: " + journalId);
 lastPromisedEpoch.set(newEpoch);
 
 // Since we have a new writer, reset the IPC serial - it will start
@@ -358,8 +360,7 @@ public class Journal implements Closeable {
 checkWriteRequest(reqInfo);
 
 checkSync(curSegment != null,
-"Can't write, no segment open");
-
+"Can't write, no segment open" + " ; journal id: " + journalId);
 if (curSegmentTxId != segmentTxId) {
   // Sanity check: it is possible that the writer will fail IPCs
   // on both the finalize() and then the start() of the next segment.
@@ -369,17 +370,20 @@ public class Journal implements Closeable {
   // and throw an exception.
   JournalOutOfSyncException e = new JournalOutOfSyncException(
   "Writer out of sync: it thinks it is writing segment " + segmentTxId
-  + " but current segment is " + curSegmentTxId);
+  + " but current segment is " + curSegmentTxId
+  + " ; journal id: " + journalId);
   abortCurSegment();
   throw e;
 }
   
 checkSync(nextTxId == firstTxnId,
-"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
++ " ; journal id: " + journalId);
 
 long 

hadoop git commit: HDFS-13544. Improve logging for JournalNode in federated cluster.

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 220e38ec0 -> b00c0a08a


HDFS-13544. Improve logging for JournalNode in federated cluster.

(cherry picked from commit 6beb25ab7e4f5454dba0315a296081e61753f301)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b00c0a08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b00c0a08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b00c0a08

Branch: refs/heads/branch-3.0
Commit: b00c0a08a3c1ca3008dd2e03f9feff59080e392e
Parents: 220e38e
Author: Hanisha Koneru 
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon May 14 10:16:06 2018 -0700

--
 .../hadoop/hdfs/qjournal/server/Journal.java| 115 +++
 1 file changed, 64 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b00c0a08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 408ce76..452664a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -208,11 +208,12 @@ public class Journal implements Closeable {
 while (!files.isEmpty()) {
   EditLogFile latestLog = files.remove(files.size() - 1);
   latestLog.scanLog(Long.MAX_VALUE, false);
-  LOG.info("Latest log is " + latestLog);
+  LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
   if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
 // the log contains no transactions
 LOG.warn("Latest log " + latestLog + " has no transactions. " +
-"moving it aside and looking for previous log");
+"moving it aside and looking for previous log"
++ " ; journal id: " + journalId);
 latestLog.moveAsideEmptyFile();
   } else {
 return latestLog;
@@ -230,7 +231,7 @@ public class Journal implements Closeable {
 Preconditions.checkState(nsInfo.getNamespaceID() != 0,
 "can't format with uninitialized namespace info: %s",
 nsInfo);
-LOG.info("Formatting " + this + " with namespace info: " +
+LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
 nsInfo);
 storage.format(nsInfo);
 refreshCachedData();
@@ -323,7 +324,7 @@ public class Journal implements Closeable {
 // any other that we've promised. 
 if (epoch <= getLastPromisedEpoch()) {
   throw new IOException("Proposed epoch " + epoch + " <= last promise " +
-  getLastPromisedEpoch());
+  getLastPromisedEpoch() + " ; journal id: " + journalId);
 }
 
 updateLastPromisedEpoch(epoch);
@@ -343,7 +344,8 @@ public class Journal implements Closeable {
 
   private void updateLastPromisedEpoch(long newEpoch) throws IOException {
 LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
-" to " + newEpoch + " for client " + Server.getRemoteIp());
+" to " + newEpoch + " for client " + Server.getRemoteIp() +
+" ; journal id: " + journalId);
 lastPromisedEpoch.set(newEpoch);
 
 // Since we have a new writer, reset the IPC serial - it will start
@@ -378,7 +380,7 @@ public class Journal implements Closeable {
 }
 
 checkSync(curSegment != null,
-"Can't write, no segment open");
+"Can't write, no segment open" + " ; journal id: " + journalId);
 
 if (curSegmentTxId != segmentTxId) {
   // Sanity check: it is possible that the writer will fail IPCs
@@ -389,17 +391,20 @@ public class Journal implements Closeable {
   // and throw an exception.
   JournalOutOfSyncException e = new JournalOutOfSyncException(
   "Writer out of sync: it thinks it is writing segment " + segmentTxId
-  + " but current segment is " + curSegmentTxId);
+  + " but current segment is " + curSegmentTxId
+  + " ; journal id: " + journalId);
   abortCurSegment();
   throw e;
 }
   
 checkSync(nextTxId == firstTxnId,
-"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
++ " ; journal id: " + journalId);
 
 long lastTxnId = firstTxnId + numTxns - 1;
 if (LOG.isTraceEnabled()) {
-  LOG.trace("Writing txid 

hadoop git commit: HDFS-13544. Improve logging for JournalNode in federated cluster.

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 6e731eb20 -> 4f5594139


HDFS-13544. Improve logging for JournalNode in federated cluster.

(cherry picked from commit 6beb25ab7e4f5454dba0315a296081e61753f301)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f559413
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f559413
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f559413

Branch: refs/heads/branch-3.1
Commit: 4f55941390a867034df5dfb0b1122636cff3e8c6
Parents: 6e731eb
Author: Hanisha Koneru 
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon May 14 10:14:38 2018 -0700

--
 .../hadoop/hdfs/qjournal/server/Journal.java| 115 +++
 1 file changed, 64 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f559413/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 408ce76..452664a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -208,11 +208,12 @@ public class Journal implements Closeable {
 while (!files.isEmpty()) {
   EditLogFile latestLog = files.remove(files.size() - 1);
   latestLog.scanLog(Long.MAX_VALUE, false);
-  LOG.info("Latest log is " + latestLog);
+  LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
   if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
 // the log contains no transactions
 LOG.warn("Latest log " + latestLog + " has no transactions. " +
-"moving it aside and looking for previous log");
+"moving it aside and looking for previous log"
++ " ; journal id: " + journalId);
 latestLog.moveAsideEmptyFile();
   } else {
 return latestLog;
@@ -230,7 +231,7 @@ public class Journal implements Closeable {
 Preconditions.checkState(nsInfo.getNamespaceID() != 0,
 "can't format with uninitialized namespace info: %s",
 nsInfo);
-LOG.info("Formatting " + this + " with namespace info: " +
+LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
 nsInfo);
 storage.format(nsInfo);
 refreshCachedData();
@@ -323,7 +324,7 @@ public class Journal implements Closeable {
 // any other that we've promised. 
 if (epoch <= getLastPromisedEpoch()) {
   throw new IOException("Proposed epoch " + epoch + " <= last promise " +
-  getLastPromisedEpoch());
+  getLastPromisedEpoch() + " ; journal id: " + journalId);
 }
 
 updateLastPromisedEpoch(epoch);
@@ -343,7 +344,8 @@ public class Journal implements Closeable {
 
   private void updateLastPromisedEpoch(long newEpoch) throws IOException {
 LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
-" to " + newEpoch + " for client " + Server.getRemoteIp());
+" to " + newEpoch + " for client " + Server.getRemoteIp() +
+" ; journal id: " + journalId);
 lastPromisedEpoch.set(newEpoch);
 
 // Since we have a new writer, reset the IPC serial - it will start
@@ -378,7 +380,7 @@ public class Journal implements Closeable {
 }
 
 checkSync(curSegment != null,
-"Can't write, no segment open");
+"Can't write, no segment open" + " ; journal id: " + journalId);
 
 if (curSegmentTxId != segmentTxId) {
   // Sanity check: it is possible that the writer will fail IPCs
@@ -389,17 +391,20 @@ public class Journal implements Closeable {
   // and throw an exception.
   JournalOutOfSyncException e = new JournalOutOfSyncException(
   "Writer out of sync: it thinks it is writing segment " + segmentTxId
-  + " but current segment is " + curSegmentTxId);
+  + " but current segment is " + curSegmentTxId
+  + " ; journal id: " + journalId);
   abortCurSegment();
   throw e;
 }
   
 checkSync(nextTxId == firstTxnId,
-"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+"Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
++ " ; journal id: " + journalId);
 
 long lastTxnId = firstTxnId + numTxns - 1;
 if (LOG.isTraceEnabled()) {
-  LOG.trace("Writing txid 

[hadoop] Git Push Summary

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/remotes/origin/HDDS-48 [created] 2d00a0c71




[hadoop] Git Push Summary

2018-05-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 [created] 2d00a0c71




[03/18] hadoop git commit: YARN-8290. SystemMetricsPublisher.appACLsUpdated should be invoked after application information is published to ATS to avoid "User is not set in the application report" Exc

2018-05-23 Thread hanishakoneru
YARN-8290. SystemMetricsPublisher.appACLsUpdated should be invoked after 
application information is published to ATS to avoid "User is not set in the 
application report" Exception. (Eric Yang via wangda)

Change-Id: I0ac6ddd19740d1aa7dd07111cd11af71ddc2fcaf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd15d239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd15d239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd15d239

Branch: refs/heads/HDDS-48
Commit: bd15d2396ef0c24fb6b60c6393d16b37651b828e
Parents: 523f602
Author: Wangda Tan 
Authored: Tue May 22 13:25:15 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 22 13:33:33 2018 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java | 5 -
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java | 5 +
 .../hadoop/yarn/server/resourcemanager/TestAppManager.java  | 4 
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java   | 1 +
 4 files changed, 6 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd15d239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 2983077..3e64cfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
@@ -466,10 +465,6 @@ public class RMAppManager implements 
EventHandler,
 // Inform the ACLs Manager
 this.applicationACLsManager.addApplication(applicationId,
 submissionContext.getAMContainerSpec().getApplicationACLs());
-String appViewACLs = submissionContext.getAMContainerSpec()
-.getApplicationACLs().get(ApplicationAccessType.VIEW_APP);
-rmContext.getSystemMetricsPublisher().appACLsUpdated(
-application, appViewACLs, System.currentTimeMillis());
 return application;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd15d239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index daf14c4..6aee813 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringInterner;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -2020,6 +2021,10 @@ public class RMAppImpl implements RMApp, Recoverable {
   private void sendATSCreateEvent() {
 rmContext.getRMApplicationHistoryWriter().applicationStarted(this);
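
[Editor's note: the diff is truncated here. The visible hunks remove the
appACLsUpdated call from RMAppManager and import ApplicationAccessType into
RMAppImpl; a hedged sketch of where the call plausibly lands, based on the
sendATSCreateEvent context shown above (the appCreated call and field names
are assumptions, not quoted from the patch):

    private void sendATSCreateEvent() {
      rmContext.getRMApplicationHistoryWriter().applicationStarted(this);
      rmContext.getSystemMetricsPublisher().appCreated(this, this.startTime);
      // ACLs are published only after the application report itself, avoiding
      // the "User is not set in the application report" exception.
      String appViewACLs = submissionContext.getAMContainerSpec()
          .getApplicationACLs().get(ApplicationAccessType.VIEW_APP);
      rmContext.getSystemMetricsPublisher().appACLsUpdated(
          this, appViewACLs, System.currentTimeMillis());
    }
]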
 

[16/18] hadoop git commit: HDDS-44. Ozone: start-ozone.sh fail to start datanode because of incomplete classpaths. Contributed by Mukul Kumar Singh.

2018-05-23 Thread hanishakoneru
HDDS-44. Ozone: start-ozone.sh fail to start datanode because of incomplete 
classpaths.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e83b943f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e83b943f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e83b943f

Branch: refs/heads/HDDS-48
Commit: e83b943fed53c8082a699e0601c2f8e8db0f8ffe
Parents: 63fc587
Author: Anu Engineer 
Authored: Wed May 23 09:29:35 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 23 09:29:35 2018 -0700

--
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 116 ++-
 1 file changed, 111 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83b943f/hadoop-ozone/common/src/main/bin/start-ozone.sh
--
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index dda0a1c..92bc4a8 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -47,6 +47,26 @@ else
   exit 1
 fi
 
+# get arguments
+if [[ $# -ge 1 ]]; then
+  startOpt="$1"
+  shift
+  case "$startOpt" in
+-upgrade)
+  nameStartOpt="$startOpt"
+;;
+-rollback)
+  dataStartOpt="$startOpt"
+;;
+*)
+  hadoop_exit_with_usage 1
+;;
+  esac
+fi
+
+#Add other possible options
+nameStartOpt="$nameStartOpt $*"
+
 SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
 SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
@@ -65,11 +85,97 @@ fi
 
 #-
 # Start hdfs before starting ozone daemons
-if [[ -f "${bin}/start-dfs.sh" ]]; then
-  "${bin}/start-dfs.sh"
-else
-  echo "ERROR: Cannot execute ${bin}/start-dfs.sh." 2>&1
-  exit 1
+
+#-
+# namenodes
+
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
+
+if [[ -z "${NAMENODES}" ]]; then
+  NAMENODES=$(hostname)
+fi
+
+echo "Starting namenodes on [${NAMENODES}]"
+hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+--daemon start \
+namenode ${nameStartOpt}
+
+HADOOP_JUMBO_RETCOUNTER=$?
+
+#-
+# datanodes (using default workers file)
+
+echo "Starting datanodes"
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--daemon start \
+datanode ${dataStartOpt}
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+
+#-
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf 
-secondarynamenodes 2>/dev/null)
+
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+
+  if [[ "${NAMENODES}" =~ , ]]; then
+
+hadoop_error "WARNING: Highly available NameNode is configured."
+hadoop_error "WARNING: Skipping SecondaryNameNode."
+
+  else
+
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
+
+echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+
+hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  --workers \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  --daemon start \
+  secondarynamenode
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+  fi
+fi
+
+#-
+# quorumjournal nodes (if any)
+
+JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
+
+if [[ "${#JOURNAL_NODES}" != 0 ]]; then
+  echo "Starting journal nodes [${JOURNAL_NODES}]"
+
+  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${JOURNAL_NODES}" \
+--daemon start \
+journalnode
+   (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+fi
+
+#-
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey 
dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
+
+  hadoop_uservar_su hdfs zkfc 

[10/18] hadoop git commit: Additional check when unpacking archives. Contributed by Jason Lowe and Akira Ajisaka.

2018-05-23 Thread hanishakoneru
Additional check when unpacking archives. Contributed by Jason Lowe and Akira 
Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/745f203e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/745f203e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/745f203e

Branch: refs/heads/HDDS-48
Commit: 745f203e577bacb35b042206db94615141fa5e6f
Parents: 1d2640b
Author: Akira Ajisaka 
Authored: Wed May 23 17:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 23 17:16:23 2018 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 18 -
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 40 +---
 2 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 8743be5..5ef78f2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -617,11 +617,16 @@ public class FileUtil {
   throws IOException {
 try (ZipInputStream zip = new ZipInputStream(inputStream)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   for(ZipEntry entry = zip.getNextEntry();
   entry != null;
   entry = zip.getNextEntry()) {
 if (!entry.isDirectory()) {
   File file = new File(toDir, entry.getName());
+  if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+throw new IOException("expanding " + entry.getName()
++ " would create file outside of " + toDir);
+  }
   File parent = file.getParentFile();
   if (!parent.mkdirs() &&
   !parent.isDirectory()) {
@@ -656,12 +661,17 @@ public class FileUtil {
 
 try {
   entries = zipFile.entries();
+  String targetDirPath = unzipDir.getCanonicalPath() + File.separator;
   while (entries.hasMoreElements()) {
 ZipEntry entry = entries.nextElement();
 if (!entry.isDirectory()) {
   InputStream in = zipFile.getInputStream(entry);
   try {
 File file = new File(unzipDir, entry.getName());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + unzipDir);
+}
 if (!file.getParentFile().mkdirs()) {
   if (!file.getParentFile().isDirectory()) {
 throw new IOException("Mkdirs failed to create " +
@@ -944,6 +954,13 @@ public class FileUtil {
 
   private static void unpackEntries(TarArchiveInputStream tis,
   TarArchiveEntry entry, File outputDir) throws IOException {
+String targetDirPath = outputDir.getCanonicalPath() + File.separator;
+File outputFile = new File(outputDir, entry.getName());
+if (!outputFile.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create entry outside of " + outputDir);
+}
+
 if (entry.isDirectory()) {
   File subDir = new File(outputDir, entry.getName());
   if (!subDir.mkdirs() && !subDir.isDirectory()) {
@@ -966,7 +983,6 @@ public class FileUtil {
   return;
 }
 
-File outputFile = new File(outputDir, entry.getName());
 if (!outputFile.getParentFile().exists()) {
   if (!outputFile.getParentFile().mkdirs()) {
 throw new IOException("Mkdirs failed to create tar internal dir "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 39f2f6b..7218a1b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static 
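
[Editor's note: all three hunks add the same guard against "zip slip": an
archive entry named, say, ../../evil.sh must not be written outside the
extraction directory. A standalone sketch of the check the patch applies to
zip streams, zip files, and tar entries alike:

    import java.io.File;
    import java.io.IOException;

    final class UnpackGuard {
      // Resolve the entry against the target directory and reject it if its
      // canonical path escapes that directory (the check added above).
      static File checkedFile(File toDir, String entryName) throws IOException {
        String targetDirPath = toDir.getCanonicalPath() + File.separator;
        File file = new File(toDir, entryName);
        if (!file.getCanonicalPath().startsWith(targetDirPath)) {
          throw new IOException("expanding " + entryName
              + " would create file outside of " + toDir);
        }
        return file;
      }
    }
]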

[06/18] hadoop git commit: HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda kumar.

2018-05-23 Thread hanishakoneru
HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda 
kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c7fd8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c7fd8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c7fd8e

Branch: refs/heads/HDDS-48
Commit: 68c7fd8e6092e8436ecf96852c608708f311f262
Parents: 43be9ab
Author: Xiaoyu Yao 
Authored: Tue May 22 15:46:59 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue May 22 15:46:59 2018 -0700

--
 .../common/impl/ContainerManagerImpl.java   | 14 +---
 .../common/impl/ContainerReportManagerImpl.java | 43 +++-
 .../common/interfaces/ContainerManager.java |  7 --
 .../interfaces/ContainerReportManager.java  |  8 +--
 .../statemachine/DatanodeStateMachine.java  |  1 -
 .../common/statemachine/StateContext.java   | 38 --
 .../states/endpoint/HeartbeatEndpointTask.java  |  3 +-
 .../container/ozoneimpl/OzoneContainer.java |  9 ---
 .../StorageContainerDatanodeProtocol.java   |  5 +-
 .../protocol/StorageContainerNodeProtocol.java  |  5 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  5 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |  3 +-
 .../StorageContainerDatanodeProtocol.proto  | 39 ---
 .../ozone/container/common/ScmTestMock.java | 13 +---
 .../common/TestDatanodeStateMachine.java|  7 --
 .../hdds/scm/node/HeartbeatQueueItem.java   | 23 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 30 +---
 .../scm/server/SCMDatanodeProtocolServer.java   |  6 +-
 .../hdds/scm/container/MockNodeManager.java |  5 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  9 +--
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 74 +---
 .../ozone/container/common/TestEndPoint.java| 11 +--
 .../testutils/ReplicationNodeManagerMock.java   |  5 +-
 .../ozone/TestStorageContainerManager.java  |  5 +-
 24 files changed, 63 insertions(+), 305 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 3a78c70..faee5d0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
@@ -1072,16 +1070,8 @@ public class ContainerManagerImpl implements 
ContainerManager {
   @Override
   public long getNumKeys(long containerId) {
 ContainerData cData = containerMap.get(containerId);
-return cData.getKeyCount();  }
-
-  /**
-   * Get the container report state to send via HB to SCM.
-   *
-   * @return container report state.
-   */
-  @Override
-  public ReportState getContainerReportState() {
-return containerReportManager.getContainerReportState();
+return cData.getKeyCount();
   }
 
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
index 6c83c66..f1d3f7f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import org.apache.commons.lang3.RandomUtils;
 import 

[08/18] hadoop git commit: HDDS-49. Standalone protocol should use grpc in place of netty. Contributed by Mukul Kumar Singh.

2018-05-23 Thread hanishakoneru
HDDS-49. Standalone protocol should use grpc in place of netty.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a914069
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a914069
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a914069

Branch: refs/heads/HDDS-48
Commit: 5a9140690aba295ba1226a3190b52f34347a8372
Parents: 3e5f7ea
Author: Anu Engineer 
Authored: Tue May 22 16:51:43 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 19:56:15 2018 -0700

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 217 +++
 .../hadoop/hdds/scm/XceiverClientManager.java   |  21 +-
 .../hadoop/hdds/scm/XceiverClientMetrics.java   |   8 +-
 .../common/dev-support/findbugsExcludeFile.xml  |   3 +
 hadoop-hdds/common/pom.xml  |  17 ++
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   4 +
 .../main/proto/DatanodeContainerProtocol.proto  |   7 +
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../common/helpers/ContainerMetrics.java|  14 +-
 .../transport/server/GrpcXceiverService.java|  82 +++
 .../transport/server/XceiverServerGrpc.java | 105 +
 .../container/ozoneimpl/OzoneContainer.java |  11 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  10 +-
 .../ozone/scm/TestXceiverClientManager.java |  67 --
 hadoop-project/pom.xml  |   1 +
 15 files changed, 540 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a914069/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
new file mode 100644
index 000..84790e8
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.shaded.io.grpc.ManagedChannel;
+import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A Client for the storageContainer protocol.
+ */
+public class XceiverClientGrpc extends XceiverClientSpi {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
+  private final Pipeline pipeline;
+  private final Configuration config;
+  private XceiverClientProtocolServiceStub asyncStub;
+  private XceiverClientMetrics metrics;
+  private ManagedChannel channel;
+  private final Semaphore semaphore;
+
+  /**
+   * 
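
[Editor's note: the new file is cut off above. Going only by the imports shown,
a hedged sketch of how such a client typically wires up its gRPC channel and
async stub; datanodeHost and datanodePort are hypothetical placeholders, and
this is not the actual connect() implementation:

    ManagedChannel channel = NettyChannelBuilder
        .forAddress(datanodeHost, datanodePort)  // hypothetical placeholders
        .usePlaintext(true)
        .build();
    XceiverClientProtocolServiceStub asyncStub =
        XceiverClientProtocolServiceGrpc.newStub(channel);
    // Requests would then flow through StreamObserver pairs on asyncStub,
    // with the Semaphore field bounding outstanding requests.
]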

[12/18] hadoop git commit: HDFS-13540. DFSStripedInputStream should only allocate new buffers when reading. Contributed by Xiao Chen.

2018-05-23 Thread hanishakoneru
HDFS-13540. DFSStripedInputStream should only allocate new buffers when 
reading. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34e8b9f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34e8b9f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34e8b9f9

Branch: refs/heads/HDDS-48
Commit: 34e8b9f9a86fb03156861482643fba11bdee1dd4
Parents: fed2bef
Author: Sammi Chen 
Authored: Wed May 23 19:10:09 2018 +0800
Committer: Sammi Chen 
Committed: Wed May 23 19:10:09 2018 +0800

--
 .../apache/hadoop/io/ElasticByteBufferPool.java | 12 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 12 +++---
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 45 
 3 files changed, 64 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 023f37f..9dd7771 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -116,4 +116,16 @@ public final class ElasticByteBufferPool implements 
ByteBufferPool {
   // poor granularity.
 }
   }
+
+  /**
+   * Get the size of the buffer pool, for the specified buffer type.
+   *
+   * @param direct Whether the size is returned for direct buffers
+   * @return The size
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public int size(boolean direct) {
+return getBufferTree(direct).size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index f3b16e0..5557a50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -116,12 +116,14 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return decoder.preferDirectBuffer();
   }
 
-  void resetCurStripeBuffer() {
-if (curStripeBuf == null) {
+  private void resetCurStripeBuffer(boolean shouldAllocateBuf) {
+if (shouldAllocateBuf && curStripeBuf == null) {
   curStripeBuf = BUFFER_POOL.getBuffer(useDirectBuffer(),
   cellSize * dataBlkNum);
 }
-curStripeBuf.clear();
+if (curStripeBuf != null) {
+  curStripeBuf.clear();
+}
 curStripeRange = new StripeRange(0, 0);
   }
 
@@ -206,7 +208,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   @Override
   protected void closeCurrentBlockReaders() {
-resetCurStripeBuffer();
+resetCurStripeBuffer(false);
 if (blockReaders ==  null || blockReaders.length == 0) {
   return;
 }
@@ -296,7 +298,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   private void readOneStripe(CorruptedBlocks corruptedBlocks)
   throws IOException {
-resetCurStripeBuffer();
+resetCurStripeBuffer(true);
 
 // compute stripe range based on pos
 final long offsetInBlockGroup = getOffsetInBlockGroup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index cdebee0..422746e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
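
[Editor's note: the test diff is truncated above. A hedged usage sketch of the
pool API this patch extends; getBuffer/putBuffer are the existing
ByteBufferPool methods, and size(boolean) is the accessor added here:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class PoolSizeDemo {
      public static void main(String[] args) {
        ElasticByteBufferPool pool = new ElasticByteBufferPool();
        ByteBuffer buf = pool.getBuffer(true /* direct */, 64 * 1024);
        pool.putBuffer(buf);                  // return the buffer to the pool
        System.out.println(pool.size(true));  // 1: one pooled direct buffer
      }
    }
]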
 

[04/18] hadoop git commit: YARN-8273. Log aggregation does not warn if HDFS quota in target directory is exceeded (grepas via rkanter)

2018-05-23 Thread hanishakoneru
YARN-8273. Log aggregation does not warn if HDFS quota in target directory is 
exceeded (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b22f56c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b22f56c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b22f56c4

Branch: refs/heads/HDDS-48
Commit: b22f56c4719e63bd4f6edc2a075e0bcdb9442255
Parents: 83f53e5
Author: Robert Kanter 
Authored: Tue May 22 14:24:38 2018 -0700
Committer: Robert Kanter 
Committed: Tue May 22 14:24:38 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  4 ++
 .../logaggregation/AggregatedLogFormat.java | 14 +++-
 .../LogAggregationDFSException.java | 45 
 .../LogAggregationFileController.java   |  4 +-
 .../tfile/LogAggregationTFileController.java| 13 +++-
 .../logaggregation/TestContainerLogsUtils.java  |  4 +-
 .../logaggregation/AppLogAggregatorImpl.java| 49 ++---
 .../TestAppLogAggregatorImpl.java   | 75 +---
 .../nodemanager/webapp/TestNMWebServices.java   |  7 +-
 9 files changed, 183 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index db6c11a..a25c524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -40,6 +40,10 @@
   hadoop-common
   provided
 
+
+  org.apache.hadoop
+  hadoop-hdfs-client
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index af3066e..81d5053 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
@@ -547,7 +548,7 @@ public class AggregatedLogFormat {
 }
 
 @Override
-public void close() {
+public void close() throws DSQuotaExceededException {
   try {
 if (writer != null) {
   writer.close();
@@ -555,7 +556,16 @@ public class AggregatedLogFormat {
   } catch (Exception e) {
 LOG.warn("Exception closing writer", e);
   } finally {
-IOUtils.cleanupWithLogger(LOG, this.fsDataOStream);
+try {
+  this.fsDataOStream.close();
+} catch (DSQuotaExceededException e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+  throw e;
+} catch (Throwable e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
new file mode 100644
index 000..19953e4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  
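
[Editor's note: the new exception class is truncated above. A hedged sketch of
the propagation pattern the patch establishes; the Throwable-wrapping
constructor is assumed from the file listing, not quoted in the diff:

    // close() now rethrows quota errors instead of swallowing them, so a
    // caller can surface them as the new checked exception type.
    try {
      logWriter.close();  // may throw DSQuotaExceededException, per the hunk above
    } catch (DSQuotaExceededException e) {
      throw new LogAggregationDFSException(e);  // assumed constructor
    }
]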

[13/18] hadoop git commit: YARN-8285. Remove unused environment variables from the Docker runtime. Contributed by Eric Badger

2018-05-23 Thread hanishakoneru
YARN-8285. Remove unused environment variables from the Docker runtime. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9837ca9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9837ca9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9837ca9c

Branch: refs/heads/HDDS-48
Commit: 9837ca9cc746573571029f9fb996a1be10b588ab
Parents: 34e8b9f
Author: Shane Kumpf 
Authored: Wed May 23 06:43:44 2018 -0600
Committer: Shane Kumpf 
Committed: Wed May 23 06:43:44 2018 -0600

--
 .../linux/runtime/DockerLinuxContainerRuntime.java  | 9 -
 1 file changed, 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9837ca9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 787e892..e131e9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -106,9 +106,6 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * will be used to launch the Docker container.
  *   
  *   
- * {@code YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE} is currently ignored.
- *   
- *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE} controls
  * whether the Docker container's default command is overridden.  When set
  * to {@code true}, the Docker container's command will be
@@ -198,9 +195,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
   "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE =
-  "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE";
   @InterfaceAudience.Private
@@ -216,9 +210,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_RUN_ENABLE_USER_REMAPPING =
-  "YARN_CONTAINER_RUNTIME_DOCKER_RUN_ENABLE_USER_REMAPPING";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
   @InterfaceAudience.Private





[11/18] hadoop git commit: HDDS-85. Send Container State Info while sending the container report from Datanode to SCM. Contributed by Shashikant Banerjee.

2018-05-23 Thread hanishakoneru
HDDS-85. Send Container State Info while sending the container report from 
Datanode to SCM. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fed2bef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fed2bef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fed2bef6

Branch: refs/heads/HDDS-48
Commit: fed2bef647d9a15fe020ad5d3bb89fcb77ed30e6
Parents: 745f203
Author: Mukul Kumar Singh 
Authored: Wed May 23 14:15:35 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 14:15:35 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |  1 +
 .../container/common/helpers/ContainerData.java |  8 
 .../common/impl/ContainerManagerImpl.java   | 45 ++--
 .../common/interfaces/ContainerManager.java |  2 +-
 .../commandhandler/ContainerReportHandler.java  |  4 +-
 .../container/ozoneimpl/OzoneContainer.java |  4 +-
 .../common/impl/TestContainerPersistence.java   |  2 +-
 7 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 1138297..53da18a 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -131,6 +131,7 @@ enum Result {
   UNCLOSED_CONTAINER_IO = 25;
   DELETE_ON_OPEN_CONTAINER = 26;
   CLOSED_CONTAINER_RETRY = 27;
+  INVALID_CONTAINER_STATE = 28;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 14ee33a..d1746f2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -340,6 +340,14 @@ public class ContainerData {
   }
 
   /**
+   * Checks if the container is closed.
+   * @return true if the container state is CLOSED
+   */
+  public synchronized boolean isClosed() {
+    return ContainerLifeCycleState.CLOSED == state;
+  }
+
+  /**
    * Marks this container as closed.
    */
   public synchronized void closeContainer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index faee5d0..9355364 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -100,6 +102,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNCLOSED_CONTAINER_IO;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 
 /**
@@ -707,6 +711,39 @@ public class ContainerManagerImpl implements 
ContainerManager {
   }
 
   /**
+   * Returns LifeCycle State of the container
+   * @param 
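The new helper is cut off by the archive here. Per the commit summary, it exposes the datanode-side container state so the report can carry an HddsProtos.LifeCycleState to SCM. A minimal sketch of such a mapping follows; the method name and the exact state pairs are assumptions, not the committed body:

    // Sketch: translate the datanode container state into the protobuf
    // LifeCycleState sent to SCM. Unknown states surface as errors rather
    // than being silently reported (cf. the new INVALID_CONTAINER_STATE).
    private static HddsProtos.LifeCycleState toLifeCycleState(
        ContainerLifeCycleState state) {
      switch (state) {
      case OPEN:
        return HddsProtos.LifeCycleState.OPEN;
      case CLOSING:
        return HddsProtos.LifeCycleState.CLOSING;
      case CLOSED:
        return HddsProtos.LifeCycleState.CLOSED;
      default:
        throw new IllegalArgumentException("Unexpected state: " + state);
      }
    }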

[17/18] hadoop git commit: HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao Liang.

2018-05-23 Thread hanishakoneru
HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0c9b7a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0c9b7a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0c9b7a8

Branch: refs/heads/HDDS-48
Commit: c0c9b7a8ef2618b7641a0452d9277abd26815de2
Parents: e83b943
Author: Inigo Goiri 
Authored: Wed May 23 09:46:35 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 09:46:35 2018 -0700

--
 .../server/datanode/fsdataset/impl/TestFsDatasetImpl.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0c9b7a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index d684950..9270be8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
@@ -666,7 +667,8 @@ public class TestFsDatasetImpl {
   TimeUnit.MILLISECONDS);
   config.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 
1);
 
-  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+  cluster = new MiniDFSCluster.Builder(config,
+  GenericTestUtils.getRandomizedTestDir()).numDataNodes(1).build();
   cluster.waitActive();
   FileSystem fs = cluster.getFileSystem();
   DataNode dataNode = cluster.getDataNodes().get(0);
@@ -688,7 +690,7 @@ public class TestFsDatasetImpl {
 // Remove write and execute access so that checkDiskErrorThread detects
 // this volume is bad.
 finalizedDir.setExecutable(false);
-finalizedDir.setWritable(false);
+assertTrue(FileUtil.setWritable(finalizedDir, false));
   }
   Assert.assertTrue("Reference count for the volume should be greater "
   + "than 0", volume.getReferenceCount() > 0);
@@ -709,7 +711,7 @@ public class TestFsDatasetImpl {
   } catch (IOException ioe) {
 GenericTestUtils.assertExceptionContains(info.getXferAddr(), ioe);
   }
-  finalizedDir.setWritable(true);
+  assertTrue(FileUtil.setWritable(finalizedDir, true));
   finalizedDir.setExecutable(true);
 } finally {
 cluster.shutdown();
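The two changed lines are the heart of the fix: java.io.File#setWritable is unreliable on Windows (it can return false without changing anything), so the test switches to org.apache.hadoop.fs.FileUtil#setWritable, which shells out to winutils there, and asserts the returned status instead of dropping it. The pattern as a standalone sketch, with a JUnit context and an illustrative path assumed:

    // Sketch: cross-platform permission toggling in a test.
    File finalizedDir = new File("/tmp/finalized");   // illustrative path
    // Fail fast if the permission change did not actually take effect.
    assertTrue(FileUtil.setWritable(finalizedDir, false));
    // ... exercise the failure path, then restore access:
    assertTrue(FileUtil.setWritable(finalizedDir, true));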





[14/18] hadoop git commit: YARN-8297. Incorrect ATS Url used for Wire encrypted cluster.(addendum). Contributed by Sunil G.

2018-05-23 Thread hanishakoneru
YARN-8297. Incorrect ATS Url used for Wire encrypted cluster.(addendum). 
Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61e3e75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61e3e75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61e3e75

Branch: refs/heads/HDDS-48
Commit: f61e3e752eb1cf4a08030da04bc3d6c5a2b3926d
Parents: 9837ca9
Author: Rohith Sharma K S 
Authored: Wed May 23 18:31:03 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed May 23 18:31:03 2018 +0530

--
 .../src/main/webapp/app/initializers/loader.js  | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61e3e75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 53f9c44..01daa7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -31,7 +31,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   $.ajax({
 type: 'GET',
 dataType: 'json',
-async: true,
+async: false,
 context: this,
 url: httpUrl,
 success: function(data) {
@@ -44,7 +44,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   application.advanceReadiness();
 }
   });
-  return protocolScheme == "HTTPS_ONLY";
+  return protocolScheme;
 }
 
 function getTimeLineURL(rmhost, isHttpsSchemeEnabled) {
@@ -97,7 +97,9 @@ function updateConfigs(application) {
 
   Ember.Logger.log("RM Address: " + rmhost);
 
-  var isHttpsSchemeEnabled = getYarnHttpProtocolScheme(rmhost, application);
+  var protocolSchemeFromRM = getYarnHttpProtocolScheme(rmhost, application);
+  Ember.Logger.log("Is protocol scheme https? " + (protocolSchemeFromRM == 
"HTTPS_ONLY"));
+  var isHttpsSchemeEnabled = (protocolSchemeFromRM == "HTTPS_ONLY");
   if(!ENV.hosts.timelineWebAddress) {
 var timelinehost = "";
 $.ajax({
@@ -137,7 +139,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getTimeLineV1URL(rmhost, isHttpsSchemeEnabled),
   success: function(data) {
@@ -171,7 +173,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getSecurityURL(rmhost),
   success: function(data) {





[07/18] hadoop git commit: YARN-8310. Handle old NMTokenIdentifier, AMRMTokenIdentifier, and ContainerTokenIdentifier formats. Contributed by Robert Kanter.

2018-05-23 Thread hanishakoneru
YARN-8310. Handle old NMTokenIdentifier, AMRMTokenIdentifier, and 
ContainerTokenIdentifier formats. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e5f7ea9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e5f7ea9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e5f7ea9

Branch: refs/heads/HDDS-48
Commit: 3e5f7ea986600e084fcac723b0423e7de1b3bb8a
Parents: 68c7fd8
Author: Miklos Szegedi 
Authored: Tue May 22 18:10:33 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue May 22 18:10:33 2018 -0700

--
 .../main/java/org/apache/hadoop/io/IOUtils.java |  20 +++
 .../yarn/security/AMRMTokenIdentifier.java  |  33 -
 .../yarn/security/ContainerTokenIdentifier.java |  98 ---
 .../hadoop/yarn/security/NMTokenIdentifier.java |  32 -
 .../yarn/security/TestYARNTokenIdentifier.java  | 121 ++-
 5 files changed, 278 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e5f7ea9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 7288812..3708a3b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -513,4 +513,24 @@ public class IOUtils {
   throw exception;
 }
   }
+
+  /**
+   * Reads a DataInput until EOF and returns a byte array.  Make sure not to
+   * pass in an infinite DataInput or this will never return.
+   *
+   * @param in A DataInput
+   * @return a byte array containing the data from the DataInput
+   * @throws IOException on I/O error, other than EOF
+   */
+  public static byte[] readFullyToByteArray(DataInput in) throws IOException {
+ByteArrayOutputStream baos = new ByteArrayOutputStream();
+try {
+  while (true) {
+baos.write(in.readByte());
+  }
+} catch (EOFException eof) {
+  // finished reading, do nothing
+}
+return baos.toByteArray();
+  }
 }
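A quick usage sketch of the helper added above (the wrapper class is scaffolding, not part of the patch):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.io.IOUtils;

    public class ReadFullyExample {
      public static void main(String[] args) throws IOException {
        byte[] original = {1, 2, 3, 4};
        // A finite DataInput: the helper reads byte-by-byte until EOF.
        byte[] copy = IOUtils.readFullyToByteArray(
            new DataInputStream(new ByteArrayInputStream(original)));
        System.out.println(Arrays.equals(original, copy));  // true
      }
    }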

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e5f7ea9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
index 56411a7..ed83b06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
@@ -18,20 +18,26 @@
 
 package org.apache.hadoop.yarn.security;
 
+import java.io.ByteArrayInputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import 
org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto;
 
@@ -45,6 +51,8 @@ import com.google.protobuf.TextFormat;
 @Evolving
 public class AMRMTokenIdentifier extends TokenIdentifier {
 
+  private static final Log LOG = LogFactory.getLog(AMRMTokenIdentifier.class);
+
   public static final Text KIND_NAME = new Text("YARN_AM_RM_TOKEN");
   private AMRMTokenIdentifierProto proto;
 
@@ -78,7 +86,30 @@ public class AMRMTokenIdentifier extends TokenIdentifier {
 
   @Override
   public void readFields(DataInput in) throws IOException 
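The method body is truncated above. Per the commit summary, the shape of the change is: buffer the whole identifier, attempt the protobuf parse, and only on InvalidProtocolBufferException fall back to decoding the pre-protobuf layout. A sketch under that assumption (the fallback helper name is illustrative):

    @Override
    public void readFields(DataInput in) throws IOException {
      // Buffer everything so the bytes can be decoded twice if needed.
      byte[] data = IOUtils.readFullyToByteArray(in);
      try {
        proto = AMRMTokenIdentifierProto.parseFrom(data);
      } catch (InvalidProtocolBufferException e) {
        LOG.warn("Recovering old formatted token");
        readFieldsInOldFormat(
            new DataInputStream(new ByteArrayInputStream(data)));
      }
    }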

[18/18] hadoop git commit: HDDS-110. Checkstyle is not working in the HDDS precommit hook. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-110. Checkstyle is not working in the HDDS precommit hook.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/699a6918
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/699a6918
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/699a6918

Branch: refs/heads/HDDS-48
Commit: 699a6918aca2b57ae9ad0bff2c3aaf5a776da614
Parents: c0c9b7a
Author: Anu Engineer 
Authored: Wed May 23 09:42:21 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 23 10:01:53 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/699a6918/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 0e7b23a..13f9255 100644
--- a/pom.xml
+++ b/pom.xml
@@ -322,7 +322,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-build-tools</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>com.puppycrawl.tools</groupId>





[05/18] hadoop git commit: HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part 
of the build.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43be9ab4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43be9ab4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43be9ab4

Branch: refs/heads/HDDS-48
Commit: 43be9ab44f27ae847e100efdc6810b192202fc55
Parents: b22f56c
Author: Anu Engineer 
Authored: Tue May 22 14:29:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 14:29:06 2018 -0700

--
 hadoop-ozone/docs/dev-support/bin/generate-site.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43be9ab4/hadoop-ozone/docs/dev-support/bin/generate-site.sh
--
diff --git a/hadoop-ozone/docs/dev-support/bin/generate-site.sh 
b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
index 3323935..374e74b 100755
--- a/hadoop-ozone/docs/dev-support/bin/generate-site.sh
+++ b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
@@ -19,7 +19,7 @@ DOCDIR="$DIR/../.."
 
 if [ ! "$(which hugo)" ]; then
echo "Hugo is not yet installed. Doc generation is skipped."
-   exit -1
+   exit 0
 fi
 
 DESTDIR="$DOCDIR/target/classes/webapps/docs"





[01/18] hadoop git commit: HDDS-89. Addendum Patch. Create ozone specific inline documentation as part of the build Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 60821fb20 -> 699a6918a


HDDS-89. Addendum Patch. Create ozone specific inline documentation as part of 
the build
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/523f602f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/523f602f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/523f602f

Branch: refs/heads/HDDS-48
Commit: 523f602f81eafd56e4adfadd70d7c9a672b5813a
Parents: 60821fb
Author: Anu Engineer 
Authored: Tue May 22 13:20:42 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 13:20:42 2018 -0700

--
 hadoop-dist/pom.xml | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/523f602f/hadoop-dist/pom.xml
--
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 41e040f..dfbf818 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -68,13 +68,6 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-docs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-
 
   
 
@@ -267,6 +260,11 @@
       <artifactId>hadoop-ozone-tools</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-docs</artifactId>
+      <scope>provided</scope>
+    </dependency>
   
   
 





[15/18] hadoop git commit: HDDS-84. The root directory of ozone.tar.gz should contain the version string. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-84. The root directory of ozone.tar.gz should contain the version string. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63fc5873
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63fc5873
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63fc5873

Branch: refs/heads/HDDS-48
Commit: 63fc5873cee41b883e988ead00fc6f6cf74fae97
Parents: f61e3e7
Author: Mukul Kumar Singh 
Authored: Wed May 23 21:07:37 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 21:07:37 2018 +0530

--
 dev-support/bin/ozone-dist-tar-stitching | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fc5873/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index decfa23..d1116e4 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -41,7 +41,7 @@ function run()
 #To include the version name in the root directory of the tar file
 # we create a symbolic link and dereference it during the tar creation
 ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone"
+run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"





[02/18] hadoop git commit: YARN-8332. Incorrect min/max allocation property name in resource types doc. (Weiwei Yang via wangda)

2018-05-23 Thread hanishakoneru
YARN-8332. Incorrect min/max allocation property name in resource types doc. 
(Weiwei Yang via wangda)

Change-Id: If74f1ceed9c045a2cb2d6593741278b65ac44a9f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83f53e5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83f53e5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83f53e5c

Branch: refs/heads/HDDS-48
Commit: 83f53e5c6236de30c213dc41878cebfb02597e26
Parents: bd15d23
Author: Wangda Tan 
Authored: Tue May 22 13:29:21 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 22 13:33:33 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f53e5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
index f968b5f..ac16d53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -49,8 +49,8 @@ The following configuration properties are supported. See 
below for details.
 |:---- |:---- |
 | `yarn.resource-types` | Comma-separated list of additional resources. May not include `memory`, `memory-mb`, or `vcores` |
 | `yarn.resource-types.<resource>.units` | Default unit for the specified resource type |
-| `yarn.resource-types.<resource>.minimum` | The minimum request for the specified resource type |
-| `yarn.resource-types.<resource>.maximum` | The maximum request for the specified resource type |
+| `yarn.resource-types.<resource>.minimum-allocation` | The minimum request for the specified resource type |
+| `yarn.resource-types.<resource>.maximum-allocation` | The maximum request for the specified resource type |
 
 `node-resources.xml`
 
@@ -127,8 +127,8 @@ set the default unit for the resource type. Valid values 
are:
 
 The property must be named `yarn.resource-types.<resource>.units`. Each defined
 resource may also have optional minimum and maximum properties. The properties
-must be named `yarn.resource-types.<resource>.minimum` and
-`yarn.resource-types.<resource>.maximum`.
+must be named `yarn.resource-types.<resource>.minimum-allocation` and
+`yarn.resource-types.<resource>.maximum-allocation`.
 
 The `yarn.resource-types` property and any unit, minimum, or maximum properties
 may be defined in either the usual `yarn-site.xml` file or in a file named
@@ -147,12 +147,12 @@ may be defined in either the usual `yarn-site.xml` file or in a file named
   </property>
 
   <property>
-    <name>yarn.resource-types.resource2.minimum</name>
+    <name>yarn.resource-types.resource2.minimum-allocation</name>
     <value>1</value>
   </property>
 
   <property>
-    <name>yarn.resource-types.resource2.maximum</name>
+    <name>yarn.resource-types.resource2.maximum-allocation</name>
     <value>1024</value>
   </property>
 





[09/18] hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread hanishakoneru
HDFS-13601. Optimize ByteString conversions in PBHelper.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d2640b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d2640b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d2640b6

Branch: refs/heads/HDDS-48
Commit: 1d2640b6132e8308c07476badd2d1482be68a298
Parents: 5a91406
Author: Andrew Wang 
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang 
Committed: Tue May 22 23:55:20 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  5 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 118 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 8e2bc94..fa9654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -91,5 +91,10 @@
 
 
   
+  
+
+
+
+  
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable<DatanodeID> {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable<DatanodeID> {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.datanodeUuidBytes = datanodeUuidBytes;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
   }
 
+  private static ByteString getByteString(String str) {
+ 
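The helper body is cut off here. Its intent, per the surrounding comments, is to pay the UTF-8 encoding cost once at construction instead of on every protobuf conversion. A plausible shape follows; this is a sketch, not the committed body:

    private static ByteString getByteString(String str) {
      // Protobuf fields cannot hold null; map it to the empty ByteString.
      return (str != null) ? ByteString.copyFromUtf8(str) : ByteString.EMPTY;
    }

PBHelperClient can then feed getIpAddrBytes()/getHostNameBytes() straight into the message builder, skipping a per-call String copy.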

[2/2] hadoop git commit: HDDS-156. Implement HDDSVolume to manage volume state

2018-06-14 Thread hanishakoneru
HDDS-156. Implement HDDSVolume to manage volume state


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a5552bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a5552bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a5552bf

Branch: refs/heads/HDDS-48
Commit: 9a5552bf762880c38a233597b7c6e9ea09441108
Parents: 418cff4
Author: Hanisha Koneru 
Authored: Thu Jun 14 13:28:41 2018 -0700
Committer: Hanisha Koneru 
Committed: Thu Jun 14 13:28:41 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   5 +
 .../container/common/DataNodeLayoutVersion.java |  80 +
 .../common/helpers/DatanodeVersionFile.java |  95 ++
 .../impl/RoundRobinVolumeChoosingPolicy.java|  82 -
 .../ozone/container/common/impl/VolumeInfo.java | 162 -
 .../ozone/container/common/impl/VolumeSet.java  | 251 --
 .../container/common/impl/VolumeUsage.java  | 189 ---
 .../common/interfaces/VolumeChoosingPolicy.java |   4 +-
 .../container/common/utils/HddsVolumeUtil.java  | 163 +
 .../container/common/volume/HddsVolume.java | 330 +++
 .../volume/RoundRobinVolumeChoosingPolicy.java  |  83 +
 .../container/common/volume/VolumeInfo.java | 132 
 .../container/common/volume/VolumeSet.java  | 309 +
 .../container/common/volume/VolumeUsage.java| 198 +++
 .../container/common/volume/package-info.java   |  21 ++
 .../common/TestDatanodeLayOutVersion.java   |  38 +++
 .../common/helpers/TestDatanodeVersionFile.java | 134 
 .../TestRoundRobinVolumeChoosingPolicy.java | 100 --
 .../common/interfaces/TestVolumeSet.java| 149 -
 .../container/common/volume/TestHddsVolume.java | 145 
 .../TestRoundRobinVolumeChoosingPolicy.java | 131 
 .../container/common/volume/TestVolumeSet.java  | 157 +
 22 files changed, 2023 insertions(+), 935 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index c40dc8e..36f830b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -33,6 +33,11 @@ public final class OzoneConsts {
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
 
+  public static final String STORAGE_ID = "storageID";
+  public static final String DATANODE_UUID = "datanodeUuid";
+  public static final String CLUSTER_ID = "clusterID";
+  public static final String LAYOUTVERSION = "layOutVersion";
+  public static final String CTIME = "ctime";
   /*
* BucketName length is used for both buckets and volume lengths
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
new file mode 100644
index 000..2d58c39
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common;
+
+/**
+ * Datanode layout version which describes information about the layout version
+ * on the datanode.
+ */
+public final class DataNodeLayoutVersion {
+
+  // We will just be 
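The class is truncated here. From the test usage elsewhere in this patch (DataNodeLayoutVersion.getLatestVersion().getVersion()), a plausible skeleton looks like this; the field names and the initial version description are assumptions:

    public final class DataNodeLayoutVersion {
      // Each layout change appends an entry with a higher version number.
      private static final DataNodeLayoutVersion[] VERSIONS = {
          new DataNodeLayoutVersion(1, "HDDS Datanode Initial Layout"),
      };

      private final int version;
      private final String description;

      private DataNodeLayoutVersion(int version, String description) {
        this.version = version;
        this.description = description;
      }

      public static DataNodeLayoutVersion getLatestVersion() {
        return VERSIONS[VERSIONS.length - 1];
      }

      public int getVersion() { return version; }
      public String getDescription() { return description; }
    }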

[1/2] hadoop git commit: HDDS-156. Implement HDDSVolume to manage volume state

2018-06-14 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 418cff482 -> 9a5552bf7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
new file mode 100644
index 000..5889222
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class tests {@link DatanodeVersionFile}.
+ */
+public class TestDatanodeVersionFile {
+
+  private File versionFile;
+  private DatanodeVersionFile dnVersionFile;
+  private Properties properties;
+
+  private String storageID;
+  private String clusterID;
+  private String datanodeUUID;
+  private long cTime;
+  private int lv;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  @Before
+  public void setup() throws IOException {
+versionFile = folder.newFile("Version");
+storageID = UUID.randomUUID().toString();
+clusterID = UUID.randomUUID().toString();
+datanodeUUID = UUID.randomUUID().toString();
+cTime = Time.now();
+lv = DataNodeLayoutVersion.getLatestVersion().getVersion();
+
+dnVersionFile = new DatanodeVersionFile(
+storageID, clusterID, datanodeUUID, cTime, lv);
+
+dnVersionFile.createVersionFile(versionFile);
+
+properties = dnVersionFile.readFrom(versionFile);
+  }
+
+  @Test
+  public void testCreateAndReadVersionFile() throws IOException{
+
+//Check VersionFile exists
+assertTrue(versionFile.exists());
+
+assertEquals(storageID, HddsVolumeUtil.getStorageID(
+properties, versionFile));
+assertEquals(clusterID, HddsVolumeUtil.getClusterID(
+properties, versionFile, clusterID));
+assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID(
+properties, versionFile, datanodeUUID));
+assertEquals(cTime, HddsVolumeUtil.getCreationTime(
+properties, versionFile));
+assertEquals(lv, HddsVolumeUtil.getLayOutVersion(
+properties, versionFile));
+  }
+
+  @Test
+  public void testIncorrectClusterId() throws IOException{
+try {
+  String randomClusterID = UUID.randomUUID().toString();
+  HddsVolumeUtil.getClusterID(properties, versionFile,
+  randomClusterID);
+  fail("Test failure in testIncorrectClusterId");
+} catch (InconsistentStorageStateException ex) {
+  GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
+}
+  }
+
+  @Test
+  public void testVerifyCTime() throws IOException{
+long invalidCTime = -10;
+dnVersionFile = new DatanodeVersionFile(
+storageID, clusterID, datanodeUUID, invalidCTime, lv);
+dnVersionFile.createVersionFile(versionFile);
+properties = dnVersionFile.readFrom(versionFile);
+
+try {
+  HddsVolumeUtil.getCreationTime(properties, versionFile);
+  fail("Test failure in testVerifyCTime");
+} catch (InconsistentStorageStateException ex) {
+  GenericTestUtils.assertExceptionContains("Invalid Creation time in " +
+  "Version File : " + versionFile, ex);
+}
+  }
+
+  @Test
+  public void testVerifyLayOut() throws IOException{
+ 

hadoop git commit: Revert "Create Version File in Datanode. Contributed by Bharat Viswanadham."

2018-06-11 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 143dd560b -> 0e437f9b1


Revert "Create Version File in Datanode. Contributed by Bharat Viswanadham."

This reverts commit f26d3466d79125123cba00ab81481655d7bfe3c1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e437f9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e437f9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e437f9b

Branch: refs/heads/HDDS-48
Commit: 0e437f9b174f3b1eaf41b63ae707dd76379b8e8b
Parents: 143dd56
Author: Hanisha Koneru 
Authored: Mon Jun 11 12:15:39 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon Jun 11 12:15:39 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   2 -
 .../org/apache/hadoop/ozone/common/Storage.java |   6 +-
 .../container/common/DataNodeLayoutVersion.java |  80 -
 .../common/helpers/DatanodeVersionFile.java | 172 ---
 .../states/datanode/RunningDatanodeState.java   |   3 +-
 .../states/endpoint/VersionEndpointTask.java|  71 +---
 .../container/ozoneimpl/OzoneContainer.java |   8 +-
 .../hadoop/ozone/protocol/VersionResponse.java  |   4 -
 .../ozone/container/common/ScmTestMock.java |  24 ---
 .../common/TestDatanodeLayOutVersion.java   |  38 
 .../common/TestDatanodeStateMachine.java|   3 +-
 .../common/helpers/TestDatanodeVersionFile.java | 120 -
 .../hadoop/hdds/scm/node/SCMNodeManager.java|   2 -
 .../ozone/container/common/TestEndPoint.java| 169 +-
 14 files changed, 14 insertions(+), 688 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index ce1a733..451a08f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -29,8 +29,6 @@ public final class OzoneConsts {
 
   public static final String STORAGE_DIR = "scm";
   public static final String SCM_ID = "scmUuid";
-  public static final String LAYOUTVERSION = "layOutVersion";
-  public static final String CTIME = "ctime";
 
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index 35ddc71..fb30d92 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -45,10 +45,8 @@ import java.util.Properties;
 public abstract class Storage {
   private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
 
-  public static final String STORAGE_DIR_CURRENT = "current";
-  public static final String STORAGE_FILE_VERSION = "VERSION";
-  public static final String STORAGE_DIR_HDDS = "hdds";
-
+  protected static final String STORAGE_DIR_CURRENT = "current";
+  protected static final String STORAGE_FILE_VERSION = "VERSION";
 
   private final NodeType nodeType;
   private final File root;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
deleted file mode 100644
index 2d58c39..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * 

[38/50] [abbrv] hadoop git commit: HADOOP-14946 S3Guard testPruneCommandCLI can fail. Contributed by Gabor Bota.

2018-05-30 Thread hanishakoneru
HADOOP-14946 S3Guard testPruneCommandCLI can fail. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30284d02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30284d02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30284d02

Branch: refs/heads/HDDS-48
Commit: 30284d020d36c502dad5bdbae61ec48e9dfe9f8c
Parents: 201440b
Author: Aaron Fabbri 
Authored: Tue May 29 13:38:15 2018 -0700
Committer: Aaron Fabbri 
Committed: Tue May 29 13:38:15 2018 -0700

--
 .../s3guard/AbstractS3GuardToolTestBase.java| 52 +---
 1 file changed, 44 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30284d02/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 4381749..2b43810 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -31,6 +31,7 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.util.StopWatch;
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -61,6 +62,8 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   protected static final String S3A_THIS_BUCKET_DOES_NOT_EXIST
   = "s3a://this-bucket-does-not-exist-000";
 
+  private static final int PRUNE_MAX_AGE_SECS = 2;
+
   private MetadataStore ms;
 
   protected static void expectResult(int expected,
@@ -186,24 +189,57 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
 }
   }
 
+  /**
+   * Attempt to test prune() with sleep() without having flaky tests
+   * when things run slowly. Test is basically:
+   * 1. Set max path age to X seconds
+   * 2. Create some files (which writes entries to MetadataStore)
+   * 3. Sleep X+2 seconds (all files from above are now "stale")
+   * 4. Create some other files (these are "fresh").
+   * 5. Run prune on MetadataStore.
+   * 6. Assert that only files that were created before the sleep() were 
pruned.
+   *
+   * Problem is: #6 can fail if X seconds elapse between steps 4 and 5, since
+   * the newer files also become stale and get pruned.  This is easy to
+   * reproduce by running all integration tests in parallel with a ton of
+   * threads, or anything else that slows down execution a lot.
+   *
+   * Solution: Keep track of time elapsed between #4 and #5, and if it
+   * exceeds X, just print a warn() message instead of failing.
+   *
+   * @param cmdConf configuration for command
+   * @param parent path
+   * @param args command args
+   * @throws Exception
+   */
   private void testPruneCommand(Configuration cmdConf, Path parent,
   String...args) throws Exception {
 Path keepParent = path("prune-cli-keep");
+StopWatch timer = new StopWatch();
 try {
-  getFileSystem().mkdirs(parent);
-  getFileSystem().mkdirs(keepParent);
-
   S3GuardTool.Prune cmd = new S3GuardTool.Prune(cmdConf);
   cmd.setMetadataStore(ms);
 
+  getFileSystem().mkdirs(parent);
+  getFileSystem().mkdirs(keepParent);
   createFile(new Path(parent, "stale"), true, true);
   createFile(new Path(keepParent, "stale-to-keep"), true, true);
-  Thread.sleep(TimeUnit.SECONDS.toMillis(2));
+
+  Thread.sleep(TimeUnit.SECONDS.toMillis(PRUNE_MAX_AGE_SECS + 2));
+
+  timer.start();
   createFile(new Path(parent, "fresh"), true, true);
 
   assertMetastoreListingCount(parent, "Children count before pruning", 2);
   exec(cmd, args);
-  assertMetastoreListingCount(parent, "Pruned children count", 1);
+  long msecElapsed = timer.now(TimeUnit.MILLISECONDS);
+  if (msecElapsed >= PRUNE_MAX_AGE_SECS * 1000) {
+LOG.warn("Skipping an assertion: Test running too slowly ({} msec)",
+msecElapsed);
+  } else {
+assertMetastoreListingCount(parent, "Pruned children count remaining",
+1);
+  }
   assertMetastoreListingCount(keepParent,
   "This child should have been kept (prefix restriction).", 1);
 } finally {
@@ -224,13 +260,14 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   public void testPruneCommandCLI() throws Exception {
 Path testPath = path("testPruneCommandCLI");
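The test method is cut off, but the essence of the fix already appears above: wrap steps 4 and 5 in a StopWatch and downgrade the assertion to a warning when the run was too slow. Distilled, with stand-in helper names:

    // Sketch: assert only if the "fresh" files are provably still fresh.
    StopWatch timer = new StopWatch().start();
    createFreshFiles();                          // step 4 (assumed helper)
    runPrune();                                  // step 5 (assumed helper)
    long msecElapsed = timer.now(TimeUnit.MILLISECONDS);
    if (msecElapsed >= PRUNE_MAX_AGE_SECS * 1000) {
      LOG.warn("Skipping assertion: test ran too slowly ({} msec)",
          msecElapsed);
    } else {
      assertPrunedChildrenCount(1);              // step 6 (assumed helper)
    }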
 

[37/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
HDDS-81. Moving ContainerReport inside Datanode heartbeat.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/201440b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/201440b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/201440b9

Branch: refs/heads/HDDS-48
Commit: 201440b987d5ef3910c2045b2411c213ed6eec1f
Parents: 4827e9a
Author: Anu Engineer 
Authored: Tue May 29 12:40:27 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 29 12:48:50 2018 -0700

--
 .../common/impl/ContainerManagerImpl.java   |  22 +-
 .../common/impl/StorageLocationReport.java  |   8 +-
 .../common/interfaces/ContainerManager.java |   8 +-
 .../statemachine/DatanodeStateMachine.java  |   7 +-
 .../common/statemachine/StateContext.java   |  16 +-
 .../CloseContainerCommandHandler.java   | 113 
 .../commandhandler/CloseContainerHandler.java   | 113 
 .../commandhandler/CommandDispatcher.java   |   5 +-
 .../commandhandler/CommandHandler.java  |   8 +-
 .../DeleteBlocksCommandHandler.java |  12 +-
 .../states/endpoint/HeartbeatEndpointTask.java  |  30 +-
 .../states/endpoint/RegisterEndpointTask.java   |  12 +-
 .../container/ozoneimpl/OzoneContainer.java |  10 +-
 .../StorageContainerDatanodeProtocol.java   |  30 +-
 .../protocol/StorageContainerNodeProtocol.java  |  15 +-
 .../commands/CloseContainerCommand.java |  18 +-
 .../protocol/commands/DeleteBlocksCommand.java  |  18 +-
 .../protocol/commands/RegisteredCommand.java|  26 +-
 .../protocol/commands/ReregisterCommand.java|  16 +-
 .../ozone/protocol/commands/SCMCommand.java |   4 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  50 +---
 ...rDatanodeProtocolServerSideTranslatorPB.java |  53 ++--
 .../StorageContainerDatanodeProtocol.proto  | 256 -
 .../ozone/container/common/ScmTestMock.java |  78 ++
 .../hdds/scm/container/ContainerMapping.java|  10 +-
 .../hadoop/hdds/scm/container/Mapping.java  |   6 +-
 .../replication/ContainerSupervisor.java|  13 +-
 .../container/replication/InProgressPool.java   |  15 +-
 .../hdds/scm/node/HeartbeatQueueItem.java   |  14 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  58 ++--
 .../hdds/scm/node/SCMNodeStorageStatMap.java|  14 +-
 .../scm/server/SCMDatanodeProtocolServer.java   | 195 +++--
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  19 +-
 .../hdds/scm/container/MockNodeManager.java |  26 +-
 .../scm/container/TestContainerMapping.java |  24 +-
 .../container/closer/TestContainerCloser.java   |  12 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   6 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  83 +++---
 .../scm/node/TestSCMNodeStorageStatMap.java |  16 +-
 .../ozone/container/common/TestEndPoint.java| 113 ++--
 .../replication/TestContainerSupervisor.java| 275 ---
 .../ReplicationDatanodeStateManager.java| 101 ---
 .../testutils/ReplicationNodeManagerMock.java   |  14 +-
 .../ozone/TestStorageContainerManager.java  |  11 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |  68 ++---
 45 files changed, 706 insertions(+), 1315 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 9355364..af47015 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,11 +35,11 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.io.IOUtils;
 

[41/50] [abbrv] hadoop git commit: HADOOP-15480 AbstractS3GuardToolTestBase.testDiffCommand fails when using dynamo (Gabor Bota)

2018-05-30 Thread hanishakoneru
HADOOP-15480 AbstractS3GuardToolTestBase.testDiffCommand fails when using 
dynamo (Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f6769f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f6769f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f6769f7

Branch: refs/heads/HDDS-48
Commit: 5f6769f7964ff002b6c04a95893b5baeb424b6db
Parents: 135941e
Author: Aaron Fabbri 
Authored: Tue May 29 19:20:22 2018 -0700
Committer: Aaron Fabbri 
Committed: Tue May 29 19:20:22 2018 -0700

--
 .../s3guard/AbstractS3GuardToolTestBase.java| 37 +---
 .../s3a/s3guard/ITestS3GuardToolDynamoDB.java   |  5 ---
 .../fs/s3a/s3guard/ITestS3GuardToolLocal.java   |  5 ---
 3 files changed, 25 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6769f7/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 2b43810..7d75f52 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.net.URI;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -32,6 +33,8 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.util.StopWatch;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.FileSystem;
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
+import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
@@ -65,6 +70,7 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   private static final int PRUNE_MAX_AGE_SECS = 2;
 
   private MetadataStore ms;
+  private S3AFileSystem rawFs;
 
   protected static void expectResult(int expected,
   String message,
@@ -129,28 +135,34 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
 return ms;
   }
 
-  protected abstract MetadataStore newMetadataStore();
-
   @Override
   public void setup() throws Exception {
 super.setup();
 S3ATestUtils.assumeS3GuardState(true, getConfiguration());
-ms = newMetadataStore();
-ms.initialize(getFileSystem());
+ms = getFileSystem().getMetadataStore();
+
+// Also create a "raw" fs without any MetadataStore configured
+Configuration conf = new Configuration(getConfiguration());
+conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+URI fsUri = getFileSystem().getUri();
+rawFs = (S3AFileSystem) FileSystem.newInstance(fsUri, conf);
   }
 
   @Override
   public void teardown() throws Exception {
 super.teardown();
 IOUtils.cleanupWithLogger(LOG, ms);
+IOUtils.closeStream(rawFs);
   }
 
   protected void mkdirs(Path path, boolean onS3, boolean onMetadataStore)
   throws IOException {
+Preconditions.checkArgument(onS3 || onMetadataStore);
+// getFileSystem() returns an fs with MetadataStore configured
+S3AFileSystem fs = onMetadataStore ? getFileSystem() : rawFs;
 if (onS3) {
-  getFileSystem().mkdirs(path);
-}
-if (onMetadataStore) {
+  fs.mkdirs(path);
+} else if (onMetadataStore) {
   S3AFileStatus status = new S3AFileStatus(true, path, OWNER);
   ms.put(new PathMetadata(status));
 }
@@ -178,13 +190,14 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
*/
   protected void createFile(Path path, boolean onS3, boolean onMetadataStore)
   throws IOException {
+Preconditions.checkArgument(onS3 || onMetadataStore);
+// getFileSystem() returns an fs with MetadataStore configured
+S3AFileSystem fs = onMetadataStore ? getFileSystem() : rawFs;
 if (onS3) {
-  ContractTestUtils.touch(getFileSystem(), path);
-}
-
-if (onMetadataStore) {
+
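The createFile tail is cut off, but the setup() hunk above shows the whole trick: clone the config, force the null MetadataStore implementation, and open a second FileSystem instance against the same bucket, so each test path can hit S3 only or the store only. Extracted (the bucket URI comes from the test fixture):

    // Sketch: a "raw" S3A client with S3Guard disabled.
    Configuration conf = new Configuration(getConfiguration());
    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
    URI fsUri = getFileSystem().getUri();
    // newInstance (not get) bypasses the FileSystem cache, which would
    // otherwise hand back the already-open, S3Guard-enabled instance.
    S3AFileSystem rawFs = (S3AFileSystem) FileSystem.newInstance(fsUri, conf);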

[47/50] [abbrv] hadoop git commit: YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith Sharma K S

2018-05-30 Thread hanishakoneru
YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith 
Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96eefcc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96eefcc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96eefcc8

Branch: refs/heads/HDDS-48
Commit: 96eefcc84aacc4cc82ad7e3e72c5bdad56f4a7b7
Parents: 47c31ff
Author: Billie Rinaldi 
Authored: Wed May 30 12:37:01 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed May 30 12:37:43 2018 -0700

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 28 +++-
 .../hadoop/yarn/service/ServiceClientTest.java  | 18 -
 .../yarn/service/client/ServiceClient.java  |  2 ++
 3 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96eefcc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 46c9abe..578273c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -641,20 +641,24 @@ public class ApiServer {
   private Response startService(String appName,
   final UserGroupInformation ugi) throws IOException,
   InterruptedException {
-ugi.doAs(new PrivilegedExceptionAction<Void>() {
-  @Override
-  public Void run() throws YarnException, IOException {
-ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-sc.actionStart(appName);
-sc.close();
-return null;
-  }
-});
+ApplicationId appId =
+ugi.doAs(new PrivilegedExceptionAction<ApplicationId>() {
+  @Override public ApplicationId run()
+  throws YarnException, IOException {
+ServiceClient sc = getServiceClient();
+sc.init(YARN_CONFIG);
+sc.start();
+sc.actionStart(appName);
+ApplicationId appId = sc.getAppId(appName);
+sc.close();
+return appId;
+  }
+});
 LOG.info("Successfully started service " + appName);
 ServiceStatus status = new ServiceStatus();
-status.setDiagnostics("Service " + appName + " is successfully started.");
+status.setDiagnostics(
+"Service " + appName + " is successfully started with ApplicationId: "
++ appId);
 status.setState(ServiceState.ACCEPTED);
 return formatResponse(Status.OK, status);
   }
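
The pattern above, reduced to a self-contained sketch: doAs is generic
over the action's return type, which is what lets the patch hand the
ApplicationId back out of the privileged block instead of discarding it.
The class name and String payload here are illustrative only.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsReturnSketch {
  public static String lookupAs(final UserGroupInformation ugi)
      throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() {
        // Runs with ugi's credentials; the return value is handed
        // straight back to the doAs caller.
        return "result-for-" + ugi.getShortUserName();
      }
    });
  }
}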

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96eefcc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
index 75b9486..81be750 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
@@ -34,8 +34,10 @@ import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -50,6 +52,8 @@ public class ServiceClientTest extends ServiceClient {
   private Service goodServiceStatus = buildLiveGoodService();
   private boolean initialized;
   private Set expectedInstances 

[32/50] [abbrv] hadoop git commit: HDDS-125. Cleanup HDDS CheckStyle issues. Contributed by Anu Engineer.

2018-05-30 Thread hanishakoneru
HDDS-125. Cleanup HDDS CheckStyle issues.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9502b47b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9502b47b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9502b47b

Branch: refs/heads/HDDS-48
Commit: 9502b47bd2a3cf32edae635293169883c2914475
Parents: 17aa40f
Author: Anu Engineer 
Authored: Tue May 29 09:54:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 29 09:54:06 2018 -0700

--
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  1 -
 .../hdds/scm/block/DeletedBlockLogImpl.java |  2 +-
 .../hdds/scm/container/ContainerMapping.java|  6 +-
 .../scm/container/ContainerStateManager.java| 24 +++
 .../hadoop/hdds/scm/container/Mapping.java  |  9 ++-
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |  4 +-
 .../hdds/scm/node/SCMNodeStorageStatMap.java| 19 +++---
 .../hdds/scm/node/StorageReportResult.java  |  8 +--
 .../hdds/scm/node/states/Node2ContainerMap.java |  2 +-
 .../hdds/scm/pipelines/PipelineSelector.java|  5 +-
 .../scm/server/StorageContainerManager.java |  3 +-
 .../TestStorageContainerManagerHttpServer.java  |  1 -
 .../hadoop/hdds/scm/block/package-info.java | 23 +++
 .../scm/container/TestContainerMapping.java | 12 ++--
 .../hdds/scm/container/closer/package-info.java | 22 +++
 .../hadoop/hdds/scm/container/package-info.java | 22 +++
 .../hdds/scm/container/states/package-info.java | 22 +++
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 66 ++--
 .../scm/node/TestSCMNodeStorageStatMap.java | 32 +-
 .../hadoop/hdds/scm/node/package-info.java  | 22 +++
 .../ozone/container/common/TestEndPoint.java|  2 -
 .../ozone/container/common/package-info.java| 22 +++
 .../ozone/container/placement/package-info.java | 22 +++
 .../replication/TestContainerSupervisor.java|  7 ++-
 24 files changed, 263 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 5a98e85..d17d6c0 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -41,7 +41,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index cabcb46..cedc506 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -190,7 +190,7 @@ public class DeletedBlockLogImpl implements DeletedBlockLog 
{
 try {
   for(Long txID : txIDs) {
 try {
-  byte [] deleteBlockBytes =
+  byte[] deleteBlockBytes =
   deletedStore.get(Longs.toByteArray(txID));
   if (deleteBlockBytes == null) {
 LOG.warn("Delete txID {} not found", txID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index e569874..2d88621 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -152,7 +152,8 @@ public class ContainerMapping implements Mapping {
 ContainerInfo containerInfo;
 lock.lock();
 try {
-  byte[] 

[49/50] [abbrv] hadoop git commit: HDDS-92. Use DBType during parsing datanode .container files. Contributed by Bharat Viswanadham.

2018-05-30 Thread hanishakoneru
HDDS-92. Use DBType during parsing datanode .container files. Contributed by 
Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee1e0e20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee1e0e20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee1e0e20

Branch: refs/heads/HDDS-48
Commit: ee1e0e20368e825a6cef347ed9637c9cf67be45f
Parents: 778a4a2
Author: Xiaoyu Yao 
Authored: Thu May 24 18:25:35 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:04:19 2018 -0700

--
 .../hadoop/utils/MetadataStoreBuilder.java  |  40 +-
 .../apache/hadoop/ozone/TestMetadataStore.java  | 414 -
 .../apache/hadoop/utils/TestMetadataStore.java  | 463 +++
 .../container/common/helpers/KeyUtils.java  |   3 +-
 .../container/common/utils/ContainerCache.java  |   6 +-
 5 files changed, 503 insertions(+), 423 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1e0e20/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
index 9e9c32a..85cebed 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.utils;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.Statistics;
 import org.rocksdb.StatsLevel;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,10 +47,14 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
  */
 public class MetadataStoreBuilder {
 
+  @VisibleForTesting
+  static final Logger LOG =
+  LoggerFactory.getLogger(MetadataStoreBuilder.class);
   private File dbFile;
   private long cacheSize;
   private boolean createIfMissing = true;
   private Configuration conf;
+  private String dbType;
 
   public static MetadataStoreBuilder newBuilder() {
 return new MetadataStoreBuilder();
@@ -73,6 +80,17 @@ public class MetadataStoreBuilder {
 return this;
   }
 
+  /**
+   * Set the container DB Type.
+   * @param type
+   * @return MetadataStoreBuilder
+   */
+  public MetadataStoreBuilder setDBType(String type) {
+this.dbType = type;
+return this;
+  }
+
+
   public MetadataStore build() throws IOException {
 if (dbFile == null) {
   throw new IllegalArgumentException("Failed to build metadata store, "
@@ -81,18 +99,26 @@ public class MetadataStoreBuilder {
 
 // Build db store based on configuration
 MetadataStore store = null;
-String impl = conf == null ?
-OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
-conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(impl)) {
+
+if(dbType == null) {
+  LOG.debug("dbType is null, using ");
+  dbType = conf == null ?
+  OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
+  conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+  OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
+  LOG.debug("dbType is null, using dbType {} from ozone configuration",
+  dbType);
+} else {
+  LOG.debug("Using dbType {} for metastore", dbType);
+}
+if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
   Options options = new Options();
   options.createIfMissing(createIfMissing);
   if (cacheSize > 0) {
 options.cacheSize(cacheSize);
   }
   store = new LevelDBStore(dbFile, options);
-} else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(impl)) {
+} else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
   org.rocksdb.Options opts = new org.rocksdb.Options();
   opts.setCreateIfMissing(createIfMissing);
 
@@ -119,7 +145,7 @@ public class MetadataStoreBuilder {
   + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
   + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
   + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
-  + ", but met " + impl);
+  + ", but met " + dbType);
 }
 return store;
   }
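
A sketch of using the new override, assuming the builder's other setters
keep the conventional names setConf and setDbFile (only setDBType appears
in this diff). Passing the type recorded in a .container file overrides
the cluster-wide ozone.metastore.impl default, which is the point of the
change: a datanode can open a container DB created under a different
default.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;

public class ContainerDbSketch {
  public static MetadataStore open(Configuration conf, File dbFile,
      String typeFromContainerFile) throws IOException {
    // typeFromContainerFile is e.g. "RocksDB" or "LevelDB".
    return MetadataStoreBuilder.newBuilder()
        .setConf(conf)
        .setDbFile(dbFile)
        .setDBType(typeFromContainerFile)
        .build();
  }
}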

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1e0e20/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java

[35/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
deleted file mode 100644
index 50fd18f..000
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.primitives.Longs;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-.HEALTHY;
-
-/**
- * This class  manages the state of datanode
- * in conjunction with the node pool and node managers.
- */
-public class ReplicationDatanodeStateManager {
-  private final NodeManager nodeManager;
-  private final NodePoolManager poolManager;
-  private final Random r;
-
-  /**
-   * The datanode state Manager.
-   *
-   * @param nodeManager
-   * @param poolManager
-   */
-  public ReplicationDatanodeStateManager(NodeManager nodeManager,
-  NodePoolManager poolManager) {
-this.nodeManager = nodeManager;
-this.poolManager = poolManager;
-r = new Random();
-  }
-
-  /**
-   * Get Container Report as if it is from a datanode in the cluster.
-   * @param containerID - Container ID.
-   * @param poolName - Pool Name.
-   * @param dataNodeCount - Datanode Count.
-   * @return List of Container Reports.
-   */
-  public List<ContainerReportsRequestProto> getContainerReport(
-  long containerID, String poolName, int dataNodeCount) {
-List<ContainerReportsRequestProto> containerList = new LinkedList<>();
-List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);
-
-if (nodesInPool == null) {
-  return containerList;
-}
-
-if (nodesInPool.size() < dataNodeCount) {
-  throw new IllegalStateException("Not enough datanodes to create " +
-  "required container reports");
-}
-
-while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
-  DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
-  nodesInPool.remove(id);
-  containerID++;
-  // We return container reports only for nodes that are healthy.
-  if (nodeManager.getNodeState(id) == HEALTHY) {
-ContainerInfo info = ContainerInfo.newBuilder()
-.setContainerID(containerID)
-.setFinalhash(DigestUtils.sha256Hex(
-Longs.toByteArray(containerID)))
-.setContainerID(containerID)
-.build();
-ContainerReportsRequestProto containerReport =
-ContainerReportsRequestProto.newBuilder().addReports(info)
-.setDatanodeDetails(id.getProtoBufMessage())
-.setType(ContainerReportsRequestProto.reportType.fullReport)
-.build();
-containerList.add(containerReport);
-  }
-}
-return containerList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java

[36/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 2d88621..f5fe46a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -33,7 +34,7 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.lease.Lease;
 import org.apache.hadoop.ozone.lease.LeaseException;
@@ -368,11 +369,12 @@ public class ContainerMapping implements Mapping {
* @param reports Container report
*/
   @Override
-  public void processContainerReports(ContainerReportsRequestProto reports)
+  public void processContainerReports(DatanodeDetails datanodeDetails,
+  ContainerReportsProto reports)
   throws IOException {
 List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
 containerInfos = reports.getReportsList();
-containerSupervisor.handleContainerReport(reports);
+containerSupervisor.handleContainerReport(datanodeDetails, reports);
 for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
 containerInfos) {
   byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
@@ -402,7 +404,7 @@ public class ContainerMapping implements Mapping {
   // Container not found in our container db.
   LOG.error("Error while processing container report from datanode :" +
   " {}, for container: {}, reason: container doesn't exist in" 
+
-  "container database.", reports.getDatanodeDetails(),
+  "container database.", datanodeDetails,
   datanodeState.getContainerID());
 }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index f560174..ee8e344 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -16,10 +16,11 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -98,7 +99,8 @@ public interface Mapping extends Closeable {
*
* @param reports Container report
*/
-  void processContainerReports(ContainerReportsRequestProto reports)
+  void processContainerReports(DatanodeDetails datanodeDetails,
+   ContainerReportsProto reports)
   throws IOException;
 
 }
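
The shape of the new contract, as a trivial recorder (not a full Mapping
implementation; the class and map are illustrative): the reporting
datanode now arrives as an explicit argument rather than being read out
of the report message itself.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;

public class RecordingReportHandler {
  private final Map<DatanodeDetails, ContainerReportsProto> lastReport =
      new ConcurrentHashMap<>();

  public void processContainerReports(DatanodeDetails datanodeDetails,
      ContainerReportsProto reports) throws IOException {
    // Remember the most recent report per datanode.
    lastReport.put(datanodeDetails, reports);
  }
}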

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
index c14303f..5bd0574 100644
--- 

[33/50] [abbrv] hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-30 Thread hanishakoneru
Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3236a96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3236a96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3236a96

Branch: refs/heads/HDDS-48
Commit: e3236a9680709de7a95ffbc11b20e1bdc95a8605
Parents: 9502b47
Author: Kihwal Lee 
Authored: Tue May 29 14:15:12 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:15:12 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java | 10 +
 .../java/org/apache/hadoop/util/TestRunJar.java | 42 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index f1b643c..4c94dbc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -117,12 +117,17 @@ public class RunJar {
   throws IOException {
 try (JarInputStream jar = new JarInputStream(inputStream)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   for (JarEntry entry = jar.getNextJarEntry();
entry != null;
entry = jar.getNextJarEntry()) {
 if (!entry.isDirectory() &&
 unpackRegex.matcher(entry.getName()).matches()) {
   File file = new File(toDir, entry.getName());
+  if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+throw new IOException("expanding " + entry.getName()
++ " would create file outside of " + toDir);
+  }
   ensureDirectory(file.getParentFile());
   try (OutputStream out = new FileOutputStream(file)) {
 IOUtils.copyBytes(jar, out, BUFFER_SIZE);
@@ -182,6 +187,7 @@ public class RunJar {
   throws IOException {
 try (JarFile jar = new JarFile(jarFile)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration<JarEntry> entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -189,6 +195,10 @@ public class RunJar {
 unpackRegex.matcher(entry.getName()).matches()) {
   try (InputStream in = jar.getInputStream(entry)) {
 File file = new File(toDir, entry.getName());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 ensureDirectory(file.getParentFile());
 try (OutputStream out = new FileOutputStream(file)) {
   IOUtils.copyBytes(in, out, BUFFER_SIZE);
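
The added guard, isolated into a standalone sketch (class and method
names are illustrative): canonicalize both the target directory and the
would-be output file, and refuse any entry such as "../../etc/evil"
whose canonical path escapes the target directory.

import java.io.File;
import java.io.IOException;

public final class UnpackGuard {
  static File checkedEntryFile(File toDir, String entryName)
      throws IOException {
    String targetDirPath = toDir.getCanonicalPath() + File.separator;
    File file = new File(toDir, entryName);
    // getCanonicalPath resolves "..", ".", and symlinks, so a crafted
    // entry name cannot smuggle the file outside toDir.
    if (!file.getCanonicalPath().startsWith(targetDirPath)) {
      throw new IOException("expanding " + entryName
          + " would create file outside of " + toDir);
    }
    return file;
  }
}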

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index ea07b97..a8c27d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -32,6 +33,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Random;
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
@@ -255,4 +257,44 @@ public class TestRunJar {
 // it should not throw an exception
 verify(runJar, times(0)).unJar(any(File.class), any(File.class));
   }
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, 

[27/50] [abbrv] hadoop git commit: HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.

2018-05-30 Thread hanishakoneru
HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by 
Yuen-Kuei Hsueh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/438ef495
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/438ef495
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/438ef495

Branch: refs/heads/HDDS-48
Commit: 438ef4951a38171f193eaf2631da31d0f4bc3c62
Parents: 8fdc993
Author: Wei-Chiu Chuang 
Authored: Mon May 28 17:32:32 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon May 28 17:32:32 2018 -0700

--
 .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/438ef495/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index b02f34e..17faec2 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -247,9 +247,9 @@ public class KMSACLs implements Runnable, KeyACLs {
 if (blacklist == null) {
   LOG.debug("No blacklist for {}", type.toString());
 } else if (access) {
-  LOG.debug("user is in {}" , blacklist.getAclString());
-} else {
   LOG.debug("user is not in {}" , blacklist.getAclString());
+} else {
+  LOG.debug("user is in {}" , blacklist.getAclString());
 }
   }
 }





[28/50] [abbrv] hadoop git commit: YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli

2018-05-30 Thread hanishakoneru
YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed 
by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ab960f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ab960f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ab960f

Branch: refs/heads/HDDS-48
Commit: 31ab960f4f931df273481927b897388895d803ba
Parents: 438ef49
Author: Jason Lowe 
Authored: Tue May 29 11:00:30 2018 -0500
Committer: Jason Lowe 
Committed: Tue May 29 11:00:30 2018 -0500

--
 hadoop-project/pom.xml  | 5 +
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml| 5 +
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 73c3f5b..59a9bd2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1144,6 +1144,11 @@
         <version>1.8.5</version>
       </dependency>
       <dependency>
+        <groupId>org.objenesis</groupId>
+        <artifactId>objenesis</artifactId>
+        <version>1.0</version>
+      </dependency>
+      <dependency>
         <groupId>org.mock-server</groupId>
         <artifactId>mockserver-netty</artifactId>
         <version>3.9.2</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index f310518..0527095 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -155,6 +155,11 @@
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>org.objenesis</groupId>
+      <artifactId>objenesis</artifactId>
+    </dependency>
+
     <dependency>
 
       <groupId>org.apache.hadoop</groupId>





[19/50] [abbrv] hadoop git commit: HADOOP-15477. Make unjar in RunJar overrideable

2018-05-30 Thread hanishakoneru
HADOOP-15477. Make unjar in RunJar overrideable

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d14e26b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d14e26b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d14e26b3

Branch: refs/heads/HDDS-48
Commit: d14e26b31fe46fb47a8e99a212c70016fd15a4d9
Parents: 0cf6e87
Author: Johan Gustavsson 
Authored: Mon May 28 17:29:59 2018 +0900
Committer: Akira Ajisaka 
Committed: Mon May 28 17:29:59 2018 +0900

--
 .../java/org/apache/hadoop/util/RunJar.java | 17 ++---
 .../java/org/apache/hadoop/util/TestRunJar.java | 37 ++--
 .../org/apache/hadoop/streaming/StreamJob.java  |  4 ++-
 3 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 9dd770c..f1b643c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -76,7 +76,11 @@ public class RunJar {
*/
   public static final String HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES =
   "HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES";
-
+  /**
+   * Environment key for disabling unjar in client code.
+   */
+  public static final String HADOOP_CLIENT_SKIP_UNJAR =
+  "HADOOP_CLIENT_SKIP_UNJAR";
   /**
* Buffer size for copy the content of compressed file to new file.
*/
@@ -93,7 +97,7 @@ public class RunJar {
* @throws IOException if an I/O error has occurred or toDir
* cannot be created and does not already exist
*/
-  public static void unJar(File jarFile, File toDir) throws IOException {
+  public void unJar(File jarFile, File toDir) throws IOException {
 unJar(jarFile, toDir, MATCH_ANY);
   }
 
@@ -292,8 +296,9 @@ public class RunJar {
   }
 }, SHUTDOWN_HOOK_PRIORITY);
 
-
-unJar(file, workDir);
+if (!skipUnjar()) {
+  unJar(file, workDir);
+}
 
 ClassLoader loader = createClassLoader(file, workDir);
 
@@ -364,6 +369,10 @@ public class RunJar {
 return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER));
   }
 
+  boolean skipUnjar() {
+return Boolean.parseBoolean(System.getenv(HADOOP_CLIENT_SKIP_UNJAR));
+  }
+
   String getHadoopClasspath() {
 return System.getenv(HADOOP_CLASSPATH);
   }
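
Since unJar is now an instance method, client code can subclass RunJar
and substitute (or suppress) extraction; this no-op variant is a sketch,
and the same effect is available without writing code by exporting
HADOOP_CLIENT_SKIP_UNJAR=true in the environment.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.util.RunJar;

public class SkipUnjarRunJar extends RunJar {
  @Override
  public void unJar(File jarFile, File toDir) throws IOException {
    // Intentionally empty: classes are loaded from the jar itself, so
    // nothing needs to be expanded onto disk.
  }
}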

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 19485d6..ea07b97 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,10 +17,14 @@
  */
 package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
@@ -99,7 +103,7 @@ public class TestRunJar {
 
 // Unjar everything
 RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
- unjarDir);
+ unjarDir, MATCH_ANY);
 assertTrue("foobar unpacked",
new File(unjarDir, TestRunJar.FOOBAR_TXT).exists());
 assertTrue("foobaz unpacked",
@@ -177,7 +181,7 @@ public class TestRunJar {
 
 // Unjar everything
 RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
-unjarDir);
+unjarDir, MATCH_ANY);
 
 String failureMessage = "Last modify time was lost during unJar";
 assertEquals(failureMessage, MOCKED_NOW, new File(unjarDir, 
TestRunJar.FOOBAR_TXT).lastModified());
@@ -221,5 +225,34 @@ public class TestRunJar {
 // run RunJar
 runJar.run(args);
 // it should not throw an exception
+verify(runJar, times(1)).unJar(any(File.class), any(File.class));
+  }
+
+  @Test
+  public void 

[23/50] [abbrv] hadoop git commit: YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.

2018-05-30 Thread hanishakoneru
YARN-4781. Support intra-queue preemption for fairness ordering policy. 
Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c343669
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c343669
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c343669

Branch: refs/heads/HDDS-48
Commit: 7c343669baf660df3b70d58987d6e68aec54d6fa
Parents: 61df174
Author: Sunil G 
Authored: Mon May 28 16:32:53 2018 +0530
Committer: Sunil G 
Committed: Mon May 28 16:32:53 2018 +0530

--
 .../FifoIntraQueuePreemptionPlugin.java |  37 ++-
 .../capacity/IntraQueueCandidatesSelector.java  |  40 +++
 .../monitor/capacity/TempAppPerPartition.java   |   9 +
 .../AbstractComparatorOrderingPolicy.java   |   2 -
 ...alCapacityPreemptionPolicyMockFramework.java |  12 +-
 ...yPreemptionPolicyIntraQueueFairOrdering.java | 276 +++
 6 files changed, 366 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 40f333f..12c178c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAPriorityComparator;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.IntraQueuePreemptionOrderPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -41,6 +42,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -263,8 +266,17 @@ public class FifoIntraQueuePreemptionPlugin
   Resource queueReassignableResource,
   PriorityQueue orderedByPriority) {
 
-Comparator<TempAppPerPartition> reverseComp = Collections
-.reverseOrder(new TAPriorityComparator());
+Comparator<TempAppPerPartition> reverseComp;
+OrderingPolicy<FiCaSchedulerApp> queueOrderingPolicy =
+tq.leafQueue.getOrderingPolicy();
+if (queueOrderingPolicy instanceof FairOrderingPolicy
+&& (context.getIntraQueuePreemptionOrderPolicy()
+== IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+  reverseComp = Collections.reverseOrder(
+  new TAFairOrderingComparator(this.rc, clusterResource));
+} else {
+  reverseComp = Collections.reverseOrder(new TAPriorityComparator());
+}
 TreeSet<TempAppPerPartition> orderedApps = new TreeSet<>(reverseComp);
 
 String partition = tq.partition;
@@ -355,7 +367,16 @@ public class FifoIntraQueuePreemptionPlugin
   TempQueuePerPartition tq, Collection apps,
   Resource clusterResource,
   Map perUserAMUsed) {
-TAPriorityComparator taComparator = new TAPriorityComparator();
+Comparator<TempAppPerPartition> taComparator;
+OrderingPolicy<FiCaSchedulerApp> orderingPolicy =
+tq.leafQueue.getOrderingPolicy();
+if (orderingPolicy instanceof FairOrderingPolicy
+&& 

[16/50] [abbrv] hadoop git commit: HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. 
Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8605a385
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8605a385
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8605a385

Branch: refs/heads/HDDS-48
Commit: 8605a38514b4f7a2a549c7ecf8e1421e61bb4d67
Parents: 2a9652e
Author: Inigo Goiri 
Authored: Fri May 25 19:43:33 2018 -0700
Committer: Inigo Goiri 
Committed: Fri May 25 19:43:33 2018 -0700

--
 .../org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8605a385/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 50d1e75..6da46de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,7 +41,9 @@ public class TestHDFSFileSystemContract extends 
FileSystemContractBaseTest {
 Configuration conf = new HdfsConfiguration();
 conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
 FileSystemContractBaseTest.TEST_UMASK);
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+File basedir = GenericTestUtils.getRandomizedTestDir();
+cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2)
+.build();
 fs = cluster.getFileSystem();
 defaultWorkingDirectory = "/user/" + 
UserGroupInformation.getCurrentUser().getShortUserName();
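
The change in one self-contained piece: a per-run randomized base
directory keeps concurrent or repeated MiniDFSCluster runs from
colliding on the same NameNode and DataNode storage paths, which is the
failure mode this patch addresses on Windows.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class RandomizedClusterSketch {
  public static MiniDFSCluster start() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // A fresh random directory under the build's test area per call.
    File basedir = GenericTestUtils.getRandomizedTestDir();
    return new MiniDFSCluster.Builder(conf, basedir)
        .numDataNodes(2).build();
  }
}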





[07/50] [abbrv] hadoop git commit: YARN-8357. Fixed NPE when YARN service is saved and not deployed. Contributed by Chandni Singh

2018-05-30 Thread hanishakoneru
YARN-8357.  Fixed NPE when YARN service is saved and not deployed.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9852eb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9852eb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9852eb5

Branch: refs/heads/HDDS-48
Commit: d9852eb5897a25323ab0302c2c0decb61d310e5e
Parents: 7ff5a40
Author: Eric Yang 
Authored: Thu May 24 16:32:13 2018 -0400
Committer: Eric Yang 
Committed: Thu May 24 16:32:13 2018 -0400

--
 .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9852eb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 93a74e3..0ab3322 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1198,6 +1198,7 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 ServiceApiUtil.validateNameFormat(serviceName, getConfig());
 Service appSpec = new Service();
 appSpec.setName(serviceName);
+appSpec.setState(ServiceState.STOPPED);
 ApplicationId currentAppId = getAppId(serviceName);
 if (currentAppId == null) {
   LOG.info("Service {} does not have an application ID", serviceName);





[18/50] [abbrv] hadoop git commit: HDDS-78. Add per volume level storage stats in SCM. Contributed by Shashikant Banerjee.

2018-05-30 Thread hanishakoneru
HDDS-78. Add per volume level storage stats in SCM.
Contributed by  Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf6e87f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf6e87f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf6e87f

Branch: refs/heads/HDDS-48
Commit: 0cf6e87f9212af10eae39cdcb1fe60e6d8191772
Parents: f24c842
Author: Anu Engineer 
Authored: Sat May 26 11:06:22 2018 -0700
Committer: Anu Engineer 
Committed: Sat May 26 11:11:14 2018 -0700

--
 .../placement/metrics/SCMNodeStat.java  |  21 --
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |   8 +
 .../hdds/scm/node/SCMNodeStorageStatMap.java| 230 +--
 .../hdds/scm/node/StorageReportResult.java  |  87 +++
 .../scm/node/TestSCMNodeStorageStatMap.java | 141 +---
 5 files changed, 356 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 4fe72fc..3c871d3 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -136,25 +136,4 @@ public class SCMNodeStat implements NodeStat {
   public int hashCode() {
 return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
   }
-
-
-  /**
-   * Truncate to 4 digits since uncontrolled precision is some times
-   * counter intuitive to what users expect.
-   * @param value - double.
-   * @return double.
-   */
-  private double truncateDecimals(double value) {
-final int multiplier = 1;
-return (double) ((long) (value * multiplier)) / multiplier;
-  }
-
-  /**
-   * get the scmUsed ratio
-   */
-  public  double getScmUsedratio() {
-double scmUsedRatio =
-truncateDecimals(getScmUsed().get() / (double) getCapacity().get());
-return scmUsedRatio;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
index f17a970..d81ff0f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 
+import java.util.Set;
 import java.util.UUID;
 
 /**
@@ -66,4 +68,10 @@ public interface SCMNodeStorageStatMXBean {
* @return long
*/
   long getTotalFreeSpace();
+
+  /**
+   * Returns the set of disks for a given Datanode.
+   * @return set of storage volumes
+   */
+  Set<StorageLocationReport> getStorageVolumes(UUID datanodeId);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 25cb357..f8ad2af 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -22,18 +22,18 @@ package org.apache.hadoop.hdds.scm.node;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.protocol.proto.
+StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import 

[03/50] [abbrv] hadoop git commit: YARN-6919. Add default volume mount list. Contributed by Eric Badger

2018-05-30 Thread hanishakoneru
YARN-6919. Add default volume mount list. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1388de18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1388de18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1388de18

Branch: refs/heads/HDDS-48
Commit: 1388de18ad51434569589a8f5b0b05c38fe02ab3
Parents: 774daa8
Author: Shane Kumpf 
Authored: Thu May 24 09:30:39 2018 -0600
Committer: Shane Kumpf 
Committed: Thu May 24 09:30:39 2018 -0600

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 ++
 .../src/main/resources/yarn-default.xml |  14 ++
 .../runtime/DockerLinuxContainerRuntime.java|  38 +
 .../runtime/TestDockerContainerRuntime.java | 138 +++
 4 files changed, 200 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 004a59f..f7f82f8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2002,6 +2002,16 @@ public class YarnConfiguration extends Configuration {
*/
   public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10;
 
+  /** The default list of read-only mounts to be bind-mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_RO_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-ro-mounts";
+
+  /** The default list of read-write mounts to be bind-mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c82474c..b0ffc48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1811,6 +1811,20 @@
   </property>
 
   <property>
+    <description>The default list of read-only mounts to be bind-mounted
+      into all Docker containers that use DockerContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.default-ro-mounts</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>The default list of read-write mounts to be bind-mounted
+      into all Docker containers that use DockerContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.default-rw-mounts</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index e131e9d..5e2233b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

[12/50] [abbrv] hadoop git commit: HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.

2018-05-30 Thread hanishakoneru
HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed 
by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0d4b1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0d4b1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0d4b1c

Branch: refs/heads/HDDS-48
Commit: 1e0d4b1c283fb98a95c60a1723f594befb3c18a9
Parents: 02322de
Author: Inigo Goiri 
Authored: Fri May 25 09:10:32 2018 -0700
Committer: Inigo Goiri 
Committed: Fri May 25 09:14:28 2018 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0d4b1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index 1507844..4afacd9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -118,7 +118,7 @@ public class TestDataNodeFaultInjector {
   final MetricsDataNodeFaultInjector mdnFaultInjector) throws Exception {
 
 final Path baseDir = new Path(
-PathUtils.getTestDir(getClass()).getAbsolutePath(),
+PathUtils.getTestDir(getClass()).getPath(),
 GenericTestUtils.getMethodName());
 final DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
 DataNodeFaultInjector.set(mdnFaultInjector);





[31/50] [abbrv] hadoop git commit: YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)

2018-05-30 Thread hanishakoneru
YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via 
wangda)

Change-Id: I79a42154e8f86ab1c3cc939b3745024b8eebe5f4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17aa40f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17aa40f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17aa40f6

Branch: refs/heads/HDDS-48
Commit: 17aa40f669f197d43387d67dc00040d14cd00948
Parents: 3061bfc
Author: Wangda Tan 
Authored: Tue May 29 09:27:36 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 29 09:27:36 2018 -0700

--
 .../apache/hadoop/yarn/util/resource/ResourceCalculator.java | 4 ++--
 .../monitor/capacity/CapacitySchedulerPreemptionUtils.java   | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 51078cd..27394f7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -260,10 +260,10 @@ public abstract class ResourceCalculator {
 
   /**
* Check if resource has any major resource types (which are all NodeManagers
-   * included) has a >0 value.
+   * included) has a {@literal >} 0 value.
*
* @param resource resource
-   * @return returns true if any resource is >0
+   * @return returns true if any resource is {@literal >} 0
*/
   public abstract boolean isAnyMajorResourceAboveZero(Resource resource);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 5396d61..690eb02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -136,12 +136,12 @@ public class CapacitySchedulerPreemptionUtils {
* @param conservativeDRF
*  should we do conservativeDRF preemption or not.
*  When true:
-   *stop preempt container when any major resource type <= 0 for 
to-
-   *preempt.
+   *stop preempt container when any major resource type
+   *{@literal <=} 0 for to-preempt.
*This is default preemption behavior of intra-queue preemption
*  When false:
-   *stop preempt container when: all major resource type <= 0 for
-   *to-preempt.
+   *stop preempt container when: all major resource type
+   *{@literal <=} 0 for to-preempt.
*This is default preemption behavior of inter-queue preemption
* @return should we preempt rmContainer. If we should, deduct from
* resourceToObtainByPartition
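
As background on the javadoc rule involved: raw '<' and '>' inside a javadoc
comment are parsed as HTML, and the doclint checks in newer JDKs fail the
build on them (the error here was literally "bad use of '>'"); {@literal}
prints the character without interpreting it. A minimal sketch:

    /**
     * Bad:  returns true if any resource is >0            // javadoc build error
     * Good: returns true if any resource is {@literal >} 0
     * (The HTML entity &gt; also renders as '>' but is harder to read in source.)
     */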





[22/50] [abbrv] hadoop git commit: HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

2018-05-30 Thread hanishakoneru
HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode 
failover

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61df174e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61df174e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61df174e

Branch: refs/heads/HDDS-48
Commit: 61df174e8b3d582183306cabfa2347c8b96322ff
Parents: 04757e5
Author: Karthik Palanisamy 
Authored: Mon May 28 19:41:07 2018 +0900
Committer: Akira Ajisaka 
Committed: Mon May 28 19:41:07 2018 +0900

--
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java   | 2 +-
 .../hadoop-common/src/main/resources/core-default.xml  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index a8c19ab..9295288 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -63,7 +63,7 @@ public abstract class ZKFailoverController {
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = 
"ha.zookeeper.session-timeout.ms";
-  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
+  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 10*1000;
   private static final String ZK_PARENT_ZNODE_KEY = 
"ha.zookeeper.parent-znode";
   public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
   private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 9564587..75acf48 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2168,7 +2168,7 @@
 
 
 <property>
   <name>ha.zookeeper.session-timeout.ms</name>
-  <value>5000</value>
+  <value>10000</value>
   <description>
     The session timeout to use when the ZKFC connects to ZooKeeper.
     Setting this value to a lower value implies that server crashes
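
Operators can raise the timeout further without a code change; a sketch of a
core-site.xml override (the 30-second figure is only an example):

    <property>
      <name>ha.zookeeper.session-timeout.ms</name>
      <value>30000</value>
      <!-- Larger values ride out longer GC pauses or network blips before
           ZooKeeper expires the ZKFC session and triggers a failover, at the
           cost of slower detection of a genuinely dead NameNode. -->
    </property>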





[15/50] [abbrv] hadoop git commit: HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain.

2018-05-30 Thread hanishakoneru
HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a9652e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a9652e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a9652e6

Branch: refs/heads/HDDS-48
Commit: 2a9652e69650973f6158b60ff131215827738db6
Parents: 13d2528
Author: Anu Engineer 
Authored: Fri May 25 15:40:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri May 25 15:45:50 2018 -0700

--
 .../hadoop/hdds/scm/client/HddsClientUtils.java | 23 +
 .../apache/hadoop/ozone/client/ObjectStore.java |  9 
 .../apache/hadoop/ozone/client/OzoneBucket.java | 24 +
 .../apache/hadoop/ozone/client/OzoneVolume.java | 18 +--
 .../hadoop/ozone/client/rest/RestClient.java| 52 
 .../hadoop/ozone/client/rpc/RpcClient.java  | 46 +++--
 6 files changed, 64 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index bc5f8d6..a6813eb 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -170,6 +170,29 @@ public final class HddsClientUtils {
   }
 
   /**
+   * verifies that bucket / volume name is a valid DNS name.
+   *
+   * @param resourceNames Array of bucket / volume names to be verified.
+   */
+  public static void verifyResourceName(String... resourceNames) {
+for (String resourceName : resourceNames) {
+  HddsClientUtils.verifyResourceName(resourceName);
+}
+  }
+
+  /**
+   * Checks that object parameters passed as reference is not null.
+   *
+   * @param references Array of object references to be checked.
+   * @param <T>
+   */
+  public static <T> void checkNotNull(T... references) {
+for (T ref: references) {
+  Preconditions.checkNotNull(ref);
+}
+  }
+
+  /**
* Returns the cache value to be used for list calls.
* @param conf Configuration object
* @return list cache size

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index d8b3011..c5f0689 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -63,8 +63,6 @@ public class ObjectStore {
* @throws IOException
*/
   public void createVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.createVolume(volumeName);
   }
 
@@ -76,9 +74,6 @@ public class ObjectStore {
*/
   public void createVolume(String volumeName, VolumeArgs volumeArgs)
   throws IOException {
-Preconditions.checkNotNull(volumeName);
-Preconditions.checkNotNull(volumeArgs);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.createVolume(volumeName, volumeArgs);
   }
 
@@ -89,8 +84,6 @@ public class ObjectStore {
* @throws IOException
*/
   public OzoneVolume getVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 OzoneVolume volume = proxy.getVolumeDetails(volumeName);
 return volume;
   }
@@ -150,8 +143,6 @@ public class ObjectStore {
* @throws IOException
*/
   public void deleteVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.deleteVolume(volumeName);
   }
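
The net effect is that validation now happens once in the client proxy layer
instead of in every caller. A hedged sketch of how the varargs helpers above
are meant to be used (the method shown is illustrative, in the style of
RpcClient/RestClient):

    public void createBucket(String volumeName, String bucketName)
        throws IOException {
      // one call null-checks every reference ...
      HddsClientUtils.checkNotNull(volumeName, bucketName);
      // ... and one call verifies each name is a valid DNS-style resource name
      HddsClientUtils.verifyResourceName(volumeName, bucketName);
      // proceed with the actual request
    }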
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 5df0254..2f3cff6 100644
--- 

[04/50] [abbrv] hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

2018-05-30 Thread hanishakoneru
HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63deb

Branch: refs/heads/HDDS-48
Commit: c9b63deb533274ca8ef4939f6cd13f728a067f7b
Parents: 1388de1
Author: Andrew Wang 
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang 
Committed: Thu May 24 09:56:23 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63deb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 579ac43..490ccb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -247,7 +247,7 @@ public class PBHelperClient {
 ByteString value = fixedByteStringCache.get(key);
 if (value == null) {
   value = ByteString.copyFromUtf8(key.toString());
-  fixedByteStringCache.put(key, value);
+  fixedByteStringCache.put(new Text(key.copyBytes()), value);
 }
 return value;
   }
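
The hazard being fixed: Text is mutable, and the cache stored the caller's
live Text instance as the map key, so a later reuse of that instance changes
its hashCode and strands the entry in the wrong hash bucket. A small sketch of
the failure mode (values are illustrative):

    Map<Text, ByteString> cache = new ConcurrentHashMap<>();
    Text key = new Text("user1");
    cache.put(key, ByteString.copyFromUtf8("user1")); // stores the live reference
    key.set("user2");             // caller reuses the Text; its hashCode changes
    cache.get(new Text("user1")); // miss: the entry sits under the old hash
    // The fix stores a defensive copy instead: new Text(key.copyBytes())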





[21/50] [abbrv] hadoop git commit: HDFS-13628. Update Archival Storage doc for Provided Storage

2018-05-30 Thread hanishakoneru
HDFS-13628. Update Archival Storage doc for Provided Storage

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04757e58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04757e58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04757e58

Branch: refs/heads/HDDS-48
Commit: 04757e5864bd4904fd5a59d143fff480814700e4
Parents: 88cbe57
Author: Takanobu Asanuma 
Authored: Mon May 28 19:04:36 2018 +0900
Committer: Akira Ajisaka 
Committed: Mon May 28 19:06:34 2018 +0900

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04757e58/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index ab7975a..3c49cb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -35,7 +35,7 @@ A new storage type *ARCHIVE*, which has high storage density 
(petabyte of storag
 
 Another new storage type *RAM\_DISK* is added for supporting writing single 
replica files in memory.
 
-### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and Lazy\_Persist
+### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD, Lazy\_Persist and 
Provided
 
 A new concept of storage policies is introduced in order to allow files to be 
stored in different storage types according to the storage policy.
 
@@ -47,6 +47,7 @@ We have the following storage policies:
 * **All\_SSD** - for storing all replicas in SSD.
 * **One\_SSD** - for storing one of the replicas in SSD. The remaining 
replicas are stored in DISK.
 * **Lazy\_Persist** - for writing blocks with single replica in memory. The 
replica is first written in RAM\_DISK and then it is lazily persisted in DISK.
+* **Provided** - for storing data outside HDFS. See also [HDFS Provided 
Storage](./HdfsProvidedStorage.html).
 
 More formally, a storage policy consists of the following fields:
 
@@ -68,6 +69,7 @@ The following is a typical storage policy table.
 | 7 | Hot (default) | DISK: *n* | \<none\> | ARCHIVE |
 | 5 | Warm | DISK: 1, ARCHIVE: *n*-1 | ARCHIVE, DISK | ARCHIVE, DISK |
 | 2 | Cold | ARCHIVE: *n* | \<none\> | \<none\> |
+| 1 | Provided | PROVIDED: 1, DISK: *n*-1 | PROVIDED, DISK | PROVIDED, DISK |
 
 Note 1: The Lazy\_Persist policy is useful only for single replica blocks. For 
blocks with more than one replicas, all the replicas will be written to DISK 
since writing only one of the replicas to RAM\_DISK does not improve the 
overall performance.
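
As a usage reminder (not part of this doc change), policies from the table are
applied per path with the storagepolicies admin command; the path below is
illustrative:

    hdfs storagepolicies -setStoragePolicy -path /backups -policy Provided
    hdfs storagepolicies -getStoragePolicy -path /backups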
 





[42/50] [abbrv] hadoop git commit: HDFS-13626. Fix incorrect username when deny the setOwner operation. Contributed by Zsolt Venczel.

2018-05-30 Thread hanishakoneru
HDFS-13626. Fix incorrect username when deny the setOwner operation. 
Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24098bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24098bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24098bc

Branch: refs/heads/HDDS-48
Commit: b24098bc8ffe976d662acabc168e20eac8cc8460
Parents: 5f6769f
Author: Yiqun Lin 
Authored: Wed May 30 16:52:21 2018 +0800
Committer: Yiqun Lin 
Committed: Wed May 30 16:52:21 2018 +0800

--
 .../hadoop/hdfs/server/namenode/FSDirAttrOp.java   |  4 ++--
 .../org/apache/hadoop/security/TestPermission.java | 13 -
 2 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24098bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 406fe80..1dbee96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -82,12 +82,12 @@ public class FSDirAttrOp {
   fsd.checkOwner(pc, iip);
   if (!pc.isSuperUser()) {
 if (username != null && !pc.getUser().equals(username)) {
-  throw new AccessControlException("User " + username
+  throw new AccessControlException("User " + pc.getUser()
   + " is not a super user (non-super user cannot change owner).");
 }
 if (group != null && !pc.isMemberOfGroup(group)) {
   throw new AccessControlException(
-  "User " + username + " does not belong to " + group);
+  "User " + pc.getUser() + " does not belong to " + group);
 }
   }
   unprotectedSetOwner(fsd, iip, username, group);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24098bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 813ac5a..388e7f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -337,7 +337,8 @@ public class TestPermission {
   fail("Expect ACE when a non-super user tries to change a file to a " +
   "group where the user does not belong.");
 } catch (AccessControlException e) {
-  assertThat(e.getMessage(), startsWith("User null does not belong to"));
+  assertThat(e.getMessage(), startsWith("User " +
+  userfs.getFileStatus(file).getOwner() + " does not belong to"));
 }
   }
 
@@ -371,8 +372,9 @@ public class TestPermission {
   userfs.setOwner(file, NOUSER, null);
   fail("Expect ACE when a non-super user tries to change owner");
 } catch (AccessControlException e) {
-  assertThat(e.getMessage(), startsWith("User " + NOUSER
-  + " is not a super user (non-super user cannot change owner)"));
+  assertThat(e.getMessage(), startsWith("User " +
+  userfs.getFileStatus(file).getOwner() +
+  " is not a super user (non-super user cannot change owner)"));
 }
   }
 
@@ -397,8 +399,9 @@ public class TestPermission {
   fail("Expect ACE or FNFE when a non-super user tries to change owner " +
   "for a non-existent file");
 } catch (AccessControlException e) {
-  assertThat(e.getMessage(), startsWith("User " + NOUSER
-  + " is not a super user (non-super user cannot change owner)"));
+  assertThat(e.getMessage(), startsWith("User " +
+  userfs.getFileStatus(file).getOwner() +
+  " is not a super user (non-super user cannot change owner)"));
 } catch (FileNotFoundException e) {
 }
   }
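
To make the behavior change concrete, a before/after trace (user names are
illustrative):

    // Non-super user "alice" runs: fs.setOwner(path, "bob", null)
    // Before: "User bob is not a super user ..."   -- names the target owner
    // After:  "User alice is not a super user ..." -- names the actual caller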





[25/50] [abbrv] hadoop git commit: HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang 
Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbf4f01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbf4f01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbf4f01

Branch: refs/heads/HDDS-48
Commit: 9dbf4f01665d5480a70395a24519cbab5d4db0c5
Parents: 91d7c74
Author: Inigo Goiri 
Authored: Mon May 28 16:34:02 2018 -0700
Committer: Inigo Goiri 
Committed: Mon May 28 16:34:02 2018 -0700

--
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbf4f01/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index e82863a..c352dc9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2829,11 +2829,11 @@ public class TestDFSShell {
 System.setErr(origErr);
   }
 
-  assertEquals("Error message is not the expected error message",
-  "setrep: Requested replication factor of 1 is less than "
-  + "the required minimum of 2 for /tmp/TestDFSShell-"
-  + "testSetrepLow/testFileForSetrepLow\n",
-  bao.toString());
+  assertTrue("Error message is not the expected error message"
+  + bao.toString(), bao.toString().startsWith(
+  "setrep: Requested replication factor of 1 is less than "
+  + "the required minimum of 2 for /tmp/TestDFSShell-"
+  + "testSetrepLow/testFileForSetrepLow"));
 } finally {
   shell.close();
   cluster.shutdown();
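
The portability issue underneath: the shell's error text is written through a
PrintStream, which terminates lines with System.lineSeparator() ("\r\n" on
Windows), so an assertEquals against a string hard-coded with "\n" can only
pass on Unix. Comparing the prefix sidesteps the line ending; a sketch:

    // assertEquals(expected + "\n", bao.toString());  // fails on Windows ("\r\n")
    assertTrue(bao.toString().startsWith(expected));   // line-ending agnostic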





[43/50] [abbrv] hadoop git commit: HDDS-88. Create separate message structure to represent ports in DatanodeDetails. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
HDDS-88. Create separate message structure to represent ports in 
DatanodeDetails.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b34148c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b34148c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b34148c

Branch: refs/heads/HDDS-48
Commit: 3b34148c4f7380d201de59c4a1870b597649248f
Parents: b24098b
Author: Anu Engineer 
Authored: Wed May 30 08:52:07 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 30 08:52:07 2018 -0700

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   |   2 +-
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   2 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java   | 219 +++
 .../main/java/org/apache/ratis/RatisHelper.java |   6 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  10 +-
 .../common/transport/server/XceiverServer.java  |   3 +-
 .../transport/server/XceiverServerGrpc.java |   3 +-
 .../server/ratis/XceiverServerRatis.java|   3 +-
 .../common/TestDatanodeStateMachine.java|  16 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  12 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |  18 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  17 +-
 .../apache/hadoop/ozone/RatisTestHelper.java|   3 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   8 +-
 .../ozone/container/ContainerTestHelper.java|  12 +-
 .../container/metrics/TestContainerMetrics.java |   3 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  11 +-
 .../container/server/TestContainerServer.java   |   8 +-
 .../ksm/TestKeySpaceManagerRestInterface.java   |   5 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |   4 +-
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |   4 +-
 .../hadoop/ozone/web/TestOzoneWebAccess.java|   4 +-
 .../ozone/web/client/TestOzoneClient.java   |   4 +-
 .../hadoop/ozone/web/client/TestVolume.java |   2 -
 .../ozone/web/client/TestVolumeRatis.java   |   4 +-
 .../ozone/web/OzoneHddsDatanodeService.java |   5 +-
 .../hadoop/ozone/ksm/KeySpaceManager.java   |   4 +-
 .../hadoop/ozone/genesis/GenesisUtil.java   |  12 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   6 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |   3 +-
 30 files changed, 260 insertions(+), 153 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 42e02f9..709f0dc 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -93,7 +93,7 @@ public class XceiverClient extends XceiverClientSpi {
 
 // read port from the data node, on failure use default configured
 // port.
-int port = leader.getContainerPort();
+int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
 if (port == 0) {
   port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
   OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 84790e8..c787024 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -80,7 +80,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
 // read port from the data node, on failure use default configured
 // port.
-int port = leader.getContainerPort();
+int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
 if (port == 0) {
   port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
   OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
 

[48/50] [abbrv] hadoop git commit: YARN-8350. NPE in service AM related to placement policy. Contributed by Gour Saha

2018-05-30 Thread hanishakoneru
YARN-8350. NPE in service AM related to placement policy. Contributed by Gour 
Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/778a4a24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/778a4a24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/778a4a24

Branch: refs/heads/HDDS-48
Commit: 778a4a24be176382a5704f709c00bdfcfe6ddc8c
Parents: 96eefcc
Author: Billie Rinaldi 
Authored: Wed May 30 13:19:13 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed May 30 13:19:13 2018 -0700

--
 .../yarn/service/component/Component.java   | 114 ++-
 .../exceptions/RestApiErrorMessages.java|   8 ++
 .../yarn/service/utils/ServiceApiUtil.java  |  24 +++-
 .../hadoop/yarn/service/TestServiceApiUtil.java |  44 ++-
 4 files changed, 130 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/778a4a24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 931877e..a1ee796 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -694,62 +694,66 @@ public class Component implements 
EventHandler<ComponentEvent> {
   // composite constraints then this AND-ed composite constraint is not
   // used.
   PlacementConstraint finalConstraint = null;
-  for (org.apache.hadoop.yarn.service.api.records.PlacementConstraint
-  yarnServiceConstraint : placementPolicy.getConstraints()) {
-List targetExpressions = new ArrayList<>();
-// Currently only intra-application allocation tags are supported.
-if (!yarnServiceConstraint.getTargetTags().isEmpty()) {
-  targetExpressions.add(PlacementTargets.allocationTag(
-  yarnServiceConstraint.getTargetTags().toArray(new String[0])));
-}
-// Add all node attributes
-for (Map.Entry> attribute : yarnServiceConstraint
-.getNodeAttributes().entrySet()) {
-  targetExpressions.add(PlacementTargets.nodeAttribute(
-  attribute.getKey(), attribute.getValue().toArray(new 
String[0])));
-}
-// Add all node partitions
-if (!yarnServiceConstraint.getNodePartitions().isEmpty()) {
-  targetExpressions
-  .add(PlacementTargets.nodePartition(yarnServiceConstraint
-  .getNodePartitions().toArray(new String[0])));
-}
-PlacementConstraint constraint = null;
-switch (yarnServiceConstraint.getType()) {
-case AFFINITY:
-  constraint = PlacementConstraints
-  .targetIn(yarnServiceConstraint.getScope().getValue(),
-  targetExpressions.toArray(new TargetExpression[0]))
-  .build();
-  break;
-case ANTI_AFFINITY:
-  constraint = PlacementConstraints
-  .targetNotIn(yarnServiceConstraint.getScope().getValue(),
-  targetExpressions.toArray(new TargetExpression[0]))
-  .build();
-  break;
-case AFFINITY_WITH_CARDINALITY:
-  constraint = PlacementConstraints.targetCardinality(
-  yarnServiceConstraint.getScope().name().toLowerCase(),
-  yarnServiceConstraint.getMinCardinality() == null ? 0
-  : yarnServiceConstraint.getMinCardinality().intValue(),
-  yarnServiceConstraint.getMaxCardinality() == null
-  ? Integer.MAX_VALUE
-  : yarnServiceConstraint.getMaxCardinality().intValue(),
-  targetExpressions.toArray(new TargetExpression[0])).build();
-  break;
-}
-// The default AND-ed final composite constraint
-if (finalConstraint != null) {
-  finalConstraint = PlacementConstraints
-  .and(constraint.getConstraintExpr(),
-  finalConstraint.getConstraintExpr())
-  .build();
-} else {
-  finalConstraint = constraint;
+  if (placementPolicy != null) {
+for 

[40/50] [abbrv] hadoop git commit: YARN-8362. Bugfix logic in container retries in node manager. Contributed by Chandni Singh

2018-05-30 Thread hanishakoneru
YARN-8362.  Bugfix logic in container retries in node manager.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/135941e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/135941e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/135941e0

Branch: refs/heads/HDDS-48
Commit: 135941e00d762a417c3b4cc524cdc59b0d1810b1
Parents: 2416906
Author: Eric Yang 
Authored: Tue May 29 16:56:58 2018 -0400
Committer: Eric Yang 
Committed: Tue May 29 16:56:58 2018 -0400

--
 .../container/ContainerImpl.java|  4 +-
 .../container/SlidingWindowRetryPolicy.java | 62 +++-
 .../container/TestSlidingWindowRetryPolicy.java |  6 ++
 3 files changed, 44 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/135941e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index c09c7f1..5527ac4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -1602,8 +1602,10 @@ public class ContainerImpl implements Container {
 }
 container.addDiagnostics(exitEvent.getDiagnosticInfo() + "\n");
   }
-
   if (container.shouldRetry(container.exitCode)) {
+// Updates to the retry context should  be protected from concurrent
+// writes. It should only be called from this transition.
+container.retryPolicy.updateRetryContext(container.windowRetryContext);
 container.storeRetryContext();
 doRelaunch(container,
 container.windowRetryContext.getRemainingRetries(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/135941e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 0208879..36a8b91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -42,49 +42,40 @@ public class SlidingWindowRetryPolicy {
 
   public boolean shouldRetry(RetryContext retryContext,
   int errorCode) {
-ContainerRetryContext containerRC = retryContext
-.containerRetryContext;
+ContainerRetryContext containerRC = retryContext.containerRetryContext;
 Preconditions.checkNotNull(containerRC, "container retry context null");
 ContainerRetryPolicy retryPolicy = containerRC.getRetryPolicy();
 if (retryPolicy == ContainerRetryPolicy.RETRY_ON_ALL_ERRORS
 || (retryPolicy == ContainerRetryPolicy.RETRY_ON_SPECIFIC_ERROR_CODES
 && containerRC.getErrorCodes() != null
 && containerRC.getErrorCodes().contains(errorCode))) {
-  if (containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER) {
-return true;
-  }
-  int pendingRetries = calculatePendingRetries(retryContext);
-  updateRetryContext(retryContext, pendingRetries);
-  return pendingRetries > 0;
+  return containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER
+  || calculateRemainingRetries(retryContext) > 0;
 }
 return false;
   }
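
For orientation, the sliding-window policy only counts restarts that fall
inside failuresValidityInterval. A hedged sketch of the remaining-retries
computation (field names follow the class above; the body is illustrative):

    long windowStart = Time.now() - failuresValidityInterval;
    int failuresInWindow = 0;
    for (long restartTime : restartTimes) {
      if (restartTime >= windowStart) {
        failuresInWindow++;          // only restarts inside the window count
      }
    }
    int remainingRetries = maxRetries - failuresInWindow;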
 
   

[29/50] [abbrv] hadoop git commit: HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HADOOP-15497. TestTrash should use proper test path to avoid failing on 
Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c75f8e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c75f8e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c75f8e4

Branch: refs/heads/HDDS-48
Commit: 3c75f8e4933221fa60a87e86a3db5e4727530b6f
Parents: 31ab960
Author: Inigo Goiri 
Authored: Tue May 29 09:11:08 2018 -0700
Committer: Inigo Goiri 
Committed: Tue May 29 09:11:08 2018 -0700

--
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c75f8e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 12aed29..fa2d21f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -49,9 +49,11 @@ import org.apache.hadoop.util.Time;
  */
 public class TestTrash {
 
-  private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
+  private final static File BASE_PATH = new File(GenericTestUtils.getTempPath(
   "testTrash"));
 
+  private final static Path TEST_DIR = new Path(BASE_PATH.getAbsolutePath());
+
   @Before
   public void setUp() throws IOException {
 // ensure each test initiates a FileSystem instance,
@@ -682,7 +684,7 @@ public class TestTrash {
   static class TestLFS extends LocalFileSystem {
 Path home;
 TestLFS() {
-  this(new Path(TEST_DIR, "user/test"));
+  this(TEST_DIR);
 }
 TestLFS(final Path home) {
   super(new RawLocalFileSystem() {
@@ -809,8 +811,8 @@ public class TestTrash {
*/
   public static void verifyTrashPermission(FileSystem fs, Configuration conf)
   throws IOException {
-Path caseRoot = new Path(
-GenericTestUtils.getTempPath("testTrashPermission"));
+Path caseRoot = new Path(BASE_PATH.getPath(),
+"testTrashPermission");
 try (FileSystem fileSystem = fs){
   Trash trash = new Trash(fileSystem, conf);
   FileSystemTestWrapper wrapper =





[24/50] [abbrv] hadoop git commit: HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by 
Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d7c74e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d7c74e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d7c74e

Branch: refs/heads/HDDS-48
Commit: 91d7c74e6aa4850922f68bab490b585443e4fccb
Parents: 7c34366
Author: Inigo Goiri 
Authored: Mon May 28 10:26:47 2018 -0700
Committer: Inigo Goiri 
Committed: Mon May 28 10:26:47 2018 -0700

--
 .../org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d7c74e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
index 4335527..c63ba34 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
@@ -40,6 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.security.NoSuchAlgorithmException;
@@ -91,8 +92,10 @@ public class TestErasureCodingExerciseAPIs {
 // Set up java key store
 String testRootDir = Paths.get(new FileSystemTestHelper().getTestRootDir())
 .toString();
+Path targetFile = new Path(new File(testRootDir).getAbsolutePath(),
+"test.jks");
 String keyProviderURI = JavaKeyStoreProvider.SCHEME_NAME + "://file"
-+ new Path(testRootDir, "test.jks").toUri();
++ targetFile.toUri();
 conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
 keyProviderURI);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,





[50/50] [abbrv] hadoop git commit: HDDS-90: Create ContainerData, Container classes. Contributed by Bharat Viswanadham

2018-05-30 Thread hanishakoneru
HDDS-90: Create ContainerData, Container classes. Contributed by Bharat 
Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cd19b45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cd19b45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cd19b45

Branch: refs/heads/HDDS-48
Commit: 6cd19b45efbbcce6d6ca4b5c0eb3beb42d95e558
Parents: ee1e0e2
Author: Bharat Viswanadham 
Authored: Fri May 25 15:11:19 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:04:19 2018 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   8 +
 .../common/impl/ChunkLayOutVersion.java |  80 +++
 .../container/common/impl/ContainerData.java| 234 +++
 .../common/impl/KeyValueContainer.java  |  74 ++
 .../common/impl/KeyValueContainerData.java  | 159 +
 .../container/common/interfaces/Container.java  |  75 ++
 .../common/TestChunkLayOutVersion.java  |  42 
 .../common/TestKeyValueContainerData.java   | 119 ++
 8 files changed, 791 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd19b45/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 53da18a..72e1006 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -232,6 +232,14 @@ message ContainerData {
   optional string containerDBType = 11;
 }
 
+// This is used for create Container Request.
+message CreateContainerData {
+  required int64 containerId = 1;
+  repeated KeyValue metadata = 2;
+  optional ContainerType containerType = 3 [default = KeyValueContainer];
+}
+
+
 enum ContainerType {
   KeyValueContainer = 1;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd19b45/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
new file mode 100644
index 000..fff68de6
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.impl;
+
+
+/**
+ * Defines layout versions for the Chunks.
+ */
+
+public final class ChunkLayOutVersion {
+
+  private final static ChunkLayOutVersion[] CHUNK_LAYOUT_VERSION_INFOS =
+  {new ChunkLayOutVersion(1, "Data without checksums.")};
+
+  private int version;
+  private String description;
+
+
+  /**
+   * Never created outside this class.
+   *
+   * @param description -- description
+   * @param version -- version number
+   */
+  private ChunkLayOutVersion(int version, String description) {
+this.version = version;
+this.description = description;
+  }
+
+  /**
+   * Returns all versions.
+   *
+   * @return Version info array.
+   */
+  public static ChunkLayOutVersion[] getAllVersions() {
+return CHUNK_LAYOUT_VERSION_INFOS.clone();
+  }
+
+  /**
+   * Returns the latest version.
+   *
+   * @return versionInfo
+   */
+  public static ChunkLayOutVersion getLatestVersion() {
+return CHUNK_LAYOUT_VERSION_INFOS[CHUNK_LAYOUT_VERSION_INFOS.length - 1];
+  }
+
+  /**
+   * Return version.
+   *
+   * @return int
+   */
+  public int getVersion() {
+return version;
+  }
+
+  /**
+   * Returns description.
+   * @return String
+   */
+  public String getDescription() {
+return description;
+  }
+
+}
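
Usage of the new class is straightforward; a short sketch against the API
shown above:

    ChunkLayOutVersion latest = ChunkLayOutVersion.getLatestVersion();
    assert latest.getVersion() == 1;   // single known layout so far
    assert "Data without checksums.".equals(latest.getDescription());
    ChunkLayOutVersion[] all = ChunkLayOutVersion.getAllVersions(); // defensive clone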


[44/50] [abbrv] hadoop git commit: YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed by Takanobu Asanuma

2018-05-30 Thread hanishakoneru
YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed 
by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e44c0849
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e44c0849
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e44c0849

Branch: refs/heads/HDDS-48
Commit: e44c0849d7982c8f1ed43af25d2092090881d19f
Parents: 3b34148
Author: Eric E Payne 
Authored: Wed May 30 16:50:19 2018 +
Committer: Eric E Payne 
Committed: Wed May 30 16:50:19 2018 +

--
 .../containermanager/container/SlidingWindowRetryPolicy.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e44c0849/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 36a8b91..9360669 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -85,8 +85,9 @@ public class SlidingWindowRetryPolicy {
* Updates remaining retries and the restart time when
* required in the retryContext.
* 
-   * When failuresValidityInterval is > 0, it also removes time entries from
-   * restartTimes which are outside the validity interval.
+   * When failuresValidityInterval is {@literal >} 0, it also removes time
+   * entries from restartTimes which are outside the validity
+   * interval.
*/
   protected void updateRetryContext(RetryContext retryContext) {
 if (retryContext.containerRetryContext.getFailuresValidityInterval() > 0) {





[34/50] [abbrv] hadoop git commit: YARN-8329. Docker client configuration can still be set incorrectly. Contributed by Shane Kumpf

2018-05-30 Thread hanishakoneru
YARN-8329. Docker client configuration can still be set incorrectly. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4827e9a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4827e9a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4827e9a9

Branch: refs/heads/HDDS-48
Commit: 4827e9a9085b306bc379cb6e0b1fe4b92326edcd
Parents: e3236a9
Author: Jason Lowe 
Authored: Tue May 29 14:43:17 2018 -0500
Committer: Jason Lowe 
Committed: Tue May 29 14:43:17 2018 -0500

--
 .../yarn/util/DockerClientConfigHandler.java| 23 +++-
 .../security/TestDockerClientConfigHandler.java |  4 ++--
 .../runtime/DockerLinuxContainerRuntime.java|  7 +++---
 3 files changed, 19 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4827e9a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
index 5522cf4..8ec4deb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -154,14 +154,15 @@ public final class DockerClientConfigHandler {
* @param outConfigFile the File to write the Docker client configuration to.
* @param credentials the populated Credentials object.
* @throws IOException if the write fails.
+   * @return true if a Docker credential is found in the supplied credentials.
*/
-  public static void writeDockerCredentialsToPath(File outConfigFile,
+  public static boolean writeDockerCredentialsToPath(File outConfigFile,
   Credentials credentials) throws IOException {
-ObjectMapper mapper = new ObjectMapper();
-ObjectNode rootNode = mapper.createObjectNode();
-ObjectNode registryUrlNode = mapper.createObjectNode();
 boolean foundDockerCred = false;
 if (credentials.numberOfTokens() > 0) {
+  ObjectMapper mapper = new ObjectMapper();
+  ObjectNode rootNode = mapper.createObjectNode();
+  ObjectNode registryUrlNode = mapper.createObjectNode();
   for (Token tk : credentials.getAllTokens()) {
 if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
   foundDockerCred = true;
@@ -176,12 +177,14 @@ public final class DockerClientConfigHandler {
   }
 }
   }
+  if (foundDockerCred) {
+rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
+String json = mapper.writerWithDefaultPrettyPrinter()
+.writeValueAsString(rootNode);
+FileUtils.writeStringToFile(
+outConfigFile, json, StandardCharsets.UTF_8);
+  }
 }
-if (foundDockerCred) {
-  rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
-  String json =
-  mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
-  FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
-}
+return foundDockerCred;
   }
 }
\ No newline at end of file
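
Callers can now skip writing an empty {"auths":{}} config and branch on the
outcome. A hedged usage sketch (the file location is illustrative):

    File dockerConfig = new File(containerWorkDir, "config.json");
    if (DockerClientConfigHandler.writeDockerCredentialsToPath(
        dockerConfig, credentials)) {
      // credentials found and written: pass --config to the docker client
    } else {
      // no Docker credential tokens present: skip the --config argument
    }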

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4827e9a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
index c4cbe45..cfe5a45 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
@@ -116,8 +116,8 @@ public class TestDockerClientConfigHandler {
 Credentials credentials =
 DockerClientConfigHandler.readCredentialsFromConfigFile(
 new Path(file.toURI()), conf, APPLICATION_ID);
-DockerClientConfigHandler.writeDockerCredentialsToPath(outFile,
-credentials);
+assertTrue(DockerClientConfigHandler.writeDockerCredentialsToPath(outFile,
+credentials));
 assertTrue(outFile.exists());
 String fileContents = 

[46/50] [abbrv] hadoop git commit: HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to 
MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47c31ff1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47c31ff1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47c31ff1

Branch: refs/heads/HDDS-48
Commit: 47c31ff16b452d47afc6ffc1cf936ac2de9b788d
Parents: 8197b9b
Author: Inigo Goiri 
Authored: Wed May 30 10:22:04 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 30 10:22:04 2018 -0700

--
 .../server/diskbalancer/DiskBalancerTestUtil.java|  5 -
 .../command/TestDiskBalancerCommand.java | 15 +++
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47c31ff1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index bd8dbce..fef9c63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 
 import org.slf4j.Logger;
@@ -46,6 +47,7 @@ import org.slf4j.LoggerFactory;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.Random;
 import java.util.UUID;
@@ -307,7 +309,8 @@ public class DiskBalancerTestUtil {
 "need to specify capacities for two storages.");
 
 // Write a file and restart the cluster
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+File basedir = new File(GenericTestUtils.getRandomizedTempPath());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
 .numDataNodes(numDatanodes)
 .storageCapacities(storageCapacities)
 .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47c31ff1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 8266c1f..dee2a90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -615,15 +615,15 @@ public class TestDiskBalancerCommand {
 assertThat(
 outputs.get(3),
 is(allOf(containsString("DISK"),
-containsString(cluster.getInstanceStorageDir(0, 0)
-.getAbsolutePath()),
+containsString(new Path(cluster.getInstanceStorageDir(0, 0)
+.getAbsolutePath()).toString()),
 containsString("0.00"),
 containsString("1.00";
 assertThat(
 outputs.get(4),
 is(allOf(containsString("DISK"),
-containsString(cluster.getInstanceStorageDir(0, 1)
-.getAbsolutePath()),
+containsString(new Path(cluster.getInstanceStorageDir(0, 1)
+.getAbsolutePath()).toString()),
 containsString("0.00"),
 containsString("1.00";
   }
@@ -719,9 +719,7 @@ public class TestDiskBalancerCommand {
   @Test
   public void testPrintFullPathOfPlan()
   throws Exception {
-final Path parent = new Path(
-PathUtils.getTestPath(getClass()),
-GenericTestUtils.getMethodName());
+String parent = GenericTestUtils.getRandomizedTempPath();
 
 MiniDFSCluster miniCluster 
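
Both hunks apply the same remedy: replace a fixed MiniDFSCluster base directory with a per-run randomized temp path so leftover or concurrent test directories on Windows cannot collide, and wrap raw absolute paths in org.apache.hadoop.fs.Path so the expected strings use the same normalized separators as the command output. A minimal sketch of the randomized-path pattern, assuming only the APIs visible in the hunks:

  File basedir = new File(GenericTestUtils.getRandomizedTempPath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
      .numDataNodes(1)
      .build();
  try {
    cluster.waitActive();
    // ... run the disk balancer assertions against this cluster ...
  } finally {
    cluster.shutdown();
  }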

[39/50] [abbrv] hadoop git commit: HDDS-114. Ozone Datanode mbean registration fails for StorageLocation. Contributed by Elek, Marton.

2018-05-30 Thread hanishakoneru
HDDS-114. Ozone Datanode mbean registration fails for StorageLocation.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24169062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24169062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24169062

Branch: refs/heads/HDDS-48
Commit: 24169062e5f4e7798a47c5e6e3e94504cba73092
Parents: 30284d0
Author: Anu Engineer 
Authored: Tue May 29 13:23:58 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 29 13:48:55 2018 -0700

--
 .../common/impl/StorageLocationReport.java  | 52 +++-
 .../ContainerLocationManagerMXBean.java |  4 +-
 .../interfaces/StorageLocationReportMXBean.java | 40 +++
 3 files changed, 71 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24169062/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 87b9656..061d09b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.
 StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
 StorageContainerDatanodeProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.ozone.container.common.interfaces
+.StorageLocationReportMXBean;
 
 import java.io.IOException;
 
@@ -30,7 +32,8 @@ import java.io.IOException;
  * Storage location stats of datanodes that provide back store for containers.
  *
  */
-public class StorageLocationReport {
+public final class StorageLocationReport implements
+StorageLocationReportMXBean {
 
   private final String id;
   private final boolean failed;
@@ -76,6 +79,11 @@ public class StorageLocationReport {
 return storageLocation;
   }
 
+  @Override
+  public String getStorageTypeName() {
+return storageType.name();
+  }
+
   public StorageType getStorageType() {
 return storageType;
   }
@@ -204,76 +212,76 @@ public class StorageLocationReport {
 /**
  * Sets the storageId.
  *
- * @param id storageId
+ * @param idValue storageId
  * @return StorageLocationReport.Builder
  */
-public Builder setId(String id) {
-  this.id = id;
+public Builder setId(String idValue) {
+  this.id = idValue;
   return this;
 }
 
 /**
  * Sets whether the volume failed or not.
  *
- * @param failed whether volume failed or not
+ * @param failedValue whether volume failed or not
  * @return StorageLocationReport.Builder
  */
-public Builder setFailed(boolean failed) {
-  this.failed = failed;
+public Builder setFailed(boolean failedValue) {
+  this.failed = failedValue;
   return this;
 }
 
 /**
  * Sets the capacity of volume.
  *
- * @param capacity capacity
+ * @param capacityValue capacity
  * @return StorageLocationReport.Builder
  */
-public Builder setCapacity(long capacity) {
-  this.capacity = capacity;
+public Builder setCapacity(long capacityValue) {
+  this.capacity = capacityValue;
   return this;
 }
 /**
  * Sets the scmUsed Value.
  *
- * @param scmUsed storage space used by scm
+ * @param scmUsedValue storage space used by scm
  * @return StorageLocationReport.Builder
  */
-public Builder setScmUsed(long scmUsed) {
-  this.scmUsed = scmUsed;
+public Builder setScmUsed(long scmUsedValue) {
+  this.scmUsed = scmUsedValue;
   return this;
 }
 
 /**
  * Sets the remaining free space value.
  *
- * @param remaining remaining free space
+ * @param remainingValue remaining free space
  * @return StorageLocationReport.Builder
  */
-public Builder setRemaining(long remaining) {
-  this.remaining = remaining;
+public Builder setRemaining(long remainingValue) {
+  this.remaining = remainingValue;
   return this;
 }
 
 /**
  * Sets the storageType.
  *
- * @param storageType type of the storage used
+ * @param storageTypeValue type of the storage used
  * @return StorageLocationReport.Builder
  */
-public Builder setStorageType(StorageType storageType) {
-  
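
The heart of the mbean fix is visible above: JMX rejects attribute types it cannot map to open types, and getters returning StorageLocation/StorageType made the datanode registration fail, so the report now implements an MXBean interface whose getters return Strings (getStorageTypeName()). A self-contained sketch of the pattern, with all names hypothetical rather than taken from the patch:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MXBeanExample {
  // MXBean attribute types must map to JMX open types (String,
  // primitives, ...); complex domain objects are exposed as Strings.
  public interface VolumeInfoMXBean {
    String getStorageTypeName();
  }

  static class VolumeInfo implements VolumeInfoMXBean {
    @Override
    public String getStorageTypeName() {
      return "DISK";
    }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Registration fails with NotCompliantMBeanException when a getter
    // exposes a type JMX cannot map, which is what HDDS-114 hit.
    mbs.registerMBean(new VolumeInfo(),
        new ObjectName("Hadoop:service=HddsDatanode,name=VolumeInfo"));
  }
}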

[10/50] [abbrv] hadoop git commit: YARN-8292: Fix the dominant resource preemption cannot happen when some of the resource vector becomes negative. Contributed by Wangda Tan.

2018-05-30 Thread hanishakoneru
YARN-8292: Fix the dominant resource preemption cannot happen when some of the 
resource vector becomes negative. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d5509c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d5509c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d5509c6

Branch: refs/heads/HDDS-48
Commit: 8d5509c68156faaa6641f4e747fc9ff80adccf88
Parents: bddfe79
Author: Eric E Payne 
Authored: Fri May 25 16:06:09 2018 +
Committer: Eric E Payne 
Committed: Fri May 25 16:06:09 2018 +

--
 .../resource/DefaultResourceCalculator.java |  15 ++-
 .../resource/DominantResourceCalculator.java|  39 ---
 .../yarn/util/resource/ResourceCalculator.java  |  13 ++-
 .../hadoop/yarn/util/resource/Resources.java|   5 -
 .../AbstractPreemptableResourceCalculator.java  |  58 ---
 .../CapacitySchedulerPreemptionUtils.java   |  61 +--
 .../capacity/FifoCandidatesSelector.java|   8 +-
 .../FifoIntraQueuePreemptionPlugin.java |   4 +-
 .../capacity/IntraQueueCandidatesSelector.java  |   2 +-
 .../capacity/PreemptableResourceCalculator.java |   6 +-
 .../monitor/capacity/TempQueuePerPartition.java |   8 +-
 ...alCapacityPreemptionPolicyMockFramework.java |  30 ++
 .../TestPreemptionForQueueWithPriorities.java   | 103 ---
 ...pacityPreemptionPolicyInterQueueWithDRF.java |  60 ++-
 14 files changed, 312 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 6375c4a..ab6d7f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -136,13 +136,18 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-return resource.getMemorySize() == 0f;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
 return Resources.createResource(
 roundDown((r.getMemorySize()), stepFactor.getMemorySize()));
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {
+return resource.getMemorySize() <= 0;
+  }
+
+  @Override
+  public boolean isAnyMajorResourceAboveZero(Resource resource) {
+return resource.getMemorySize() > 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6fed23b..2e85ebc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -577,19 +577,6 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
-for (int i = 0; i < maxLength; i++) {
-  ResourceInformation resourceInformation = resource
-  .getResourceInformation(i);
-  if (resourceInformation.getValue() == 0L) {
-return true;
-  }
-}
-return false;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
 Resource ret = Resource.newInstance(r);
 int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
@@ -613,4 +600,30 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 }
 return ret;
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {

[14/50] [abbrv] hadoop git commit: HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang 
Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d25289
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d25289
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d25289

Branch: refs/heads/HDDS-48
Commit: 13d25289076b39daf481fb1ee15939dbfe4a6b23
Parents: 8733012
Author: Inigo Goiri 
Authored: Fri May 25 13:32:34 2018 -0700
Committer: Inigo Goiri 
Committed: Fri May 25 13:32:34 2018 -0700

--
 .../hdfs/server/namenode/TestAuditLoggerWithCommands.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d25289/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index 41ee03f..222a1de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -1264,8 +1264,9 @@ public class TestAuditLoggerWithCommands {
   }
 
   private int verifyAuditLogs(String pattern) {
-int length = auditlog.getOutput().split("\n").length;
-String lastAudit = auditlog.getOutput().split("\n")[length - 1];
+int length = auditlog.getOutput().split(System.lineSeparator()).length;
+String lastAudit = auditlog.getOutput()
+.split(System.lineSeparator())[length - 1];
 assertTrue("Unexpected log!", lastAudit.matches(pattern));
 return length;
   }
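
The one-line cause of the Windows failure: the audit logger emits platform line endings, so splitting its captured output on "\n" miscounts lines when they actually end in "\r\n". The portable pattern, as a minimal sketch using the test's auditlog capture:

  String output = auditlog.getOutput();
  String[] lines = output.split(System.lineSeparator());
  String lastAudit = lines[lines.length - 1];  // correct on all platforms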





[08/50] [abbrv] hadoop git commit: YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen)

2018-05-30 Thread hanishakoneru
YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86bc6425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86bc6425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86bc6425

Branch: refs/heads/HDDS-48
Commit: 86bc6425d425913899f1d951498bd040e453b3d0
Parents: d9852eb
Author: Haibo Chen 
Authored: Thu May 24 17:07:21 2018 -0700
Committer: Haibo Chen 
Committed: Thu May 24 17:12:34 2018 -0700

--
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSLeafQueue.java |  31 ++
 .../resourcemanager/scheduler/fair/FSQueue.java |   9 +
 .../scheduler/fair/FairScheduler.java   |  29 +-
 .../scheduler/fair/QueueManager.java| 155 +++--
 .../fair/TestAllocationFileLoaderService.java   | 100 +++---
 .../scheduler/fair/TestQueueManager.java| 337 +++
 7 files changed, 596 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index d8d9051..7a40b6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -87,7 +87,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   private Path allocFile;
   private FileSystem fs;
 
-  private Listener reloadListener;
+  private final Listener reloadListener;
 
   @VisibleForTesting
   long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS;
@@ -95,15 +95,16 @@ public class AllocationFileLoaderService extends 
AbstractService {
   private Thread reloadThread;
   private volatile boolean running = true;
 
-  public AllocationFileLoaderService() {
-this(SystemClock.getInstance());
+  public AllocationFileLoaderService(Listener reloadListener) {
+this(reloadListener, SystemClock.getInstance());
   }
 
   private List defaultPermissions;
 
-  public AllocationFileLoaderService(Clock clock) {
+  public AllocationFileLoaderService(Listener reloadListener, Clock clock) {
 super(AllocationFileLoaderService.class.getName());
 this.clock = clock;
+this.reloadListener = reloadListener;
   }
 
   @Override
@@ -114,6 +115,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   reloadThread = new Thread(() -> {
 while (running) {
   try {
+reloadListener.onCheck();
 long time = clock.getTime();
 long lastModified =
 fs.getFileStatus(allocFile).getModificationTime();
@@ -207,10 +209,6 @@ public class AllocationFileLoaderService extends 
AbstractService {
 return allocPath;
   }
 
-  public synchronized void setReloadListener(Listener reloadListener) {
-this.reloadListener = reloadListener;
-  }
-
   /**
* Updates the allocation list from the allocation config file. This file is
* expected to be in the XML format specified in the design doc.
@@ -351,5 +349,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
   public interface Listener {
 void onReload(AllocationConfiguration info) throws IOException;
+
+void onCheck();
   }
 }
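
Since the listener is now final and injected through the constructor, every caller supplies both callbacks up front; onCheck() fires on each poll of the allocation file, which is what lets the scheduler notice queues removed from the file without an RM restart. A minimal sketch of a Listener, assuming only the interface shown above:

  AllocationFileLoaderService loader = new AllocationFileLoaderService(
      new AllocationFileLoaderService.Listener() {
        @Override
        public void onReload(AllocationConfiguration info) throws IOException {
          // apply the freshly parsed queue configuration
        }

        @Override
        public void onCheck() {
          // runs on every poll, even when the file is unchanged; a good
          // hook for reconciling queues deleted from the allocation file
        }
      });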

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 

[13/50] [abbrv] hadoop git commit: HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia.

2018-05-30 Thread hanishakoneru
HDDS-96. Add an option in ozone script to generate a site file with minimally 
required ozone configs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8733012a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8733012a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8733012a

Branch: refs/heads/HDDS-48
Commit: 8733012ae35f2762d704f94975a762885d116795
Parents: 1e0d4b1
Author: Anu Engineer 
Authored: Fri May 25 13:06:14 2018 -0700
Committer: Anu Engineer 
Committed: Fri May 25 13:06:14 2018 -0700

--
 .../hadoop/hdds/conf/OzoneConfiguration.java|   6 +-
 hadoop-ozone/common/src/main/bin/ozone  |   4 +
 ...TestGenerateOzoneRequiredConfigurations.java | 100 +++
 .../GenerateOzoneRequiredConfigurations.java| 174 +++
 .../hadoop/ozone/genconf/package-info.java  |  24 +++
 5 files changed, 305 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index f07718c..36d953c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -137,7 +137,7 @@ public class OzoneConfiguration extends Configuration {
 
 @Override
 public String toString() {
-  return this.getName() + " " + this.getValue() + this.getTag();
+  return this.getName() + " " + this.getValue() + " " + this.getTag();
 }
 
 @Override
@@ -152,11 +152,11 @@ public class OzoneConfiguration extends Configuration {
 }
   }
 
-  public static void activate(){
+  public static void activate() {
 // adds the default resources
 Configuration.addDefaultResource("hdfs-default.xml");
 Configuration.addDefaultResource("hdfs-site.xml");
 Configuration.addDefaultResource("ozone-default.xml");
 Configuration.addDefaultResource("ozone-site.xml");
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/common/src/main/bin/ozone
--
diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index 00261c7..6843bdd 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -47,6 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager 
service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container 
Manager "
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "genconf" client "generate minimally required ozone 
configs and output to ozone-site.xml in specified path"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -118,6 +119,9 @@ function ozonecmd_case
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;
+genconf)
+  
HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations
+;;
 *)
   HADOOP_CLASSNAME="${subcmd}"
   if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
new file mode 100644
index 000..82582a6
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, 

[02/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.

2018-05-30 Thread hanishakoneru
HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/774daa8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/774daa8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/774daa8d

Branch: refs/heads/HDDS-48
Commit: 774daa8d532f91fe8e342a8da2cfa65a8629
Parents: c05b5d4
Author: Mukul Kumar Singh 
Authored: Thu May 24 15:53:42 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu May 24 15:53:42 2018 +0530

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   |  22 +-
 .../hadoop/ozone/web/client/OzoneBucket.java| 646 ---
 .../hadoop/ozone/web/client/OzoneKey.java   |  44 -
 .../ozone/web/client/OzoneRestClient.java   | 804 ---
 .../hadoop/ozone/web/client/OzoneVolume.java| 584 --
 .../hadoop/ozone/web/client/package-info.java   |  34 -
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   3 +-
 .../apache/hadoop/ozone/RatisTestHelper.java|  14 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java | 207 ++---
 .../hadoop/ozone/web/client/TestBuckets.java| 193 +++--
 .../ozone/web/client/TestBucketsRatis.java  |  15 +-
 .../hadoop/ozone/web/client/TestKeys.java   | 286 ---
 .../hadoop/ozone/web/client/TestKeysRatis.java  |  29 +-
 .../hadoop/ozone/web/client/TestVolume.java | 285 +++
 .../ozone/web/client/TestVolumeRatis.java   |  29 +-
 15 files changed, 548 insertions(+), 2647 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 6d33cd4..42e02f9 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -54,6 +54,7 @@ public class XceiverClient extends XceiverClientSpi {
   private Bootstrap b;
   private EventLoopGroup group;
   private final Semaphore semaphore;
+  private boolean closed = false;
 
   /**
* Constructs a client that can communicate with the Container framework on
@@ -74,6 +75,10 @@ public class XceiverClient extends XceiverClientSpi {
 
   @Override
   public void connect() throws Exception {
+if (closed) {
+  throw new IOException("This channel is not connected.");
+}
+
 if (channel != null && channel.isActive()) {
   throw new IOException("This client is already connected to a host.");
 }
@@ -97,6 +102,18 @@ public class XceiverClient extends XceiverClientSpi {
 channel = b.connect(leader.getHostName(), port).sync().channel();
   }
 
+  public void reconnect() throws IOException {
+try {
+  connect();
+  if (channel == null || !channel.isActive()) {
+throw new IOException("This channel is not connected.");
+  }
+} catch (Exception e) {
+  LOG.error("Error while connecting: ", e);
+  throw new IOException(e);
+}
+  }
+
   /**
* Returns if the exceiver client connects to a server.
*
@@ -109,6 +126,7 @@ public class XceiverClient extends XceiverClientSpi {
 
   @Override
   public void close() {
+closed = true;
 if (group != null) {
   group.shutdownGracefully().awaitUninterruptibly();
 }
@@ -124,7 +142,7 @@ public class XceiverClient extends XceiverClientSpi {
   ContainerProtos.ContainerCommandRequestProto request) throws IOException 
{
 try {
   if ((channel == null) || (!channel.isActive())) {
-throw new IOException("This channel is not connected.");
+reconnect();
   }
   XceiverClientHandler handler =
   channel.pipeline().get(XceiverClientHandler.class);
@@ -160,7 +178,7 @@ public class XceiverClient extends XceiverClientSpi {
   sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request)
   throws IOException, ExecutionException, InterruptedException {
 if ((channel == null) || (!channel.isActive())) {
-  throw new IOException("This channel is not connected.");
+  reconnect();
 }
 XceiverClientHandler handler =
 channel.pipeline().get(XceiverClientHandler.class);
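
Two behaviors combine in this patch: an inactive channel now triggers one transparent reconnect() instead of an immediate IOException, and the closed flag makes close() terminal so a shut-down client cannot be silently revived by that retry. A condensed sketch of the resulting check (the helper name is illustrative; in the patch the logic is inlined at the top of sendCommand and sendCommandAsync):

  private void ensureConnected() throws IOException {
    if (channel == null || !channel.isActive()) {
      reconnect();  // one retry; wraps any failure in IOException,
                    // and connect() refuses to run once closed is true
    }
  }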

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
 

[06/50] [abbrv] hadoop git commit: YARN-8316. Improved diagnostic message for ATS unavailability for YARN Service. Contributed by Billie Rinaldi

2018-05-30 Thread hanishakoneru
YARN-8316.  Improved diagnostic message for ATS unavailability for YARN Service.
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ff5a402
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ff5a402
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ff5a402

Branch: refs/heads/HDDS-48
Commit: 7ff5a40218241ad2380595175a493794129a7402
Parents: 2d19e7d
Author: Eric Yang 
Authored: Thu May 24 16:26:02 2018 -0400
Committer: Eric Yang 
Committed: Thu May 24 16:26:02 2018 -0400

--
 .../org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java   | 2 +-
 .../org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 072e606..1ceb462 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -400,7 +400,7 @@ public class YarnClientImpl extends YarnClient {
 + e.getMessage());
 return null;
   }
-  throw e;
+  throw new IOException(e);
 } catch (NoClassDefFoundError e) {
   NoClassDefFoundError wrappedError = new NoClassDefFoundError(
   e.getMessage() + ". It appears that the timeline client "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index b84b49c..70ff47b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1159,7 +1159,7 @@ public class TestYarnClient extends 
ParameterizedSchedulerTestBase {
   TimelineClient createTimelineClient() throws IOException, YarnException {
 timelineClient = mock(TimelineClient.class);
 when(timelineClient.getDelegationToken(any(String.class)))
-  .thenThrow(new IOException("Best effort test exception"));
+  .thenThrow(new RuntimeException("Best effort test exception"));
 return timelineClient;
   }
 });
@@ -1175,7 +1175,7 @@ public class TestYarnClient extends 
ParameterizedSchedulerTestBase {
   client.serviceInit(conf);
   client.getTimelineDelegationToken();
   Assert.fail("Get delegation token should have thrown an exception");
-} catch (Exception e) {
+} catch (IOException e) {
   // Success
 }
   }





[01/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain. [Forced Update!]

2018-05-30 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 978eaf102 -> 6cd19b45e (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
index 5b67657..a9b8175 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
@@ -23,23 +23,31 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE;
 import static org.junit.Assert.*;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+import java.io.InputStream;
+
 /**
  * End-to-end testing of Ozone REST operations.
  */
@@ -52,7 +60,9 @@ public class TestOzoneRestWithMiniCluster {
 
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
-  private static OzoneRestClient ozoneClient;
+  private static ClientProtocol client;
+  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
+  private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -62,180 +72,125 @@ public class TestOzoneRestWithMiniCluster {
 conf = new OzoneConfiguration();
 cluster = MiniOzoneCluster.newBuilder(conf).build();
 cluster.waitForClusterToBeReady();
-int port = cluster.getHddsDatanodes().get(0)
-.getDatanodeDetails().getOzoneRestPort();
-ozoneClient = new OzoneRestClient(
-String.format("http://localhost:%d", port));
-ozoneClient.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
+client = new RpcClient(conf);
   }
 
   @AfterClass
-  public static void shutdown() throws InterruptedException {
+  public static void shutdown() throws InterruptedException, IOException {
 if (cluster != null) {
   cluster.shutdown();
 }
-IOUtils.cleanupWithLogger(null, ozoneClient);
+client.close();
   }
 
   @Test
   public void testCreateAndGetVolume() throws Exception {
-String volumeName = nextId("volume");
-OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", 
"100TB");
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-assertEquals("bilbo", volume.getOwnerName());
-assertNotNull(volume.getQuota());
-assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-volume.getQuota().sizeInBytes());
-volume = ozoneClient.getVolume(volumeName);
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-assertEquals("bilbo", volume.getOwnerName());
-assertNotNull(volume.getQuota());
-assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-volume.getQuota().sizeInBytes());
+createAndGetVolume();
   }
 
   @Test
   public void testCreateAndGetBucket() throws Exception {
-String volumeName = nextId("volume");
-String bucketName = nextId("bucket");
-OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", 
"100TB");
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-

[20/50] [abbrv] hadoop git commit: MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.

2018-05-30 Thread hanishakoneru
MAPREDUCE-7097. MapReduce JHS should honor 
yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cbe57c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cbe57c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cbe57c

Branch: refs/heads/HDDS-48
Commit: 88cbe57c069a1d2dd3bfb32e3ad742566470a10b
Parents: d14e26b
Author: Rohith Sharma K S 
Authored: Mon May 28 12:45:07 2018 +0530
Committer: Rohith Sharma K S 
Committed: Mon May 28 14:05:49 2018 +0530

--
 .../mapreduce/v2/hs/webapp/HsJobBlock.java  | 18 ++-
 .../mapreduce/v2/hs/webapp/TestHsJobBlock.java  | 20 ++--
 .../apache/hadoop/yarn/webapp/Controller.java   |  4 
 .../org/apache/hadoop/yarn/webapp/View.java | 24 +---
 4 files changed, 55 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 18040f0..9b845cd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -27,6 +27,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -39,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
@@ -56,9 +60,14 @@ import com.google.inject.Inject;
  */
 public class HsJobBlock extends HtmlBlock {
   final AppContext appContext;
+  private UserGroupInformation ugi;
+  private boolean isFilterAppListByUserEnabled;
 
-  @Inject HsJobBlock(AppContext appctx) {
+  @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) {
+super(ctx);
 appContext = appctx;
+isFilterAppListByUserEnabled = conf
+.getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
   }
 
   /*
@@ -78,6 +87,13 @@ public class HsJobBlock extends HtmlBlock {
   html.p().__("Sorry, ", jid, " not found.").__();
   return;
 }
+ugi = getCallerUGI();
+if (isFilterAppListByUserEnabled && ugi != null
+&& !j.checkAccess(ugi, JobACL.VIEW_JOB)) {
+  html.p().__("Sorry, ", jid, " could not be viewed for '",
+  ugi.getUserName(), "'.").__();
+  return;
+}
 if(j instanceof UnparsedJob) {
   final int taskCount = j.getTotalMaps() + j.getTotalReduces();
   UnparsedJob oversizedJob = (UnparsedJob) j;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
index 7fa238e..48e3d3b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
+++ 
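
The guard added to HsJobBlock is the standard pattern behind yarn.webapp.filter-entity-list-by-user: resolve the caller's UGI from the view context, then refuse to render unless the job's view ACL admits that user. A minimal sketch using only the calls visible in the hunk:

  boolean filterByUser = conf.getBoolean(
      YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
  UserGroupInformation callerUgi = getCallerUGI();
  if (filterByUser && callerUgi != null
      && !job.checkAccess(callerUgi, JobACL.VIEW_JOB)) {
    // render a "could not be viewed" message instead of the job page
    return;
  }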

[17/50] [abbrv] hadoop git commit: YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

2018-05-30 Thread hanishakoneru
YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

Change-Id: Ieea6f3eeb83c90cd74233fea896f0fcd0f325d5f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24c842d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24c842d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24c842d

Branch: refs/heads/HDDS-48
Commit: f24c842d52e166e8566337ef93c96438f1c870d8
Parents: 8605a38
Author: Wangda Tan 
Authored: Fri May 25 21:53:20 2018 -0700
Committer: Wangda Tan 
Committed: Fri May 25 21:53:20 2018 -0700

--
 .../server/resourcemanager/ResourceManager.java |   1 +
 .../scheduler/AbstractYarnScheduler.java|   5 +
 .../scheduler/ResourceScheduler.java|   5 +
 .../scheduler/capacity/CapacityScheduler.java   |  31 -
 .../capacity/CapacitySchedulerMetrics.java  | 119 +++
 .../TestCapacitySchedulerMetrics.java   | 110 +
 6 files changed, 269 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 05745ec..c533111 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1216,6 +1216,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   void reinitialize(boolean initialize) {
 ClusterMetrics.destroy();
 QueueMetrics.clearQueueMetrics();
+getResourceScheduler().resetSchedulerMetrics();
 if (initialize) {
   resetRMContext();
   createAndInitActiveServices(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index b2747f7..18c7b4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1464,4 +1464,9 @@ public abstract class AbstractYarnScheduler
   SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
 return false;
   }
+
+  @Override
+  public void resetSchedulerMetrics() {
+// reset scheduler metrics
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index 5a56ac7..dcb6edd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ 
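
The new no-op resetSchedulerMetrics() on AbstractYarnScheduler exists so ResourceManager.reinitialize() can clear per-scheduler metrics alongside ClusterMetrics and QueueMetrics; CapacityScheduler is expected to override it. A sketch of such an override (the destroy() method name is an assumption, not confirmed by this excerpt):

  @Override
  public void resetSchedulerMetrics() {
    // drop the metrics instance so a reinitialized RM starts from
    // clean counters
    CapacitySchedulerMetrics.destroy();
  }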

[11/50] [abbrv] hadoop git commit: HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.

2018-05-30 Thread hanishakoneru
HADOOP-15473. Configure serialFilter in KeyProvider to avoid 
UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02322de3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02322de3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02322de3

Branch: refs/heads/HDDS-48
Commit: 02322de3f95ba78a22c057037ef61aa3ab1d3824
Parents: 8d5509c
Author: Xiao Chen 
Authored: Fri May 25 09:08:15 2018 -0700
Committer: Xiao Chen 
Committed: Fri May 25 09:10:51 2018 -0700

--
 .../apache/hadoop/crypto/key/KeyProvider.java   | 18 +++
 .../fs/CommonConfigurationKeysPublic.java   |  7 ++
 .../src/main/resources/core-default.xml | 23 
 3 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 5d670e5..050540b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 import javax.crypto.KeyGenerator;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER;
+
 /**
  * A provider of secret key material for Hadoop applications. Provides an
  * abstraction to separate key storage from users of encryption. It
@@ -61,6 +63,14 @@ public abstract class KeyProvider {
   CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY;
   public static final int DEFAULT_BITLENGTH = CommonConfigurationKeysPublic.
   HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_DEFAULT;
+  public static final String JCEKS_KEY_SERIALFILTER_DEFAULT =
+  "java.lang.Enum;"
+  + "java.security.KeyRep;"
+  + "java.security.KeyRep$Type;"
+  + "javax.crypto.spec.SecretKeySpec;"
+  + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;"
+  + "!*";
+  public static final String JCEKS_KEY_SERIAL_FILTER = 
"jceks.key.serialFilter";
 
   private final Configuration conf;
 
@@ -394,6 +404,14 @@ public abstract class KeyProvider {
*/
   public KeyProvider(Configuration conf) {
 this.conf = new Configuration(conf);
+// Added for HADOOP-15473. Configured serialFilter property fixes
+// java.security.UnrecoverableKeyException in JDK 8u171.
+if(System.getProperty(JCEKS_KEY_SERIAL_FILTER) == null) {
+  String serialFilter =
+  conf.get(HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER,
+  JCEKS_KEY_SERIALFILTER_DEFAULT);
+  System.setProperty(JCEKS_KEY_SERIAL_FILTER, serialFilter);
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8837cfb..9e0ba20 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -662,6 +662,13 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER =
+  "hadoop.security.crypto.jceks.key.serialfilter";
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
 "hadoop.security.crypto.buffer.size";
   /** Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index fad2985..9564587 100644
--- 
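
Because jceks.key.serialFilter is a JVM-wide system property, the constructor only sets it when nothing was passed on the command line, so an operator-supplied -Djceks.key.serialFilter always wins. The Hadoop-side override goes through configuration; a minimal sketch:

  Configuration conf = new Configuration();
  // Copied into the jceks.key.serialFilter system property by the first
  // KeyProvider constructed, unless the property is already set.
  conf.set("hadoop.security.crypto.jceks.key.serialfilter",
      "java.lang.Enum;java.security.KeyRep;java.security.KeyRep$Type;"
      + "javax.crypto.spec.SecretKeySpec;"
      + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");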

[26/50] [abbrv] hadoop git commit: HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, 
#testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fdc993a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fdc993a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fdc993a

Branch: refs/heads/HDDS-48
Commit: 8fdc993a993728c65084d7dc3ac469059cb1f603
Parents: 9dbf4f0
Author: Inigo Goiri 
Authored: Mon May 28 16:45:42 2018 -0700
Committer: Inigo Goiri 
Committed: Mon May 28 16:45:42 2018 -0700

--
 .../org/apache/hadoop/tools/TestHadoopArchiveLogs.java  | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdc993a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
--
diff --git 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
index 2ddd4c5..a1b662c 100644
--- 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+++ 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -278,7 +279,7 @@ public class TestHadoopArchiveLogs {
 hal.generateScript(localScript);
 Assert.assertTrue(localScript.exists());
 String script = IOUtils.toString(localScript.toURI());
-String[] lines = script.split(System.lineSeparator());
+String[] lines = script.split("\n");
 Assert.assertEquals(22, lines.length);
 Assert.assertEquals("#!/bin/bash", lines[0]);
 Assert.assertEquals("set -e", lines[1]);
@@ -368,7 +369,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(dirPrepared);
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 // Throw a file in the dir
 Path dummyFile = new Path(workingDir, "dummy.txt");
@@ -381,7 +383,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertTrue(fs.exists(dummyFile));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 // -force is true and the dir exists, so it will recreate it and the dummy
 // won't exist anymore
@@ -390,7 +393,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(dirPrepared);
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 Assert.assertFalse(fs.exists(dummyFile));
   }
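
All three assertion fixes are the same: the sticky bit is not supported on Windows local filesystems, so the expected permission must toggle it per platform. The portable expectation, as a one-line sketch:

  FsPermission expected = new FsPermission(
      FsAction.ALL, FsAction.ALL, FsAction.ALL, /* sticky */ !Shell.WINDOWS);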





[30/50] [abbrv] hadoop git commit: YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

2018-05-30 Thread hanishakoneru
YARN-8339. Service AM should localize static/archive resource types to 
container working directory instead of 'resources'. (Suma Shivaprasad via 
wangda)

Change-Id: I9f8e8f621650347f6c2f9e3420edee9eb2f356a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3061bfcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3061bfcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3061bfcd

Branch: refs/heads/HDDS-48
Commit: 3061bfcde53210d2032df3814243498b27a997b7
Parents: 3c75f8e
Author: Wangda Tan 
Authored: Tue May 29 09:23:11 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 29 09:23:11 2018 -0700

--
 .../org/apache/hadoop/yarn/service/provider/ProviderUtils.java | 3 +--
 .../apache/hadoop/yarn/service/provider/TestProviderUtils.java | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index 1ad5fd8..ac90992 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -298,8 +298,7 @@ public class ProviderUtils implements YarnServiceConstants {
 destFile = new Path(staticFile.getDestFile());
   }
 
-  String symlink = APP_RESOURCES_DIR + "/" + destFile.getName();
-  addLocalResource(launcher, symlink, localResource, destFile);
+  addLocalResource(launcher, destFile.getName(), localResource, destFile);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
index 6e8bc43..5d794d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
@@ -154,11 +154,11 @@ public class TestProviderUtils {
 
 ProviderUtils.handleStaticFilesForLocalization(launcher, sfs,
 compLaunchCtx);
-Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"),
+Mockito.verify(launcher).addLocalResource(Mockito.eq("destFile1"),
 any(LocalResource.class));
 Mockito.verify(launcher).addLocalResource(
-Mockito.eq("resources/destFile_2"), any(LocalResource.class));
+Mockito.eq("destFile_2"), any(LocalResource.class));
 Mockito.verify(launcher).addLocalResource(
-Mockito.eq("resources/sourceFile4"), any(LocalResource.class));
+Mockito.eq("sourceFile4"), any(LocalResource.class));
   }
 }
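
The substance of the change is a one-line naming difference: static files used to be linked under a "resources/" subdirectory, and are now linked directly into the container working directory. A self-contained sketch of the before/after symlink names (plain Java with a hypothetical addLocalResource stand-in, not the actual ProviderUtils code):

import java.util.LinkedHashMap;
import java.util.Map;

final class LocalizationNameDemo {
  // Records symlink -> source mappings the way a container launcher might.
  static final Map<String, String> LINKS = new LinkedHashMap<>();

  static void addLocalResource(String symlink, String destFile) {
    LINKS.put(symlink, destFile);
  }

  public static void main(String[] args) {
    String destFile = "hdfs://ns1/app/destFile1";
    String name = destFile.substring(destFile.lastIndexOf('/') + 1);
    addLocalResource("resources/" + name, destFile); // before YARN-8339
    addLocalResource(name, destFile);                // after YARN-8339
    LINKS.forEach((k, v) -> System.out.println(k + " -> " + v));
  }
}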





[45/50] [abbrv] hadoop git commit: HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for 
TestDFSAdminWithHA. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8197b9b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8197b9b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8197b9b5

Branch: refs/heads/HDDS-48
Commit: 8197b9b56040113806bdf328bbee68e95dd0aadd
Parents: e44c084
Author: Inigo Goiri 
Authored: Wed May 30 10:02:19 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 30 10:13:52 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java | 10 --
 .../org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java  |  4 +++-
 2 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8197b9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 1005f7f..f1f74dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -49,6 +49,7 @@ public class MiniQJMHACluster {
 private int numNNs = 2;
 private final MiniDFSCluster.Builder dfsBuilder;
 private boolean forceRemoteEditsOnly = false;
+private String baseDir;
 
 public Builder(Configuration conf) {
   this.conf = conf;
@@ -69,6 +70,11 @@ public class MiniQJMHACluster {
   this.startOpt = startOpt;
 }
 
+public Builder baseDir(String d) {
+  this.baseDir = d;
+  return this;
+}
+
 public Builder setNumNameNodes(int nns) {
   this.numNNs = nns;
   return this;
@@ -104,8 +110,8 @@ public class MiniQJMHACluster {
 basePort = 10000 + RANDOM.nextInt(1000) * 4;
 LOG.info("Set MiniQJMHACluster basePort to " + basePort);
 // start 3 journal nodes
-journalCluster = new MiniJournalCluster.Builder(conf).format(true)
-.build();
+journalCluster = new MiniJournalCluster.Builder(conf)
+.baseDir(builder.baseDir).format(true).build();
 journalCluster.waitActive();
 journalCluster.setNamenodeSharedEditsConf(NAMESERVICE);
 URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8197b9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index b21084e..aa4d481 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
@@ -85,7 +86,8 @@ public class TestDFSAdminWithHA {
 conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
 security);
-cluster = new MiniQJMHACluster.Builder(conf).build();
+String baseDir = GenericTestUtils.getRandomizedTempPath();
+cluster = new MiniQJMHACluster.Builder(conf).baseDir(baseDir).build();
 setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
 cluster.getDfsCluster().getNameNode(1).getHostAndPort());
 cluster.getDfsCluster().getNameNode(0).getHostAndPort();
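
For test authors, the new hook composes like any other setter on the builder. A usage sketch, assuming this patch is applied and the HDFS test utilities are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.test.GenericTestUtils;

public class RandomizedQjmHaDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String baseDir = GenericTestUtils.getRandomizedTempPath();
    MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf)
        .baseDir(baseDir)  // journal storage lands under a per-run path
        .build();
    try {
      System.out.println("QJM HA cluster up under " + baseDir);
    } finally {
      cluster.shutdown();
    }
  }
}

A per-run baseDir keeps concurrent or aborted test runs from contending for the same MiniJournalCluster directory.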





[09/50] [abbrv] hadoop git commit: HADOOP-15494. TestRawLocalFileSystemContract fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HADOOP-15494. TestRawLocalFileSystemContract fails on Windows.
Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddfe796
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddfe796
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddfe796

Branch: refs/heads/HDDS-48
Commit: bddfe796f2f992fc1dcc8a1dd44d64ff2b3c9cf4
Parents: 86bc642
Author: Steve Loughran 
Authored: Fri May 25 11:12:47 2018 +0100
Committer: Steve Loughran 
Committed: Fri May 25 11:12:47 2018 +0100

--
 .../java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe796/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index ebf9ea7..908e330 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -42,7 +42,7 @@ public class TestRawLocalFileSystemContract extends 
FileSystemContractBaseTest {
   private static final Logger LOG =
   LoggerFactory.getLogger(TestRawLocalFileSystemContract.class);
   private final static Path TEST_BASE_DIR =
-  new Path(GenericTestUtils.getTempPath(""));
+  new Path(GenericTestUtils.getRandomizedTestDir().getAbsolutePath());
 
   @Before
   public void setUp() throws Exception {
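
The point of randomizing TEST_BASE_DIR is that contract tests tear down and recreate it; a fixed path still held by a previous run fails on Windows. A self-contained sketch of the idea (the UUID suffix is an illustrative assumption, not GenericTestUtils' actual scheme):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.UUID;

public class RandomizedDirDemo {
  static Path randomizedTestDir(String base) {
    // A unique suffix per run means no two runs fight over one directory.
    return Paths.get(base, "test-dir-" + UUID.randomUUID());
  }

  public static void main(String[] args) {
    System.out.println(randomizedTestDir(System.getProperty("java.io.tmpdir")));
  }
}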





[05/50] [abbrv] hadoop git commit: HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d19e7d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d19e7d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d19e7d0

Branch: refs/heads/HDDS-48
Commit: 2d19e7d08f031341078a36fee74860c58de02993
Parents: c9b63de
Author: Xiaoyu Yao 
Authored: Thu May 24 11:10:30 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu May 24 11:10:30 2018 -0700

--
 .../statemachine/DatanodeStateMachine.java  |   3 -
 .../commandhandler/ContainerReportHandler.java  | 114 ---
 .../states/endpoint/HeartbeatEndpointTask.java  |   5 -
 .../protocol/commands/SendContainerCommand.java |  80 -
 .../StorageContainerDatanodeProtocol.proto  |  16 ++-
 .../container/replication/InProgressPool.java   |  57 --
 .../scm/server/SCMDatanodeProtocolServer.java   |   7 --
 7 files changed, 7 insertions(+), 275 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index a16bfdc..a8fe494 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -26,8 +26,6 @@ import 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .CommandDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-.ContainerReportHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 .DeleteBlocksCommandHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -88,7 +86,6 @@ public class DatanodeStateMachine implements Closeable {
  // When we add new handlers just adding a new handler here should do the
  // trick.
 commandDispatcher = CommandDispatcher.newBuilder()
-.addHandler(new ContainerReportHandler())
 .addHandler(new CloseContainerHandler())
 .addHandler(new DeleteBlocksCommandHandler(
 container.getContainerManager(), conf))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
deleted file mode 100644
index fbea290..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.ozone.container.common.statemachine
-
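
With ContainerReportHandler removed, container reports no longer pass through the command dispatcher at all; the dispatcher keeps only handlers for commands SCM still pushes to the datanode. A minimal, self-contained sketch of the dispatcher pattern itself (hypothetical types, not the Ozone classes):

import java.util.HashMap;
import java.util.Map;

interface CommandHandler {
  String type();
  void handle(String payload);
}

final class CommandDispatcherDemo {
  private final Map<String, CommandHandler> handlers = new HashMap<>();

  CommandDispatcherDemo register(CommandHandler h) {
    handlers.put(h.type(), h);  // one builder-style call per handler
    return this;
  }

  void dispatch(String type, String payload) {
    CommandHandler h = handlers.get(type);
    if (h == null) {
      throw new IllegalArgumentException("no handler for " + type);
    }
    h.handle(payload);
  }

  public static void main(String[] args) {
    CommandDispatcherDemo d = new CommandDispatcherDemo()
        .register(new CommandHandler() {
          public String type() { return "closeContainer"; }
          public void handle(String p) { System.out.println("closing " + p); }
        });
    d.dispatch("closeContainer", "container-42");
  }
}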

[26/50] [abbrv] hadoop git commit: HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain.

2018-05-30 Thread hanishakoneru
HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b54d194b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b54d194b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b54d194b

Branch: refs/heads/HDDS-48
Commit: b54d194ba91a55227a3f02fe2e13a976feacc3e9
Parents: c84448b
Author: Anu Engineer 
Authored: Fri May 25 15:40:46 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../hadoop/hdds/scm/client/HddsClientUtils.java | 23 +
 .../apache/hadoop/ozone/client/ObjectStore.java |  9 
 .../apache/hadoop/ozone/client/OzoneBucket.java | 24 +
 .../apache/hadoop/ozone/client/OzoneVolume.java | 18 +--
 .../hadoop/ozone/client/rest/RestClient.java| 52 
 .../hadoop/ozone/client/rpc/RpcClient.java  | 46 +++--
 6 files changed, 64 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b54d194b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index bc5f8d6..a6813eb 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -170,6 +170,29 @@ public final class HddsClientUtils {
   }
 
   /**
+   * verifies that bucket / volume name is a valid DNS name.
+   *
+   * @param resourceNames Array of bucket / volume names to be verified.
+   */
+  public static void verifyResourceName(String... resourceNames) {
+for (String resourceName : resourceNames) {
+  HddsClientUtils.verifyResourceName(resourceName);
+}
+  }
+
+  /**
+   * Checks that object parameters passed as reference is not null.
+   *
+   * @param references Array of object references to be checked.
+   * @param <T>
+   */
+  public static <T> void checkNotNull(T... references) {
+for (T ref: references) {
+  Preconditions.checkNotNull(ref);
+}
+  }
+
+  /**
* Returns the cache value to be used for list calls.
* @param conf Configuration object
* @return list cache size

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b54d194b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index d8b3011..c5f0689 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -63,8 +63,6 @@ public class ObjectStore {
* @throws IOException
*/
   public void createVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.createVolume(volumeName);
   }
 
@@ -76,9 +74,6 @@ public class ObjectStore {
*/
   public void createVolume(String volumeName, VolumeArgs volumeArgs)
   throws IOException {
-Preconditions.checkNotNull(volumeName);
-Preconditions.checkNotNull(volumeArgs);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.createVolume(volumeName, volumeArgs);
   }
 
@@ -89,8 +84,6 @@ public class ObjectStore {
* @throws IOException
*/
   public OzoneVolume getVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 OzoneVolume volume = proxy.getVolumeDetails(volumeName);
 return volume;
   }
@@ -150,8 +143,6 @@ public class ObjectStore {
* @throws IOException
*/
   public void deleteVolume(String volumeName) throws IOException {
-Preconditions.checkNotNull(volumeName);
-HddsClientUtils.verifyResourceName(volumeName);
 proxy.deleteVolume(volumeName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b54d194b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 5df0254..2f3cff6 100644
--- 
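
Centralizing the checks in HddsClientUtils lets every client entry point validate with a single call. A self-contained sketch of the two varargs helpers (standalone stand-ins; the DNS-label regex is an illustrative assumption, not the exact rule HddsClientUtils enforces):

import java.util.Objects;

public final class ResourceNameCheckDemo {

  static void verifyResourceName(String... names) {
    for (String name : names) {
      Objects.requireNonNull(name, "resource name is null");
      // Rough DNS-label shape: lowercase alphanumerics and '-', 3-63 chars.
      if (!name.matches("[a-z0-9][a-z0-9-]{1,61}[a-z0-9]")) {
        throw new IllegalArgumentException("invalid name: " + name);
      }
    }
  }

  @SafeVarargs
  static <T> void checkNotNull(T... references) {
    for (T ref : references) {
      Objects.requireNonNull(ref);
    }
  }

  public static void main(String[] args) {
    verifyResourceName("volume1", "bucket-2");  // passes
    checkNotNull("rpc-proxy", 42);              // passes
  }
}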

[09/50] [abbrv] hadoop git commit: MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.

2018-05-30 Thread hanishakoneru
MAPREDUCE-7097. MapReduce JHS should honor 
yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3352fa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3352fa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3352fa1

Branch: refs/heads/HDDS-48
Commit: d3352fa155cfb124ad82be63db29c73ec430be92
Parents: fa2ae44
Author: Rohith Sharma K S 
Authored: Mon May 28 12:45:07 2018 +0530
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../mapreduce/v2/hs/webapp/HsJobBlock.java  | 18 ++-
 .../mapreduce/v2/hs/webapp/TestHsJobBlock.java  | 20 ++--
 .../apache/hadoop/yarn/webapp/Controller.java   |  4 
 .../org/apache/hadoop/yarn/webapp/View.java | 24 +---
 4 files changed, 55 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3352fa1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 18040f0..9b845cd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -27,6 +27,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -39,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
@@ -56,9 +60,14 @@ import com.google.inject.Inject;
  */
 public class HsJobBlock extends HtmlBlock {
   final AppContext appContext;
+  private UserGroupInformation ugi;
+  private boolean isFilterAppListByUserEnabled;
 
-  @Inject HsJobBlock(AppContext appctx) {
+  @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) {
+super(ctx);
 appContext = appctx;
+isFilterAppListByUserEnabled = conf
+.getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
   }
 
   /*
@@ -78,6 +87,13 @@ public class HsJobBlock extends HtmlBlock {
   html.p().__("Sorry, ", jid, " not found.").__();
   return;
 }
+ugi = getCallerUGI();
+if (isFilterAppListByUserEnabled && ugi != null
+&& !j.checkAccess(ugi, JobACL.VIEW_JOB)) {
+  html.p().__("Sorry, ", jid, " could not be viewed for '",
+  ugi.getUserName(), "'.").__();
+  return;
+}
 if(j instanceof UnparsedJob) {
   final int taskCount = j.getTotalMaps() + j.getTotalReduces();
   UnparsedJob oversizedJob = (UnparsedJob) j;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3352fa1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
index 7fa238e..48e3d3b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
+++ 
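
The gate HsJobBlock now applies is a three-part condition: render unless filtering is enabled, the caller's identity is known, and the ACL check fails. A simplified, self-contained rendering of that logic (hypothetical stand-ins for the Job and UGI types):

interface Job {
  boolean checkAccess(String user);
}

final class ViewFilterDemo {
  static boolean canView(boolean filterByUserEnabled, String caller, Job job) {
    // Deny only when filtering is on, a caller is known, and the caller
    // lacks the VIEW_JOB-style permission; otherwise render the job.
    return !(filterByUserEnabled && caller != null && !job.checkAccess(caller));
  }

  public static void main(String[] args) {
    Job ownedByAlice = "alice"::equals;
    System.out.println(canView(true, "bob", ownedByAlice));    // false
    System.out.println(canView(true, "alice", ownedByAlice));  // true
    System.out.println(canView(false, "bob", ownedByAlice));   // true
  }
}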

[35/50] [abbrv] hadoop git commit: YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith Sharma K S

2018-05-30 Thread hanishakoneru
YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith 
Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f28ae48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f28ae48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f28ae48

Branch: refs/heads/HDDS-48
Commit: 3f28ae48ec2d0b425171c7047df8388b3303920d
Parents: 9db9913
Author: Billie Rinaldi 
Authored: Wed May 30 12:37:01 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 28 +++-
 .../hadoop/yarn/service/ServiceClientTest.java  | 18 -
 .../yarn/service/client/ServiceClient.java  |  2 ++
 3 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f28ae48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 46c9abe..578273c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -641,20 +641,24 @@ public class ApiServer {
   private Response startService(String appName,
   final UserGroupInformation ugi) throws IOException,
   InterruptedException {
-ugi.doAs(new PrivilegedExceptionAction<Void>() {
-  @Override
-  public Void run() throws YarnException, IOException {
-ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-sc.actionStart(appName);
-sc.close();
-return null;
-  }
-});
+ApplicationId appId =
+ugi.doAs(new PrivilegedExceptionAction<ApplicationId>() {
+  @Override public ApplicationId run()
+  throws YarnException, IOException {
+ServiceClient sc = getServiceClient();
+sc.init(YARN_CONFIG);
+sc.start();
+sc.actionStart(appName);
+ApplicationId appId = sc.getAppId(appName);
+sc.close();
+return appId;
+  }
+});
 LOG.info("Successfully started service " + appName);
 ServiceStatus status = new ServiceStatus();
-status.setDiagnostics("Service " + appName + " is successfully started.");
+status.setDiagnostics(
+"Service " + appName + " is successfully started with ApplicationId: "
++ appId);
 status.setState(ServiceState.ACCEPTED);
 return formatResponse(Status.OK, status);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f28ae48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
index 75b9486..81be750 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
@@ -34,8 +34,10 @@ import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -50,6 +52,8 @@ public class ServiceClientTest extends ServiceClient {
   private Service goodServiceStatus = buildLiveGoodService();
   private boolean initialized;
   private Set expectedInstances 
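
The key change in ApiServer is that the privileged block now returns a value instead of Void, so the ApplicationId computed inside doAs survives for the diagnostics message. A minimal stand-alone illustration of the pattern (the runner below is a stand-in; the real UserGroupInformation.doAs also switches the security context):

import java.security.PrivilegedExceptionAction;

public final class DoAsReturnDemo {
  static <T> T doAs(PrivilegedExceptionAction<T> action) throws Exception {
    return action.run();
  }

  public static void main(String[] args) throws Exception {
    // Returning the id from the block replaces the old "return null" shape.
    String appId = doAs(() -> "application_1527700000000_0001");
    System.out.println("Service is successfully started with ApplicationId: "
        + appId);
  }
}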

[40/50] [abbrv] hadoop git commit: HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for 
TestDFSAdminWithHA. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61cb0f88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61cb0f88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61cb0f88

Branch: refs/heads/HDDS-48
Commit: 61cb0f88d28da9107c12319fe40f0cda24d828d9
Parents: ce1806b
Author: Inigo Goiri 
Authored: Wed May 30 10:02:19 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java | 10 --
 .../org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java  |  4 +++-
 2 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61cb0f88/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 1005f7f..f1f74dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -49,6 +49,7 @@ public class MiniQJMHACluster {
 private int numNNs = 2;
 private final MiniDFSCluster.Builder dfsBuilder;
 private boolean forceRemoteEditsOnly = false;
+private String baseDir;
 
 public Builder(Configuration conf) {
   this.conf = conf;
@@ -69,6 +70,11 @@ public class MiniQJMHACluster {
   this.startOpt = startOpt;
 }
 
+public Builder baseDir(String d) {
+  this.baseDir = d;
+  return this;
+}
+
 public Builder setNumNameNodes(int nns) {
   this.numNNs = nns;
   return this;
@@ -104,8 +110,8 @@ public class MiniQJMHACluster {
 basePort = 10000 + RANDOM.nextInt(1000) * 4;
 LOG.info("Set MiniQJMHACluster basePort to " + basePort);
 // start 3 journal nodes
-journalCluster = new MiniJournalCluster.Builder(conf).format(true)
-.build();
+journalCluster = new MiniJournalCluster.Builder(conf)
+.baseDir(builder.baseDir).format(true).build();
 journalCluster.waitActive();
 journalCluster.setNamenodeSharedEditsConf(NAMESERVICE);
 URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61cb0f88/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index b21084e..aa4d481 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
@@ -85,7 +86,8 @@ public class TestDFSAdminWithHA {
 conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
 security);
-cluster = new MiniQJMHACluster.Builder(conf).build();
+String baseDir = GenericTestUtils.getRandomizedTempPath();
+cluster = new MiniQJMHACluster.Builder(conf).baseDir(baseDir).build();
 setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
 cluster.getDfsCluster().getNameNode(1).getHostAndPort());
 cluster.getDfsCluster().getNameNode(0).getHostAndPort();





[16/50] [abbrv] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)

2018-05-30 Thread hanishakoneru
YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9686584f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9686584f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9686584f

Branch: refs/heads/HDDS-48
Commit: 9686584f919df716b415c1477572b4c31752c872
Parents: 4772e79
Author: Haibo Chen 
Authored: Wed May 23 11:29:55 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .gitignore  |   1 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  26 +-
 .../src/main/resources/yarn-default.xml |  67 ++-
 .../src/CMakeLists.txt  |  19 +
 .../CGroupElasticMemoryController.java  | 476 +++
 .../linux/resources/CGroupsHandler.java |   6 +
 .../linux/resources/CGroupsHandlerImpl.java |   6 +-
 .../CGroupsMemoryResourceHandlerImpl.java   |  15 -
 .../linux/resources/DefaultOOMHandler.java  | 254 ++
 .../monitor/ContainersMonitorImpl.java  |  50 ++
 .../executor/ContainerSignalContext.java|  41 ++
 .../native/oom-listener/impl/oom_listener.c | 171 +++
 .../native/oom-listener/impl/oom_listener.h | 102 
 .../oom-listener/impl/oom_listener_main.c   | 104 
 .../oom-listener/test/oom_listener_test_main.cc | 292 
 .../resources/DummyRunnableWithContext.java |  31 ++
 .../TestCGroupElasticMemoryController.java  | 319 +
 .../TestCGroupsMemoryResourceHandlerImpl.java   |   6 +-
 .../linux/resources/TestDefaultOOMHandler.java  | 307 
 .../monitor/TestContainersMonitor.java  |   1 +
 .../TestContainersMonitorResourceChange.java|   3 +-
 .../site/markdown/NodeManagerCGroupsMemory.md   | 133 ++
 22 files changed, 2391 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9686584f/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 934c009..428950b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@
 target
 build
 dependency-reduced-pom.xml
+make-build-debug
 
 # Filesystem contract test options and credentials
 auth-keys.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9686584f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8e56cb8..6d08831 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1440,6 +1440,25 @@ public class YarnConfiguration extends Configuration {
 NM_PREFIX + "vmem-pmem-ratio";
   public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f;
 
+  /** Specifies whether to do memory check on overall usage. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_ENABLED = NM_PREFIX
+  + "elastic-memory-control.enabled";
+  public static final boolean DEFAULT_NM_ELASTIC_MEMORY_CONTROL_ENABLED = 
false;
+
+  /** Specifies the OOM handler code. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER = NM_PREFIX
+  + "elastic-memory-control.oom-handler";
+
+  /** The path to the OOM listener.*/
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH =
+  NM_PREFIX + "elastic-memory-control.oom-listener.path";
+
+  /** Maximum time in seconds to resolve an OOM situation. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC =
+  NM_PREFIX + "elastic-memory-control.timeout-sec";
+  public static final Integer
+  DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC = 5;
+
   /** Number of Virtual CPU Cores which can be allocated for containers.*/
   public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
   public static final int DEFAULT_NM_VCORES = 8;
@@ -2006,13 +2025,6 @@ public class YarnConfiguration extends Configuration {
   /** The path to the Linux container executor.*/
   public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH =
 NM_PREFIX + "linux-container-executor.path";
-  
-  /** 
-   * The UNIX group that the linux-container-executor should run as.
-   * This is intended to be set as part of container-executor.cfg. 
-   */
-  public static final String 
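
All four new keys hang off the NodeManager prefix, so they read as yarn.nodemanager.elastic-memory-control.*. A self-contained sketch of consuming them, with java.util.Properties standing in for Hadoop's Configuration and defaults mirroring the diff:

import java.util.Properties;

public final class ElasticMemoryConfigDemo {
  static final String PREFIX = "yarn.nodemanager.elastic-memory-control.";

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty(PREFIX + "enabled", "true");

    boolean enabled = Boolean.parseBoolean(
        conf.getProperty(PREFIX + "enabled", "false"));
    int timeoutSec = Integer.parseInt(
        conf.getProperty(PREFIX + "timeout-sec", "5"));

    System.out.println("elastic memory control enabled=" + enabled
        + ", OOM resolution timeout=" + timeoutSec + "s");
  }
}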

[33/50] [abbrv] hadoop git commit: HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to 
MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9db99135
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9db99135
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9db99135

Branch: refs/heads/HDDS-48
Commit: 9db9913576fb22ad57b2a6e937cb76c34a59801c
Parents: 61cb0f8
Author: Inigo Goiri 
Authored: Wed May 30 10:22:04 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../server/diskbalancer/DiskBalancerTestUtil.java|  5 -
 .../command/TestDiskBalancerCommand.java | 15 +++
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db99135/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index bd8dbce..fef9c63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 
 import org.slf4j.Logger;
@@ -46,6 +47,7 @@ import org.slf4j.LoggerFactory;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.Random;
 import java.util.UUID;
@@ -307,7 +309,8 @@ public class DiskBalancerTestUtil {
 "need to specify capacities for two storages.");
 
 // Write a file and restart the cluster
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+File basedir = new File(GenericTestUtils.getRandomizedTempPath());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
 .numDataNodes(numDatanodes)
 .storageCapacities(storageCapacities)
 .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db99135/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 8266c1f..dee2a90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -615,15 +615,15 @@ public class TestDiskBalancerCommand {
 assertThat(
 outputs.get(3),
 is(allOf(containsString("DISK"),
-containsString(cluster.getInstanceStorageDir(0, 0)
-.getAbsolutePath()),
+containsString(new Path(cluster.getInstanceStorageDir(0, 0)
+.getAbsolutePath()).toString()),
 containsString("0.00"),
 containsString("1.00";
 assertThat(
 outputs.get(4),
 is(allOf(containsString("DISK"),
-containsString(cluster.getInstanceStorageDir(0, 1)
-.getAbsolutePath()),
+containsString(new Path(cluster.getInstanceStorageDir(0, 1)
+.getAbsolutePath()).toString()),
 containsString("0.00"),
 containsString("1.00";
   }
@@ -719,9 +719,7 @@ public class TestDiskBalancerCommand {
   @Test
   public void testPrintFullPathOfPlan()
   throws Exception {
-final Path parent = new Path(
-PathUtils.getTestPath(getClass()),
-GenericTestUtils.getMethodName());
+String parent = GenericTestUtils.getRandomizedTempPath();
 
 MiniDFSCluster 

[46/50] [abbrv] hadoop git commit: YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed by Takanobu Asanuma

2018-05-30 Thread hanishakoneru
YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed 
by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce1806ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce1806ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce1806ba

Branch: refs/heads/HDDS-48
Commit: ce1806ba16fe6cde40863540e1fd26d249e285a5
Parents: 6a6a660
Author: Eric E Payne 
Authored: Wed May 30 16:50:19 2018 +
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../containermanager/container/SlidingWindowRetryPolicy.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce1806ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 36a8b91..9360669 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -85,8 +85,9 @@ public class SlidingWindowRetryPolicy {
* Updates remaining retries and the restart time when
* required in the retryContext.
* <p>
-   * When failuresValidityInterval is > 0, it also removes time entries from
-   * restartTimes which are outside the validity interval.
+   * When failuresValidityInterval is {@literal >} 0, it also removes time
+   * entries from <code>restartTimes</code> which are outside the validity
+   * interval.
*/
   protected void updateRetryContext(RetryContext retryContext) {
 if (retryContext.containerRetryContext.getFailuresValidityInterval() > 0) {
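
The general rule behind the fix: javadoc is HTML, so a bare > or < in a comment can break strict doclint builds; {@literal} (or an entity like &gt;) keeps the character verbatim. A tiny example of the safe pattern:

public final class JavadocLiteralDemo {
  /**
   * Prunes entries when the validity interval is {@literal >} 0; a bare
   * greater-than sign here could trip strict doclint builds.
   */
  static boolean shouldPrune(long validityIntervalMs) {
    return validityIntervalMs > 0;
  }

  public static void main(String[] args) {
    System.out.println(shouldPrune(30_000L));  // true
  }
}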





[07/50] [abbrv] hadoop git commit: YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.

2018-05-30 Thread hanishakoneru
YARN-4781. Support intra-queue preemption for fairness ordering policy. 
Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d966c766
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d966c766
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d966c766

Branch: refs/heads/HDDS-48
Commit: d966c766f420fb4fb9b66d55c9266b8cb363e1ab
Parents: e4767d4
Author: Sunil G 
Authored: Mon May 28 16:32:53 2018 +0530
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../FifoIntraQueuePreemptionPlugin.java |  37 ++-
 .../capacity/IntraQueueCandidatesSelector.java  |  40 +++
 .../monitor/capacity/TempAppPerPartition.java   |   9 +
 .../AbstractComparatorOrderingPolicy.java   |   2 -
 ...alCapacityPreemptionPolicyMockFramework.java |  12 +-
 ...yPreemptionPolicyIntraQueueFairOrdering.java | 276 +++
 6 files changed, 366 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d966c766/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 40f333f..12c178c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAPriorityComparator;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.IntraQueuePreemptionOrderPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -41,6 +42,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -263,8 +266,17 @@ public class FifoIntraQueuePreemptionPlugin
   Resource queueReassignableResource,
   PriorityQueue<TempAppPerPartition> orderedByPriority) {
 
-Comparator<TempAppPerPartition> reverseComp = Collections
-.reverseOrder(new TAPriorityComparator());
+Comparator<TempAppPerPartition> reverseComp;
+OrderingPolicy<FiCaSchedulerApp> queueOrderingPolicy =
+tq.leafQueue.getOrderingPolicy();
+if (queueOrderingPolicy instanceof FairOrderingPolicy
+&& (context.getIntraQueuePreemptionOrderPolicy()
+== IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+  reverseComp = Collections.reverseOrder(
+  new TAFairOrderingComparator(this.rc, clusterResource));
+} else {
+  reverseComp = Collections.reverseOrder(new TAPriorityComparator());
+}
 TreeSet<TempAppPerPartition> orderedApps = new TreeSet<>(reverseComp);
 
 String partition = tq.partition;
@@ -355,7 +367,16 @@ public class FifoIntraQueuePreemptionPlugin
   TempQueuePerPartition tq, Collection<FiCaSchedulerApp> apps,
   Resource clusterResource,
   Map<String, Resource> perUserAMUsed) {
-TAPriorityComparator taComparator = new TAPriorityComparator();
+Comparator<TempAppPerPartition> taComparator;
+OrderingPolicy<FiCaSchedulerApp> orderingPolicy =
+tq.leafQueue.getOrderingPolicy();
+if (orderingPolicy instanceof FairOrderingPolicy
+&& 

[23/50] [abbrv] hadoop git commit: HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, 
#testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75ae41ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75ae41ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75ae41ba

Branch: refs/heads/HDDS-48
Commit: 75ae41ba89736a1916fb5158402cc0756e736b78
Parents: a2b3bde
Author: Inigo Goiri 
Authored: Mon May 28 16:45:42 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../org/apache/hadoop/tools/TestHadoopArchiveLogs.java  | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ae41ba/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
--
diff --git 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
index 2ddd4c5..a1b662c 100644
--- 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+++ 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -278,7 +279,7 @@ public class TestHadoopArchiveLogs {
 hal.generateScript(localScript);
 Assert.assertTrue(localScript.exists());
 String script = IOUtils.toString(localScript.toURI());
-String[] lines = script.split(System.lineSeparator());
+String[] lines = script.split("\n");
 Assert.assertEquals(22, lines.length);
 Assert.assertEquals("#!/bin/bash", lines[0]);
 Assert.assertEquals("set -e", lines[1]);
@@ -368,7 +369,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(dirPrepared);
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 // Throw a file in the dir
 Path dummyFile = new Path(workingDir, "dummy.txt");
@@ -381,7 +383,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertTrue(fs.exists(dummyFile));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 // -force is true and the dir exists, so it will recreate it and the dummy
 // won't exist anymore
@@ -390,7 +393,8 @@ public class TestHadoopArchiveLogs {
 Assert.assertTrue(dirPrepared);
 Assert.assertTrue(fs.exists(workingDir));
 Assert.assertEquals(
-new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+!Shell.WINDOWS),
 fs.getFileStatus(workingDir).getPermission());
 Assert.assertFalse(fs.exists(dummyFile));
   }
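
The generateScript fix is worth remembering beyond this test: the archive script is written with "\n" endings on every platform, so assertions must split on "\n" rather than System.lineSeparator(), which is "\r\n" on Windows. A two-line demonstration:

public final class LineSplitDemo {
  public static void main(String[] args) {
    String script = "#!/bin/bash\nset -e\n";         // always LF-delimited
    System.out.println(script.split("\n").length);   // 2 on every OS
    // script.split(System.lineSeparator()) yields 1 on Windows ("\r\n").
  }
}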





[01/50] [abbrv] hadoop git commit: HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 e0838a468 -> 978eaf102


HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. 
Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bff591e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bff591e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bff591e

Branch: refs/heads/HDDS-48
Commit: 2bff591e361f3b8daeabae5717ea1b4bf11de9b4
Parents: b54d194
Author: Inigo Goiri 
Authored: Fri May 25 19:43:33 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff591e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 50d1e75..6da46de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,7 +41,9 @@ public class TestHDFSFileSystemContract extends 
FileSystemContractBaseTest {
 Configuration conf = new HdfsConfiguration();
 conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
 FileSystemContractBaseTest.TEST_UMASK);
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+File basedir = GenericTestUtils.getRandomizedTestDir();
+cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2)
+.build();
 fs = cluster.getFileSystem();
 defaultWorkingDirectory = "/user/" + 
UserGroupInformation.getCurrentUser().getShortUserName();





[31/50] [abbrv] hadoop git commit: HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia.

2018-05-30 Thread hanishakoneru
HDDS-96. Add an option in ozone script to generate a site file with minimally 
required ozone configs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33beaefb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33beaefb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33beaefb

Branch: refs/heads/HDDS-48
Commit: 33beaefb1151e7870c4ba8460870ba3f9cfc83a1
Parents: b5d7fab
Author: Anu Engineer 
Authored: Fri May 25 13:06:14 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../hadoop/hdds/conf/OzoneConfiguration.java|   6 +-
 hadoop-ozone/common/src/main/bin/ozone  |   4 +
 ...TestGenerateOzoneRequiredConfigurations.java | 100 +++
 .../GenerateOzoneRequiredConfigurations.java| 174 +++
 .../hadoop/ozone/genconf/package-info.java  |  24 +++
 5 files changed, 305 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33beaefb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index f07718c..36d953c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -137,7 +137,7 @@ public class OzoneConfiguration extends Configuration {
 
 @Override
 public String toString() {
-  return this.getName() + " " + this.getValue() + this.getTag();
+  return this.getName() + " " + this.getValue() + " " + this.getTag();
 }
 
 @Override
@@ -152,11 +152,11 @@ public class OzoneConfiguration extends Configuration {
 }
   }
 
-  public static void activate(){
+  public static void activate() {
 // adds the default resources
 Configuration.addDefaultResource("hdfs-default.xml");
 Configuration.addDefaultResource("hdfs-site.xml");
 Configuration.addDefaultResource("ozone-default.xml");
 Configuration.addDefaultResource("ozone-site.xml");
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33beaefb/hadoop-ozone/common/src/main/bin/ozone
--
diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index 00261c7..6843bdd 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -47,6 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager 
service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container 
Manager "
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "genconf" client "generate minimally required ozone 
configs and output to ozone-site.xml in specified path"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -118,6 +119,9 @@ function ozonecmd_case
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;
+genconf)
+  HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations
+;;
 *)
   HADOOP_CLASSNAME="${subcmd}"
   if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
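
To make the dispatch concrete: the new "genconf" case only sets HADOOP_CLASSNAME, and the launcher then invokes that class's main(). A hedged sketch of what such a generator's core can look like (this is not the HDDS-96 implementation; the keys and output-path handling are illustrative assumptions):

    // Sketch: write a minimal ozone-site.xml using Hadoop's Configuration API.
    import org.apache.hadoop.conf.Configuration;
    import java.io.FileOutputStream;
    import java.io.OutputStream;

    public class GenconfSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false); // start from an empty config
        conf.set("ozone.enabled", "true");             // illustrative required keys
        conf.set("ozone.scm.names", "localhost");
        try (OutputStream out = new FileOutputStream(args[0] + "/ozone-site.xml")) {
          conf.writeXml(out); // standard <configuration> XML, loadable as a site file
        }
      }
    }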

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33beaefb/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
new file mode 100644
index 000..82582a6
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, 

[43/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ac8235/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 2d88621..f5fe46a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -33,7 +34,7 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.lease.Lease;
 import org.apache.hadoop.ozone.lease.LeaseException;
@@ -368,11 +369,12 @@ public class ContainerMapping implements Mapping {
* @param reports Container report
*/
   @Override
-  public void processContainerReports(ContainerReportsRequestProto reports)
+  public void processContainerReports(DatanodeDetails datanodeDetails,
+  ContainerReportsProto reports)
   throws IOException {
 List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
 containerInfos = reports.getReportsList();
-containerSupervisor.handleContainerReport(reports);
+containerSupervisor.handleContainerReport(datanodeDetails, reports);
 for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
 containerInfos) {
   byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
@@ -402,7 +404,7 @@ public class ContainerMapping implements Mapping {
   // Container not found in our container db.
   LOG.error("Error while processing container report from datanode :" +
   " {}, for container: {}, reason: container doesn't exist in" 
+
-  "container database.", reports.getDatanodeDetails(),
+  "container database.", datanodeDetails,
   datanodeState.getContainerID());
 }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ac8235/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index f560174..ee8e344 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -16,10 +16,11 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -98,7 +99,8 @@ public interface Mapping extends Closeable {
*
* @param reports Container report
*/
-  void processContainerReports(ContainerReportsRequestProto reports)
+  void processContainerReports(DatanodeDetails datanodeDetails,
+   ContainerReportsProto reports)
   throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ac8235/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
index c14303f..5bd0574 100644
--- 

[11/50] [abbrv] hadoop git commit: YARN-8316. Improved diagnostic message for ATS unavailability for YARN Service. Contributed by Billie Rinaldi

2018-05-30 Thread hanishakoneru
YARN-8316.  Improved diagnostic message for ATS unavailability for YARN Service.
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5baf3804
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5baf3804
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5baf3804

Branch: refs/heads/HDDS-48
Commit: 5baf380467d72a28c19d3800ad4426e4d263f38c
Parents: 1941f45
Author: Eric Yang 
Authored: Thu May 24 16:26:02 2018 -0400
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java   | 2 +-
 .../org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5baf3804/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 072e606..1ceb462 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -400,7 +400,7 @@ public class YarnClientImpl extends YarnClient {
 + e.getMessage());
 return null;
   }
-  throw e;
+  throw new IOException(e);
 } catch (NoClassDefFoundError e) {
   NoClassDefFoundError wrappedError = new NoClassDefFoundError(
   e.getMessage() + ". It appears that the timeline client "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5baf3804/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index b84b49c..70ff47b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1159,7 +1159,7 @@ public class TestYarnClient extends 
ParameterizedSchedulerTestBase {
   TimelineClient createTimelineClient() throws IOException, YarnException {
 timelineClient = mock(TimelineClient.class);
 when(timelineClient.getDelegationToken(any(String.class)))
-  .thenThrow(new IOException("Best effort test exception"));
+  .thenThrow(new RuntimeException("Best effort test exception"));
 return timelineClient;
   }
 });
@@ -1175,7 +1175,7 @@ public class TestYarnClient extends 
ParameterizedSchedulerTestBase {
   client.serviceInit(conf);
   client.getTimelineDelegationToken();
   Assert.fail("Get delegation token should have thrown an exception");
-} catch (Exception e) {
+} catch (IOException e) {
   // Success
 }
   }
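
The behavioural contract after this change, as a minimal illustration (not the actual YarnClientImpl body; the method name is hypothetical): any failure surfacing from the timeline client is rethrown as a checked IOException, which is why the updated test narrows its catch from Exception to IOException.

    // Sketch: wrap timeline-client failures in a consistent checked type.
    Token<TimelineDelegationTokenIdentifier> fetchTimelineToken(
        TimelineClient timelineClient, String renewer) throws IOException {
      try {
        return timelineClient.getDelegationToken(renewer);
      } catch (Exception e) {
        throw new IOException(e); // RuntimeException from the client included
      }
    }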





[05/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.

2018-05-30 Thread hanishakoneru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1d0769/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
index 5b67657..a9b8175 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
@@ -23,23 +23,31 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE;
 import static org.junit.Assert.*;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+import java.io.InputStream;
+
 /**
  * End-to-end testing of Ozone REST operations.
  */
@@ -52,7 +60,9 @@ public class TestOzoneRestWithMiniCluster {
 
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
-  private static OzoneRestClient ozoneClient;
+  private static ClientProtocol client;
+  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
+  private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -62,180 +72,125 @@ public class TestOzoneRestWithMiniCluster {
 conf = new OzoneConfiguration();
 cluster = MiniOzoneCluster.newBuilder(conf).build();
 cluster.waitForClusterToBeReady();
-int port = cluster.getHddsDatanodes().get(0)
-.getDatanodeDetails().getOzoneRestPort();
-ozoneClient = new OzoneRestClient(
-String.format("http://localhost:%d", port));
-ozoneClient.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
+client = new RpcClient(conf);
   }
 
   @AfterClass
-  public static void shutdown() throws InterruptedException {
+  public static void shutdown() throws InterruptedException, IOException {
 if (cluster != null) {
   cluster.shutdown();
 }
-IOUtils.cleanupWithLogger(null, ozoneClient);
+client.close();
   }
 
   @Test
   public void testCreateAndGetVolume() throws Exception {
-String volumeName = nextId("volume");
-OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", 
"100TB");
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-assertEquals("bilbo", volume.getOwnerName());
-assertNotNull(volume.getQuota());
-assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-volume.getQuota().sizeInBytes());
-volume = ozoneClient.getVolume(volumeName);
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-assertEquals("bilbo", volume.getOwnerName());
-assertNotNull(volume.getQuota());
-assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-volume.getQuota().sizeInBytes());
+createAndGetVolume();
   }
 
   @Test
   public void testCreateAndGetBucket() throws Exception {
-String volumeName = nextId("volume");
-String bucketName = nextId("bucket");
-OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", 
"100TB");
-assertNotNull(volume);
-assertEquals(volumeName, volume.getVolumeName());
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-assertEquals("bilbo", volume.getOwnerName());
-
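
A hedged sketch of the replacement client path the test migrates to (the builder method names on VolumeArgs are assumptions based on the imports above, not verified against this diff):

    // Sketch: RpcClient implements ClientProtocol and replaces OzoneRestClient.
    ClientProtocol client = new RpcClient(conf);
    client.createVolume("volume-1",
        VolumeArgs.newBuilder().setOwner("bilbo").setQuota("100TB").build());
    OzoneVolume volume = client.getVolumeDetails("volume-1");
    client.close();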

[28/50] [abbrv] hadoop git commit: HADOOP-15494. TestRawLocalFileSystemContract fails on Windows. Contributed by Anbang Hu.

2018-05-30 Thread hanishakoneru
HADOOP-15494. TestRawLocalFileSystemContract fails on Windows.
Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00260815
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00260815
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00260815

Branch: refs/heads/HDDS-48
Commit: 00260815db25911ebe2d55be34b022908a80fb97
Parents: 6caecf2
Author: Steve Loughran 
Authored: Fri May 25 11:12:47 2018 +0100
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00260815/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index ebf9ea7..908e330 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -42,7 +42,7 @@ public class TestRawLocalFileSystemContract extends 
FileSystemContractBaseTest {
   private static final Logger LOG =
   LoggerFactory.getLogger(TestRawLocalFileSystemContract.class);
   private final static Path TEST_BASE_DIR =
-  new Path(GenericTestUtils.getTempPath(""));
+  new Path(GenericTestUtils.getRandomizedTestDir().getAbsolutePath());
 
   @Before
   public void setUp() throws Exception {





[44/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

2018-05-30 Thread hanishakoneru
HDDS-81. Moving ContainerReport inside Datanode heartbeat.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27ac8235
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27ac8235
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27ac8235

Branch: refs/heads/HDDS-48
Commit: 27ac82359e9c76aa30775542721f8fdd32b7edd7
Parents: 619826b
Author: Anu Engineer 
Authored: Tue May 29 12:40:27 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../common/impl/ContainerManagerImpl.java   |  22 +-
 .../common/impl/StorageLocationReport.java  |   8 +-
 .../common/interfaces/ContainerManager.java |   8 +-
 .../statemachine/DatanodeStateMachine.java  |   7 +-
 .../common/statemachine/StateContext.java   |  16 +-
 .../CloseContainerCommandHandler.java   | 113 
 .../commandhandler/CloseContainerHandler.java   | 113 
 .../commandhandler/CommandDispatcher.java   |   5 +-
 .../commandhandler/CommandHandler.java  |   8 +-
 .../DeleteBlocksCommandHandler.java |  12 +-
 .../states/endpoint/HeartbeatEndpointTask.java  |  30 +-
 .../states/endpoint/RegisterEndpointTask.java   |  12 +-
 .../container/ozoneimpl/OzoneContainer.java |  10 +-
 .../StorageContainerDatanodeProtocol.java   |  30 +-
 .../protocol/StorageContainerNodeProtocol.java  |  15 +-
 .../commands/CloseContainerCommand.java |  18 +-
 .../protocol/commands/DeleteBlocksCommand.java  |  18 +-
 .../protocol/commands/RegisteredCommand.java|  26 +-
 .../protocol/commands/ReregisterCommand.java|  16 +-
 .../ozone/protocol/commands/SCMCommand.java |   4 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  50 +---
 ...rDatanodeProtocolServerSideTranslatorPB.java |  53 ++--
 .../StorageContainerDatanodeProtocol.proto  | 256 -
 .../ozone/container/common/ScmTestMock.java |  78 ++
 .../hdds/scm/container/ContainerMapping.java|  10 +-
 .../hadoop/hdds/scm/container/Mapping.java  |   6 +-
 .../replication/ContainerSupervisor.java|  13 +-
 .../container/replication/InProgressPool.java   |  15 +-
 .../hdds/scm/node/HeartbeatQueueItem.java   |  14 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  58 ++--
 .../hdds/scm/node/SCMNodeStorageStatMap.java|  14 +-
 .../scm/server/SCMDatanodeProtocolServer.java   | 195 +++--
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  19 +-
 .../hdds/scm/container/MockNodeManager.java |  26 +-
 .../scm/container/TestContainerMapping.java |  24 +-
 .../container/closer/TestContainerCloser.java   |  12 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   6 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  83 +++---
 .../scm/node/TestSCMNodeStorageStatMap.java |  16 +-
 .../ozone/container/common/TestEndPoint.java| 113 ++--
 .../replication/TestContainerSupervisor.java| 275 ---
 .../ReplicationDatanodeStateManager.java| 101 ---
 .../testutils/ReplicationNodeManagerMock.java   |  14 +-
 .../ozone/TestStorageContainerManager.java  |  11 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |  68 ++---
 45 files changed, 706 insertions(+), 1315 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ac8235/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 9355364..af47015 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,11 +35,11 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.io.IOUtils;
 

[36/50] [abbrv] hadoop git commit: YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

2018-05-30 Thread hanishakoneru
YARN-8339. Service AM should localize static/archive resource types to 
container working directory instead of 'resources'. (Suma Shivaprasad via 
wangda)

Change-Id: I9f8e8f621650347f6c2f9e3420edee9eb2f356a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bf07690
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bf07690
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bf07690

Branch: refs/heads/HDDS-48
Commit: 7bf07690c9ea5bf31efaaf522ec74a823edc5255
Parents: eeab3c6
Author: Wangda Tan 
Authored: Tue May 29 09:23:11 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../org/apache/hadoop/yarn/service/provider/ProviderUtils.java | 3 +--
 .../apache/hadoop/yarn/service/provider/TestProviderUtils.java | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bf07690/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index 1ad5fd8..ac90992 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -298,8 +298,7 @@ public class ProviderUtils implements YarnServiceConstants {
 destFile = new Path(staticFile.getDestFile());
   }
 
-  String symlink = APP_RESOURCES_DIR + "/" + destFile.getName();
-  addLocalResource(launcher, symlink, localResource, destFile);
+  addLocalResource(launcher, destFile.getName(), localResource, destFile);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bf07690/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
index 6e8bc43..5d794d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
@@ -154,11 +154,11 @@ public class TestProviderUtils {
 
 ProviderUtils.handleStaticFilesForLocalization(launcher, sfs,
 compLaunchCtx);
-Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"),
+Mockito.verify(launcher).addLocalResource(Mockito.eq("destFile1"),
 any(LocalResource.class));
 Mockito.verify(launcher).addLocalResource(
-Mockito.eq("resources/destFile_2"), any(LocalResource.class));
+Mockito.eq("destFile_2"), any(LocalResource.class));
 Mockito.verify(launcher).addLocalResource(
-Mockito.eq("resources/sourceFile4"), any(LocalResource.class));
+Mockito.eq("sourceFile4"), any(LocalResource.class));
   }
 }





[12/50] [abbrv] hadoop git commit: HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

2018-05-30 Thread hanishakoneru
HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode 
failover

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4767d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4767d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4767d4c

Branch: refs/heads/HDDS-48
Commit: e4767d4c8c502d87085ce187a968833bc5e9d3c9
Parents: c0a16db
Author: Karthik Palanisamy 
Authored: Mon May 28 19:41:07 2018 +0900
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java   | 2 +-
 .../hadoop-common/src/main/resources/core-default.xml  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4767d4c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index a8c19ab..9295288 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -63,7 +63,7 @@ public abstract class ZKFailoverController {
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = 
"ha.zookeeper.session-timeout.ms";
-  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
+  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 10*1000;
   private static final String ZK_PARENT_ZNODE_KEY = 
"ha.zookeeper.parent-znode";
   public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
   private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4767d4c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 9564587..75acf48 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2168,7 +2168,7 @@
 
 <property>
   <name>ha.zookeeper.session-timeout.ms</name>
-  <value>5000</value>
+  <value>10000</value>
   <description>
     The session timeout to use when the ZKFC connects to ZooKeeper.
     Setting this value to a lower value implies that server crashes
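
A minimal sketch of how the new default is resolved at runtime (illustrative; not ZKFC code): an explicit ha.zookeeper.session-timeout.ms in core-site.xml still wins, otherwise the failover controller now falls back to 10 seconds instead of 5.

    // Sketch: site value if configured, else the raised 10-second default.
    Configuration conf = new Configuration();
    int zkSessionTimeoutMs = conf.getInt("ha.zookeeper.session-timeout.ms", 10 * 1000);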





[14/50] [abbrv] hadoop git commit: YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen)

2018-05-30 Thread hanishakoneru
YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6caecf20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6caecf20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6caecf20

Branch: refs/heads/HDDS-48
Commit: 6caecf20f8d1f0a2e357f15c652a4d43dc53adbb
Parents: 3941831
Author: Haibo Chen 
Authored: Thu May 24 17:07:21 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSLeafQueue.java |  31 ++
 .../resourcemanager/scheduler/fair/FSQueue.java |   9 +
 .../scheduler/fair/FairScheduler.java   |  29 +-
 .../scheduler/fair/QueueManager.java| 155 +++--
 .../fair/TestAllocationFileLoaderService.java   | 100 +++---
 .../scheduler/fair/TestQueueManager.java| 337 +++
 7 files changed, 596 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6caecf20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index d8d9051..7a40b6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -87,7 +87,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   private Path allocFile;
   private FileSystem fs;
 
-  private Listener reloadListener;
+  private final Listener reloadListener;
 
   @VisibleForTesting
   long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS;
@@ -95,15 +95,16 @@ public class AllocationFileLoaderService extends 
AbstractService {
   private Thread reloadThread;
   private volatile boolean running = true;
 
-  public AllocationFileLoaderService() {
-this(SystemClock.getInstance());
+  public AllocationFileLoaderService(Listener reloadListener) {
+this(reloadListener, SystemClock.getInstance());
   }
 
   private List defaultPermissions;
 
-  public AllocationFileLoaderService(Clock clock) {
+  public AllocationFileLoaderService(Listener reloadListener, Clock clock) {
 super(AllocationFileLoaderService.class.getName());
 this.clock = clock;
+this.reloadListener = reloadListener;
   }
 
   @Override
@@ -114,6 +115,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   reloadThread = new Thread(() -> {
 while (running) {
   try {
+reloadListener.onCheck();
 long time = clock.getTime();
 long lastModified =
 fs.getFileStatus(allocFile).getModificationTime();
@@ -207,10 +209,6 @@ public class AllocationFileLoaderService extends 
AbstractService {
 return allocPath;
   }
 
-  public synchronized void setReloadListener(Listener reloadListener) {
-this.reloadListener = reloadListener;
-  }
-
   /**
* Updates the allocation list from the allocation config file. This file is
* expected to be in the XML format specified in the design doc.
@@ -351,5 +349,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
   public interface Listener {
 void onReload(AllocationConfiguration info) throws IOException;
+
+void onCheck();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6caecf20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 

[39/50] [abbrv] hadoop git commit: HDDS-114. Ozone Datanode mbean registration fails for StorageLocation. Contributed by Elek, Marton.

2018-05-30 Thread hanishakoneru
HDDS-114. Ozone Datanode mbean registration fails for StorageLocation.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c64d5dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c64d5dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c64d5dd

Branch: refs/heads/HDDS-48
Commit: 7c64d5ddadb6f093ff978eb935b13c96abcd6862
Parents: a0b3b2d
Author: Anu Engineer 
Authored: Tue May 29 13:23:58 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../common/impl/StorageLocationReport.java  | 52 +++-
 .../ContainerLocationManagerMXBean.java |  4 +-
 .../interfaces/StorageLocationReportMXBean.java | 40 +++
 3 files changed, 71 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c64d5dd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 87b9656..061d09b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.
 StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
 StorageContainerDatanodeProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.ozone.container.common.interfaces
+.StorageLocationReportMXBean;
 
 import java.io.IOException;
 
@@ -30,7 +32,8 @@ import java.io.IOException;
  * Storage location stats of datanodes that provide back store for containers.
  *
  */
-public class StorageLocationReport {
+public final class StorageLocationReport implements
+StorageLocationReportMXBean {
 
   private final String id;
   private final boolean failed;
@@ -76,6 +79,11 @@ public class StorageLocationReport {
 return storageLocation;
   }
 
+  @Override
+  public String getStorageTypeName() {
+return storageType.name();
+  }
+
   public StorageType getStorageType() {
 return storageType;
   }
@@ -204,76 +212,76 @@ public class StorageLocationReport {
 /**
  * Sets the storageId.
  *
- * @param id storageId
+ * @param idValue storageId
  * @return StorageLocationReport.Builder
  */
-public Builder setId(String id) {
-  this.id = id;
+public Builder setId(String idValue) {
+  this.id = idValue;
   return this;
 }
 
 /**
  * Sets whether the volume failed or not.
  *
- * @param failed whether volume failed or not
+ * @param failedValue whether volume failed or not
  * @return StorageLocationReport.Builder
  */
-public Builder setFailed(boolean failed) {
-  this.failed = failed;
+public Builder setFailed(boolean failedValue) {
+  this.failed = failedValue;
   return this;
 }
 
 /**
  * Sets the capacity of volume.
  *
- * @param capacity capacity
+ * @param capacityValue capacity
  * @return StorageLocationReport.Builder
  */
-public Builder setCapacity(long capacity) {
-  this.capacity = capacity;
+public Builder setCapacity(long capacityValue) {
+  this.capacity = capacityValue;
   return this;
 }
 /**
  * Sets the scmUsed Value.
  *
- * @param scmUsed storage space used by scm
+ * @param scmUsedValue storage space used by scm
  * @return StorageLocationReport.Builder
  */
-public Builder setScmUsed(long scmUsed) {
-  this.scmUsed = scmUsed;
+public Builder setScmUsed(long scmUsedValue) {
+  this.scmUsed = scmUsedValue;
   return this;
 }
 
 /**
  * Sets the remaining free space value.
  *
- * @param remaining remaining free space
+ * @param remainingValue remaining free space
  * @return StorageLocationReport.Builder
  */
-public Builder setRemaining(long remaining) {
-  this.remaining = remaining;
+public Builder setRemaining(long remainingValue) {
+  this.remaining = remainingValue;
   return this;
 }
 
 /**
  * Sets the storageType.
  *
- * @param storageType type of the storage used
+ * @param storageTypeValue type of the storage used
  * @return StorageLocationReport.Builder
  */
-public Builder setStorageType(StorageType storageType) {
-  
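
Background sketch for the fix (assumption-level, abbreviated from the patch): JMX MXBeans may only expose open types, so registration failed while the report published StorageLocation/StorageType objects directly. The report now implements a read-only MXBean interface that exposes plain Strings and primitives:

    // Abbreviated sketch of the interface shape; not the complete HDDS-114 file.
    public interface StorageLocationReportMXBean {
      String getStorageLocation();
      String getStorageTypeName(); // e.g. "DISK", the enum's name(), per this diff
      long getCapacity();
      boolean isFailed();
    }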

[04/50] [abbrv] hadoop git commit: YARN-8319. More YARN pages need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

2018-05-30 Thread hanishakoneru
YARN-8319. More YARN pages need to honor 
yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee3e3fc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee3e3fc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee3e3fc2

Branch: refs/heads/HDDS-48
Commit: ee3e3fc26fbed945f06d59372dabbe9bcc735209
Parents: d3bc5dc
Author: Rohith Sharma K S 
Authored: Thu May 24 14:19:46 2018 +0530
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 11 +++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../src/main/resources/yarn-default.xml |  2 +-
 .../nodemanager/webapp/NMWebServices.java   | 63 +-
 .../webapp/TestNMWebServicesApps.java   | 68 +---
 .../server/resourcemanager/ClientRMService.java | 10 +--
 .../resourcemanager/webapp/RMWebServices.java   |  8 +--
 .../reader/TimelineReaderWebServices.java   | 33 ++
 8 files changed, 175 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee3e3fc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6d08831..004a59f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -121,6 +121,10 @@ public class YarnConfiguration extends Configuration {
 new DeprecationDelta(RM_ZK_RETRY_INTERVAL_MS,
 CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS),
 });
+Configuration.addDeprecations(new DeprecationDelta[] {
+new DeprecationDelta("yarn.resourcemanager.display.per-user-apps",
+FILTER_ENTITY_LIST_BY_USER)
+});
   }
 
   //Configurations
@@ -3569,11 +3573,16 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS =
   NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts";
 
-  /*
+  /**
* Support to view apps for given user in secure cluster.
+   * @deprecated This field is deprecated for {@link 
#FILTER_ENTITY_LIST_BY_USER}
*/
+  @Deprecated
   public static final String DISPLAY_APPS_FOR_LOGGED_IN_USER =
   RM_PREFIX + "display.per-user-apps";
+
+  public static final String FILTER_ENTITY_LIST_BY_USER =
+  "yarn.webapp.filter-entity-list-by-user";
   public static final boolean DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER =
   false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee3e3fc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index f4d1ac0..b9ba543 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -182,6 +182,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 // Ignore deprecated properties
 configurationPrefixToSkipCompare
 .add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER);
 
 // Allocate for usage
 xmlPropsToSkipCompare = new HashSet();
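
A minimal sketch of the deprecation mechanism this relies on (illustrative usage of Hadoop's Configuration API): once the delta is registered, reads and writes of the old key transparently resolve to the new one, so existing deployments keep working.

    // Sketch: the old RM-scoped key now aliases the generic webapp key.
    Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
        new Configuration.DeprecationDelta(
            "yarn.resourcemanager.display.per-user-apps",
            "yarn.webapp.filter-entity-list-by-user")
    });
    Configuration conf = new Configuration();
    conf.set("yarn.resourcemanager.display.per-user-apps", "true");
    conf.get("yarn.webapp.filter-entity-list-by-user"); // -> "true"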

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee3e3fc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index da44ccb..c82474c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 

[19/50] [abbrv] hadoop git commit: HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.

2018-05-30 Thread hanishakoneru
HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by 
Yuen-Kuei Hsueh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/146a9d2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/146a9d2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/146a9d2f

Branch: refs/heads/HDDS-48
Commit: 146a9d2f95e4eb8fc161f558207f49e1aa711408
Parents: 75ae41b
Author: Wei-Chiu Chuang 
Authored: Mon May 28 17:32:32 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:25 2018 -0700

--
 .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/146a9d2f/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index b02f34e..17faec2 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -247,9 +247,9 @@ public class KMSACLs implements Runnable, KeyACLs {
 if (blacklist == null) {
   LOG.debug("No blacklist for {}", type.toString());
 } else if (access) {
-  LOG.debug("user is in {}" , blacklist.getAclString());
-} else {
   LOG.debug("user is not in {}" , blacklist.getAclString());
+} else {
+  LOG.debug("user is in {}" , blacklist.getAclString());
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-8362. Bugfix logic in container retries in node manager. Contributed by Chandni Singh

2018-05-30 Thread hanishakoneru
YARN-8362.  Bugfix logic in container retries in node manager.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6bdbf17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6bdbf17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6bdbf17

Branch: refs/heads/HDDS-48
Commit: c6bdbf176c1856f578bbc2cc38a51cef5307e423
Parents: 7c64d5d
Author: Eric Yang 
Authored: Tue May 29 16:56:58 2018 -0400
Committer: Hanisha Koneru 
Committed: Wed May 30 14:00:26 2018 -0700

--
 .../container/ContainerImpl.java|  4 +-
 .../container/SlidingWindowRetryPolicy.java | 62 +++-
 .../container/TestSlidingWindowRetryPolicy.java |  6 ++
 3 files changed, 44 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6bdbf17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index c09c7f1..5527ac4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -1602,8 +1602,10 @@ public class ContainerImpl implements Container {
 }
 container.addDiagnostics(exitEvent.getDiagnosticInfo() + "\n");
   }
-
   if (container.shouldRetry(container.exitCode)) {
+// Updates to the retry context should  be protected from concurrent
+// writes. It should only be called from this transition.
+container.retryPolicy.updateRetryContext(container.windowRetryContext);
 container.storeRetryContext();
 doRelaunch(container,
 container.windowRetryContext.getRemainingRetries(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6bdbf17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 0208879..36a8b91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -42,49 +42,40 @@ public class SlidingWindowRetryPolicy {
 
   public boolean shouldRetry(RetryContext retryContext,
   int errorCode) {
-ContainerRetryContext containerRC = retryContext
-.containerRetryContext;
+ContainerRetryContext containerRC = retryContext.containerRetryContext;
 Preconditions.checkNotNull(containerRC, "container retry context null");
 ContainerRetryPolicy retryPolicy = containerRC.getRetryPolicy();
 if (retryPolicy == ContainerRetryPolicy.RETRY_ON_ALL_ERRORS
 || (retryPolicy == ContainerRetryPolicy.RETRY_ON_SPECIFIC_ERROR_CODES
 && containerRC.getErrorCodes() != null
 && containerRC.getErrorCodes().contains(errorCode))) {
-  if (containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER) {
-return true;
-  }
-  int pendingRetries = calculatePendingRetries(retryContext);
-  updateRetryContext(retryContext, pendingRetries);
-  return pendingRetries > 0;
+  return containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER
+  || calculateRemainingRetries(retryContext) > 0;
 }
 return false;
   }
 
