hadoop git commit: HADOOP-15294. TestUGILoginFromKeytab fails on Java9

2018-03-13 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk e6de10d0a -> fea16a440


HADOOP-15294. TestUGILoginFromKeytab fails on Java9

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fea16a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fea16a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fea16a44

Branch: refs/heads/trunk
Commit: fea16a440d195f1b4e03a62c9249e3518cea23b5
Parents: e6de10d
Author: Takanobu Asanuma 
Authored: Wed Mar 14 13:38:07 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed Mar 14 13:38:07 2018 +0900

--
 .../java/org/apache/hadoop/security/UserGroupInformation.java   | 5 -
 .../src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java| 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fea16a44/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 003a51c..d0522a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1885,7 +1885,10 @@ public class UserGroupInformation {
     @Override
     public void logout() throws LoginException {
       synchronized(getSubjectLock()) {
-        super.logout();
+        if (this.getSubject() != null
+            && !this.getSubject().getPrivateCredentials().isEmpty()) {
+          super.logout();
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fea16a44/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
index 204f656..74130cf 100644
--- 
a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
+++ 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
@@ -166,7 +166,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase {
 
     } finally {
       if (loginContext != null && loginContext.getSubject() != null
-          && !loginContext.getSubject().getPrincipals().isEmpty()) {
+          && !loginContext.getSubject().getPrivateCredentials().isEmpty()) {
         loginContext.logout();
       }
     }
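
The fix suggests the Java 9 failure comes from calling logout() after the Subject's Kerberos credentials are already gone, so both hunks skip the logout when no private credentials remain. A minimal standalone sketch of the same check (illustrative helper class, not Hadoop code):

import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;

/**
 * Illustrative helper only: log out only while the Subject still holds
 * private credentials, mirroring the guard added to UserGroupInformation
 * and the finally block in TestMiniKdc above.
 */
public final class SafeLogout {

  private SafeLogout() {
  }

  public static void logoutQuietly(LoginContext login) throws LoginException {
    if (login == null) {
      return;
    }
    Subject subject = login.getSubject();
    // Skip logout when there is nothing left to destroy.
    if (subject != null && !subject.getPrivateCredentials().isEmpty()) {
      login.logout();
    }
  }
}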





hadoop git commit: YARN-8022. ResourceManager UI cluster/app/ page fails to render. Contributed by Tarun Parimi.

2018-03-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 19521f71d -> c662e688d


YARN-8022. ResourceManager UI cluster/app/ page fails to render. 
Contributed by Tarun Parimi.

(cherry picked from commit e6de10d0a6363bdaf767a7bdac7ad908d7786718)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c662e688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c662e688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c662e688

Branch: refs/heads/branch-3.1
Commit: c662e688d15998da4c6cc327882812c1974313bf
Parents: 19521f7
Author: Rohith Sharma K S 
Authored: Wed Mar 14 09:42:20 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Mar 14 09:48:04 2018 +0530

--
 .../hadoop/yarn/server/webapp/AppBlock.java | 120 +--
 1 file changed, 56 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c662e688/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 4a47bf1..8600b72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -109,8 +109,7 @@ public class AppBlock extends HtmlBlock {
   final GetApplicationReportRequest request =
   GetApplicationReportRequest.newInstance(appID);
   if (callerUGI == null) {
-appReport =
-appBaseProt.getApplicationReport(request).getApplicationReport();
+appReport = getApplicationReport(request);
   } else {
 appReport = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationReport>() {
@@ -143,14 +142,19 @@ public class AppBlock extends HtmlBlock {
 try {
   final GetApplicationAttemptsRequest request =
   GetApplicationAttemptsRequest.newInstance(appID);
-  attempts = callerUGI.doAs(
+  if (callerUGI == null) {
+attempts = getApplicationAttemptsReport(request);
+  } else {
+attempts = callerUGI.doAs(
  new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>>() {
 @Override
-public Collection<ApplicationAttemptReport> run() throws Exception {
+public Collection<ApplicationAttemptReport> run()
+throws Exception {
   return getApplicationAttemptsReport(request);
 }
   });
+  }
 } catch (Exception e) {
   String message =
   "Failed to read the attempts of the application " + appID + ".";
@@ -204,36 +208,55 @@ public class AppBlock extends HtmlBlock {
 String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
 "/cluster/scheduler?openQueues=" + app.getQueue();
 
+generateOverviewTable(app, schedulerPath, webUiType, appReport);
+
+createApplicationMetricsTable(html);
+
+html.__(InfoBlock.class);
+
+generateApplicationTable(html, callerUGI, attempts);
+
+  }
+
+  /**
+   * Generate overview table for app web page.
+   * @param app app info.
+   * @param schedulerPath schedule path.
+   * @param webUiType web ui type.
+   * @param appReport app report.
+   */
+  private void generateOverviewTable(AppInfo app, String schedulerPath,
+  String webUiType, ApplicationReport appReport) {
 ResponseInfo overviewTable = info("Application Overview")
-  .__("User:", schedulerPath, app.getUser())
-  .__("Name:", app.getName())
-  .__("Application Type:", app.getType())
-  .__("Application Tags:",
-app.getApplicationTags() == null ? "" : app.getApplicationTags())
-  .__("Application Priority:", clarifyAppPriority(app.getPriority()))
-  .__(
-"YarnApplicationState:",
-app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
-  .getAppState()))
-  .__("Queue:", schedulerPath, app.getQueue())
-  .__("FinalStatus Reported by AM:",
-clairfyAppFinalStatus(app.getFinalAppStatus()))
-  .__("Started:", Times.format(app.getStartedTime()))
-  .__(
-"Elapsed:",
-StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
-  app.getFinishedTime(
-  .__(
-"Tracking URL:",
-app.getTrackingUrl() == null
-|| 

hadoop git commit: YARN-8022. ResourceManager UI cluster/app/ page fails to render. Contributed by Tarun Parimi.

2018-03-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 76be6cbf6 -> e6de10d0a


YARN-8022. ResourceManager UI cluster/app/ page fails to render. 
Contributed by Tarun Parimi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6de10d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6de10d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6de10d0

Branch: refs/heads/trunk
Commit: e6de10d0a6363bdaf767a7bdac7ad908d7786718
Parents: 76be6cb
Author: Rohith Sharma K S 
Authored: Wed Mar 14 09:42:20 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Mar 14 09:42:20 2018 +0530

--
 .../hadoop/yarn/server/webapp/AppBlock.java | 120 +--
 1 file changed, 56 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6de10d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 4a47bf1..8600b72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -109,8 +109,7 @@ public class AppBlock extends HtmlBlock {
   final GetApplicationReportRequest request =
   GetApplicationReportRequest.newInstance(appID);
   if (callerUGI == null) {
-appReport =
-appBaseProt.getApplicationReport(request).getApplicationReport();
+appReport = getApplicationReport(request);
   } else {
 appReport = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationReport>() {
@@ -143,14 +142,19 @@ public class AppBlock extends HtmlBlock {
 try {
   final GetApplicationAttemptsRequest request =
   GetApplicationAttemptsRequest.newInstance(appID);
-  attempts = callerUGI.doAs(
+  if (callerUGI == null) {
+attempts = getApplicationAttemptsReport(request);
+  } else {
+attempts = callerUGI.doAs(
  new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>>() {
 @Override
-public Collection<ApplicationAttemptReport> run() throws Exception {
+public Collection<ApplicationAttemptReport> run()
+throws Exception {
   return getApplicationAttemptsReport(request);
 }
   });
+  }
 } catch (Exception e) {
   String message =
   "Failed to read the attempts of the application " + appID + ".";
@@ -204,36 +208,55 @@ public class AppBlock extends HtmlBlock {
 String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
 "/cluster/scheduler?openQueues=" + app.getQueue();
 
+generateOverviewTable(app, schedulerPath, webUiType, appReport);
+
+createApplicationMetricsTable(html);
+
+html.__(InfoBlock.class);
+
+generateApplicationTable(html, callerUGI, attempts);
+
+  }
+
+  /**
+   * Generate overview table for app web page.
+   * @param app app info.
+   * @param schedulerPath schedule path.
+   * @param webUiType web ui type.
+   * @param appReport app report.
+   */
+  private void generateOverviewTable(AppInfo app, String schedulerPath,
+  String webUiType, ApplicationReport appReport) {
 ResponseInfo overviewTable = info("Application Overview")
-  .__("User:", schedulerPath, app.getUser())
-  .__("Name:", app.getName())
-  .__("Application Type:", app.getType())
-  .__("Application Tags:",
-app.getApplicationTags() == null ? "" : app.getApplicationTags())
-  .__("Application Priority:", clarifyAppPriority(app.getPriority()))
-  .__(
-"YarnApplicationState:",
-app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
-  .getAppState()))
-  .__("Queue:", schedulerPath, app.getQueue())
-  .__("FinalStatus Reported by AM:",
-clairfyAppFinalStatus(app.getFinalAppStatus()))
-  .__("Started:", Times.format(app.getStartedTime()))
-  .__(
-"Elapsed:",
-StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
-  app.getFinishedTime(
-  .__(
-"Tracking URL:",
-app.getTrackingUrl() == null
-|| app.getTrackingUrl().equals(UNAVAILABLE) ? null : root_url(app
-  
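
The patch routes both fetches through the same shape: call the RPC directly when callerUGI is null, and wrap it in doAs() otherwise. A short sketch of that control flow (hypothetical Caller interface and loadAttempts() placeholder, not the YARN API):

import java.security.PrivilegedExceptionAction;

/**
 * Sketch only: Caller stands in for the caller UGI and loadAttempts()
 * for the real attempts RPC in AppBlock.
 */
interface Caller {
  <T> T doAs(PrivilegedExceptionAction<T> action) throws Exception;
}

final class AttemptFetcher {

  static String fetchAttempts(final Caller callerUGI) throws Exception {
    if (callerUGI == null) {
      // No caller identity (e.g. security disabled): call straight through
      // instead of failing and leaving the page unrendered.
      return loadAttempts();
    }
    // Run the fetch as the remote web user.
    return callerUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        return loadAttempts();
      }
    });
  }

  private static String loadAttempts() {
    return "application attempts";  // placeholder for the real report
  }
}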

hadoop git commit: HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.

2018-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 60feb43b7 -> 6e57ca602


HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo 
Goiri.

(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e57ca60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e57ca60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e57ca60

Branch: refs/heads/branch-2.9
Commit: 6e57ca602904f5a4a9862df377ea8d0f31ebaa21
Parents: 60feb43
Author: Yiqun Lin 
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Mar 14 11:32:50 2018 +0800

--
 .../federation/metrics/StateStoreMetrics.java   |   5 +
 .../driver/StateStoreRecordOperations.java  |  15 -
 .../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
 .../store/driver/impl/StateStoreFileImpl.java   | 109 ++---
 .../driver/impl/StateStoreFileSystemImpl.java   | 128 +++---
 .../driver/impl/StateStoreZooKeeperImpl.java|   6 -
 .../store/driver/TestStateStoreDriverBase.java  |   9 +
 .../store/driver/TestStateStoreFile.java|  12 +
 .../store/driver/TestStateStoreFileBase.java|  47 ++
 .../store/driver/TestStateStoreFileSystem.java  |  14 +-
 10 files changed, 428 insertions(+), 350 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements 
StateStoreMBean {
 writes.resetMinMax();
 removes.resetMinMax();
 failures.resetMinMax();
+
+reads.lastStat().reset();
+writes.lastStat().reset();
+removes.lastStat().reset();
+failures.lastStat().reset();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
<T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new 
instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
* Get a single record from the store that matches the query.
*
* @param clazz Class of record to fetch.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
--- 

hadoop git commit: HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.

2018-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 95a4665ad -> a10506972


HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo 
Goiri.

(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1050697
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1050697
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1050697

Branch: refs/heads/branch-3.0
Commit: a105069729f35f3c8ff7691fa3d51162677734f9
Parents: 95a4665
Author: Yiqun Lin 
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Mar 14 11:31:35 2018 +0800

--
 .../federation/metrics/StateStoreMetrics.java   |   5 +
 .../driver/StateStoreRecordOperations.java  |  15 -
 .../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
 .../store/driver/impl/StateStoreFileImpl.java   | 109 ++---
 .../driver/impl/StateStoreFileSystemImpl.java   | 128 +++---
 .../driver/impl/StateStoreZooKeeperImpl.java|   6 -
 .../store/driver/TestStateStoreDriverBase.java  |   9 +
 .../store/driver/TestStateStoreFile.java|  12 +
 .../store/driver/TestStateStoreFileBase.java|  47 ++
 .../store/driver/TestStateStoreFileSystem.java  |  14 +-
 10 files changed, 428 insertions(+), 350 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1050697/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements 
StateStoreMBean {
 writes.resetMinMax();
 removes.resetMinMax();
 failures.resetMinMax();
+
+reads.lastStat().reset();
+writes.lastStat().reset();
+removes.lastStat().reset();
+failures.lastStat().reset();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1050697/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
<T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new 
instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
* Get a single record from the store that matches the query.
*
* @param clazz Class of record to fetch.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1050697/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
--- 

hadoop git commit: HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.

2018-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 36451f2d5 -> b3d56cb83


HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo 
Goiri.

(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d56cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d56cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d56cb8

Branch: refs/heads/branch-2
Commit: b3d56cb83558c797403cb4538d0c21bf097263ce
Parents: 36451f2
Author: Yiqun Lin 
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Mar 14 11:25:08 2018 +0800

--
 .../federation/metrics/StateStoreMetrics.java   |   5 +
 .../driver/StateStoreRecordOperations.java  |  15 -
 .../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
 .../store/driver/impl/StateStoreFileImpl.java   | 109 ++---
 .../driver/impl/StateStoreFileSystemImpl.java   | 128 +++---
 .../driver/impl/StateStoreZooKeeperImpl.java|   6 -
 .../store/driver/TestStateStoreDriverBase.java  |   9 +
 .../store/driver/TestStateStoreFile.java|  12 +
 .../store/driver/TestStateStoreFileBase.java|  47 ++
 .../store/driver/TestStateStoreFileSystem.java  |  14 +-
 10 files changed, 428 insertions(+), 350 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d56cb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements 
StateStoreMBean {
 writes.resetMinMax();
 removes.resetMinMax();
 failures.resetMinMax();
+
+reads.lastStat().reset();
+writes.lastStat().reset();
+removes.lastStat().reset();
+failures.lastStat().reset();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d56cb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
<T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new 
instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
* Get a single record from the store that matches the query.
*
* @param clazz Class of record to fetch.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d56cb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
--- 

hadoop git commit: HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.

2018-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d7aa93b2f -> 19521f71d


HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo 
Goiri.

(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19521f71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19521f71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19521f71

Branch: refs/heads/branch-3.1
Commit: 19521f71d445d61434af380840a285e06503eed1
Parents: d7aa93b
Author: Yiqun Lin 
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Mar 14 11:23:01 2018 +0800

--
 .../federation/metrics/StateStoreMetrics.java   |   5 +
 .../driver/StateStoreRecordOperations.java  |  15 -
 .../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
 .../store/driver/impl/StateStoreFileImpl.java   | 109 ++---
 .../driver/impl/StateStoreFileSystemImpl.java   | 128 +++---
 .../driver/impl/StateStoreZooKeeperImpl.java|   6 -
 .../store/driver/TestStateStoreDriverBase.java  |   9 +
 .../store/driver/TestStateStoreFile.java|  12 +
 .../store/driver/TestStateStoreFileBase.java|  47 ++
 .../store/driver/TestStateStoreFileSystem.java  |  14 +-
 10 files changed, 428 insertions(+), 350 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19521f71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements 
StateStoreMBean {
 writes.resetMinMax();
 removes.resetMinMax();
 failures.resetMinMax();
+
+reads.lastStat().reset();
+writes.lastStat().reset();
+removes.lastStat().reset();
+failures.lastStat().reset();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19521f71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
<T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new 
instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
* Get a single record from the store that matches the query.
*
* @param clazz Class of record to fetch.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19521f71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
--- 

hadoop git commit: HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.

2018-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 427fd027a -> 76be6cbf6


HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76be6cbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76be6cbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76be6cbf

Branch: refs/heads/trunk
Commit: 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7
Parents: 427fd02
Author: Yiqun Lin 
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Mar 14 11:20:59 2018 +0800

--
 .../federation/metrics/StateStoreMetrics.java   |   5 +
 .../driver/StateStoreRecordOperations.java  |  15 -
 .../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
 .../store/driver/impl/StateStoreFileImpl.java   | 109 ++---
 .../driver/impl/StateStoreFileSystemImpl.java   | 128 +++---
 .../driver/impl/StateStoreZooKeeperImpl.java|   6 -
 .../store/driver/TestStateStoreDriverBase.java  |   9 +
 .../store/driver/TestStateStoreFile.java|  12 +
 .../store/driver/TestStateStoreFileBase.java|  47 ++
 .../store/driver/TestStateStoreFileSystem.java  |  14 +-
 10 files changed, 428 insertions(+), 350 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76be6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements 
StateStoreMBean {
 writes.resetMinMax();
 removes.resetMinMax();
 failures.resetMinMax();
+
+reads.lastStat().reset();
+writes.lastStat().reset();
+removes.lastStat().reset();
+failures.lastStat().reset();
   }
 }
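
The hunk above makes StateStoreMetrics.reset() clear the last-interval sample of each operation metric in addition to its running min/max. A toy sketch of that idea (hypothetical Stat class, not Hadoop's metrics2 types):

/**
 * Toy sketch only: a full reset clears both the running min/max and the
 * last-interval counter, matching the resetMinMax() + lastStat().reset()
 * pair added above.
 */
public final class StatSketch {
  private long min = Long.MAX_VALUE;
  private long max = Long.MIN_VALUE;
  private long lastIntervalCount;

  void add(long value) {
    min = Math.min(min, value);
    max = Math.max(max, value);
    lastIntervalCount++;
  }

  void resetMinMax() {
    min = Long.MAX_VALUE;
    max = Long.MIN_VALUE;
  }

  void resetLastInterval() {
    lastIntervalCount = 0;
  }

  void reset() {
    resetMinMax();
    resetLastInterval();
  }

  public static void main(String[] args) {
    StatSketch reads = new StatSketch();
    reads.add(12);
    reads.reset();
    // Both pieces of state are cleared after reset().
    System.out.println(reads.min == Long.MAX_VALUE && reads.lastIntervalCount == 0);
  }
}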

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76be6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
<T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new 
instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
* Get a single record from the store that matches the query.
*
* @param clazz Class of record to fetch.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76be6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
+++ 

[2/5] hadoop git commit: HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed by Xiao Liang.

2018-03-13 Thread inigoiri
HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed 
by Xiao Liang.

(cherry picked from commit 427fd027a353f665846f43dfd73faf7561fedc07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7aa93b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7aa93b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7aa93b2

Branch: refs/heads/branch-3.1
Commit: d7aa93b2ff81a1e4e93d25e3c94d13fa91f9c31a
Parents: 8064150
Author: Inigo Goiri 
Authored: Tue Mar 13 20:01:07 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Mar 13 20:01:41 2018 -0700

--
 .../org/apache/hadoop/conf/TestConfiguration.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7aa93b2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index c9dd7cc..f1d68cd 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -61,7 +61,6 @@ import static org.apache.hadoop.conf.StorageUnit.MB;
 import static org.apache.hadoop.conf.StorageUnit.TB;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -93,8 +92,8 @@ public class TestConfiguration {
   final static String CONFIG_CORE = new File("./core-site.xml")
   .getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new 
File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-  + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+  "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
 "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -877,7 +876,8 @@ public class TestConfiguration {
 out.close();
 out=new BufferedWriter(new FileWriter(CONFIG));
 writeHeader();
-declareSystemEntity("configuration", "d", CONFIG2);
+declareSystemEntity("configuration", "d",
+new Path(CONFIG2).toUri().toString());
 writeConfiguration();
 appendProperty("a", "b");
 appendProperty("c", "");
@@ -1749,7 +1749,7 @@ public class TestConfiguration {
   assertEquals("test.key2", jp1.getKey());
   assertEquals("value2", jp1.getValue());
   assertEquals(true, jp1.isFinal);
-  assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+  assertEquals(fileResource.toString(), jp1.getResource());
 
   // test xml format
   outWriter = new StringWriter();
@@ -1760,7 +1760,7 @@ public class TestConfiguration {
   assertEquals(1, actualConf1.size());
   assertEquals("value2", actualConf1.get("test.key2"));
   assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-  assertEquals(fileResource.toUri().getPath(),
+  assertEquals(fileResource.toString(),
   actualConf1.getPropertySources("test.key2")[0]);
 
   // case 2: dump an non existing property
@@ -2271,7 +2271,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -2299,7 +2300,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
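
All of the changes above replace hand-built "file://" + getAbsolutePath() strings with File.toURI(), because on Windows getAbsolutePath() yields a backslash path with a drive letter that is not a valid URI. A small standalone sketch of both conversions (plain Java, not the test itself; the "jceks" scheme below is only a placeholder):

import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;

public final class WindowsSafeFileUris {
  public static void main(String[] args) throws URISyntaxException {
    File config = new File("./test-config-uri-TestConfiguration.xml");

    // Fragile on Windows: "C:\..." backslashes break URI parsing.
    String handRolled = "file://" + config.getAbsolutePath();

    // Portable: toURI() always yields a well-formed file: URI.
    String portable = config.toURI().toString();

    // The multi-argument URI constructor needs a /-separated path, hence
    // toURI().getPath() rather than File.toString() in the keystore tests.
    File keystore = new File(new File("target"), "test.jks");
    URI providerUri = new URI("jceks", "file", keystore.toURI().getPath(), null);

    System.out.println(handRolled);
    System.out.println(portable);
    System.out.println(providerUri);
  }
}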



[4/5] hadoop git commit: HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed by Xiao Liang.

2018-03-13 Thread inigoiri
HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed 
by Xiao Liang.

(cherry picked from commit 427fd027a353f665846f43dfd73faf7561fedc07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36451f2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36451f2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36451f2d

Branch: refs/heads/branch-2
Commit: 36451f2d544ec493b2dd676a4095c869ccb18721
Parents: 340cd5f
Author: Inigo Goiri 
Authored: Tue Mar 13 20:01:07 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Mar 13 20:06:07 2018 -0700

--
 .../org/apache/hadoop/conf/TestConfiguration.java  | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36451f2d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index f6c2d46..bceae3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -74,8 +74,8 @@ public class TestConfiguration extends TestCase {
   final static String CONFIG = new 
File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new 
File("./test-config2-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new 
File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-  + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+  "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
 "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -720,7 +720,8 @@ public class TestConfiguration extends TestCase {
 out.close();
 out=new BufferedWriter(new FileWriter(CONFIG));
 writeHeader();
-declareSystemEntity("configuration", "d", CONFIG2);
+declareSystemEntity("configuration", "d",
+new Path(CONFIG2).toUri().toString());
 writeConfiguration();
 appendProperty("a", "b");
 appendProperty("c", "");
@@ -1448,7 +1449,7 @@ public class TestConfiguration extends TestCase {
   assertEquals("test.key2", jp1.getKey());
   assertEquals("value2", jp1.getValue());
   assertEquals(true, jp1.isFinal);
-  assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+  assertEquals(fileResource.toString(), jp1.getResource());
 
   // test xml format
   outWriter = new StringWriter();
@@ -1459,7 +1460,7 @@ public class TestConfiguration extends TestCase {
   assertEquals(1, actualConf1.size());
   assertEquals("value2", actualConf1.get("test.key2"));
   assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-  assertEquals(fileResource.toUri().getPath(),
+  assertEquals(fileResource.toString(),
   actualConf1.getPropertySources("test.key2")[0]);
 
   // case 2: dump an non existing property
@@ -1914,7 +1915,8 @@ public class TestConfiguration extends TestCase {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -1941,7 +1943,8 @@ public class TestConfiguration extends TestCase {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);





[3/5] hadoop git commit: HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed by Xiao Liang.

2018-03-13 Thread inigoiri
HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed 
by Xiao Liang.

(cherry picked from commit 427fd027a353f665846f43dfd73faf7561fedc07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95a4665a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95a4665a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95a4665a

Branch: refs/heads/branch-3.0
Commit: 95a4665ad57153ae215c7a09e47453561e43f15b
Parents: 9889e55
Author: Inigo Goiri 
Authored: Tue Mar 13 20:01:07 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Mar 13 20:02:06 2018 -0700

--
 .../org/apache/hadoop/conf/TestConfiguration.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a4665a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index e90b4f0..1ebfa35 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -57,7 +57,6 @@ import static org.apache.hadoop.conf.StorageUnit.MB;
 import static org.apache.hadoop.conf.StorageUnit.TB;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -88,8 +87,8 @@ public class TestConfiguration {
   final static String CONFIG = new 
File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new 
File("./test-config2-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new 
File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-  + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+  "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
 "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -842,7 +841,8 @@ public class TestConfiguration {
 out.close();
 out=new BufferedWriter(new FileWriter(CONFIG));
 writeHeader();
-declareSystemEntity("configuration", "d", CONFIG2);
+declareSystemEntity("configuration", "d",
+new Path(CONFIG2).toUri().toString());
 writeConfiguration();
 appendProperty("a", "b");
 appendProperty("c", "");
@@ -1714,7 +1714,7 @@ public class TestConfiguration {
   assertEquals("test.key2", jp1.getKey());
   assertEquals("value2", jp1.getValue());
   assertEquals(true, jp1.isFinal);
-  assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+  assertEquals(fileResource.toString(), jp1.getResource());
 
   // test xml format
   outWriter = new StringWriter();
@@ -1725,7 +1725,7 @@ public class TestConfiguration {
   assertEquals(1, actualConf1.size());
   assertEquals("value2", actualConf1.get("test.key2"));
   assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-  assertEquals(fileResource.toUri().getPath(),
+  assertEquals(fileResource.toString(),
   actualConf1.getPropertySources("test.key2")[0]);
 
   // case 2: dump an non existing property
@@ -2236,7 +2236,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -2264,7 +2265,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);



[1/5] hadoop git commit: HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed by Xiao Liang.

2018-03-13 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 340cd5f1b -> 36451f2d5
  refs/heads/branch-2.9 98b086cec -> 60feb43b7
  refs/heads/branch-3.0 9889e55e5 -> 95a4665ad
  refs/heads/branch-3.1 80641508c -> d7aa93b2f
  refs/heads/trunk b167d6076 -> 427fd027a


HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed 
by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/427fd027
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/427fd027
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/427fd027

Branch: refs/heads/trunk
Commit: 427fd027a353f665846f43dfd73faf7561fedc07
Parents: b167d60
Author: Inigo Goiri 
Authored: Tue Mar 13 20:01:07 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Mar 13 20:01:07 2018 -0700

--
 .../org/apache/hadoop/conf/TestConfiguration.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/427fd027/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index c9dd7cc..f1d68cd 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -61,7 +61,6 @@ import static org.apache.hadoop.conf.StorageUnit.MB;
 import static org.apache.hadoop.conf.StorageUnit.TB;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -93,8 +92,8 @@ public class TestConfiguration {
   final static String CONFIG_CORE = new File("./core-site.xml")
   .getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new 
File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-  + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+  "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
 "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -877,7 +876,8 @@ public class TestConfiguration {
 out.close();
 out=new BufferedWriter(new FileWriter(CONFIG));
 writeHeader();
-declareSystemEntity("configuration", "d", CONFIG2);
+declareSystemEntity("configuration", "d",
+new Path(CONFIG2).toUri().toString());
 writeConfiguration();
 appendProperty("a", "b");
 appendProperty("c", "");
@@ -1749,7 +1749,7 @@ public class TestConfiguration {
   assertEquals("test.key2", jp1.getKey());
   assertEquals("value2", jp1.getValue());
   assertEquals(true, jp1.isFinal);
-  assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+  assertEquals(fileResource.toString(), jp1.getResource());
 
   // test xml format
   outWriter = new StringWriter();
@@ -1760,7 +1760,7 @@ public class TestConfiguration {
   assertEquals(1, actualConf1.size());
   assertEquals("value2", actualConf1.get("test.key2"));
   assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-  assertEquals(fileResource.toUri().getPath(),
+  assertEquals(fileResource.toString(),
   actualConf1.getPropertySources("test.key2")[0]);
 
   // case 2: dump an non existing property
@@ -2271,7 +2271,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -2299,7 +2300,8 @@ public class TestConfiguration {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 

[5/5] hadoop git commit: HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed by Xiao Liang.

2018-03-13 Thread inigoiri
HADOOP-15308. TestConfiguration fails on Windows because of paths. Contributed 
by Xiao Liang.

(cherry picked from commit 427fd027a353f665846f43dfd73faf7561fedc07)
(cherry picked from commit 36451f2d544ec493b2dd676a4095c869ccb18721)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60feb43b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60feb43b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60feb43b

Branch: refs/heads/branch-2.9
Commit: 60feb43b7c24127af1d791a1cf10248bffd143c7
Parents: 98b086c
Author: Inigo Goiri 
Authored: Tue Mar 13 20:01:07 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Mar 13 20:07:09 2018 -0700

--
 .../org/apache/hadoop/conf/TestConfiguration.java  | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60feb43b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index f6c2d46..bceae3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -74,8 +74,8 @@ public class TestConfiguration extends TestCase {
   final static String CONFIG = new 
File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new 
File("./test-config2-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG_FOR_ENUM = new 
File("./test-config-enum-TestConfiguration.xml").getAbsolutePath();
-  final static String CONFIG_FOR_URI = "file://"
-  + new File("./test-config-uri-TestConfiguration.xml").getAbsolutePath();
+  final static String CONFIG_FOR_URI = new File(
+  "./test-config-uri-TestConfiguration.xml").toURI().toString();
 
   private static final String CONFIG_MULTI_BYTE = new File(
 "./test-config-multi-byte-TestConfiguration.xml").getAbsolutePath();
@@ -720,7 +720,8 @@ public class TestConfiguration extends TestCase {
 out.close();
 out=new BufferedWriter(new FileWriter(CONFIG));
 writeHeader();
-declareSystemEntity("configuration", "d", CONFIG2);
+declareSystemEntity("configuration", "d",
+new Path(CONFIG2).toUri().toString());
 writeConfiguration();
 appendProperty("a", "b");
 appendProperty("c", "");
@@ -1448,7 +1449,7 @@ public class TestConfiguration extends TestCase {
   assertEquals("test.key2", jp1.getKey());
   assertEquals("value2", jp1.getValue());
   assertEquals(true, jp1.isFinal);
-  assertEquals(fileResource.toUri().getPath(), jp1.getResource());
+  assertEquals(fileResource.toString(), jp1.getResource());
 
   // test xml format
   outWriter = new StringWriter();
@@ -1459,7 +1460,7 @@ public class TestConfiguration extends TestCase {
   assertEquals(1, actualConf1.size());
   assertEquals("value2", actualConf1.get("test.key2"));
   assertTrue(actualConf1.getFinalParameters().contains("test.key2"));
-  assertEquals(fileResource.toUri().getPath(),
+  assertEquals(fileResource.toString(),
   actualConf1.getPropertySources("test.key2")[0]);
 
   // case 2: dump an non existing property
@@ -1914,7 +1915,8 @@ public class TestConfiguration extends TestCase {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
@@ -1941,7 +1943,8 @@ public class TestConfiguration extends TestCase {
 final File tmpDir = GenericTestUtils.getRandomizedTestDir();
 tmpDir.mkdirs();
 final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
-"file",  new File(tmpDir, "test.jks").toString(), null).toString();
+"file",  new File(tmpDir, "test.jks").toURI().getPath(),
+null).toString();
 
 conf = new Configuration(false);
 conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
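
The change above replaces hand-built "file://" + absolute-path strings with File.toURI(), which yields a well-formed URI on every platform (on Windows an absolute path starts with a drive letter and uses backslashes, so plain concatenation produces an invalid URI). A minimal, self-contained sketch of the difference; the file name is only illustrative:

  import java.io.File;

  public class FileUriExample {
    public static void main(String[] args) {
      File f = new File("./test-config-TestConfiguration.xml");
      // Fragile: on Windows this becomes something like
      // "file://C:\work\test-config-TestConfiguration.xml" -- not a valid URI.
      String concatenated = "file://" + f.getAbsolutePath();
      // Robust: toURI() adds the scheme and escapes the path correctly,
      // e.g. "file:/C:/work/test-config-TestConfiguration.xml".
      String viaToUri = f.toURI().toString();
      System.out.println(concatenated);
      System.out.println(viaToUri);
    }
  }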


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

[40/50] hadoop git commit: HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma.

2018-03-13 Thread aengineer
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84c10955
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84c10955
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84c10955

Branch: refs/heads/HDFS-7240
Commit: 84c10955863eca1e300aeeac1d9cd7a1186144b6
Parents: b2b9ce5
Author: Xiao Chen 
Authored: Tue Mar 13 09:57:20 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 09:58:03 2018 -0700

--
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 --
 .../hdfs/TestFileStatusWithRandomECPolicy.java  | 49 
 2 files changed, 59 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index 077cf3a..a5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,7 +34,10 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-public class TestFileStatusWithECPolicy {
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DFSClient client;
@@ -50,8 +53,7 @@ public class TestFileStatusWithECPolicy {
 cluster.waitActive();
 fs = cluster.getFileSystem();
 client = fs.getClient();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
   }
 
   @After
@@ -62,6 +64,10 @@ public class TestFileStatusWithECPolicy {
 }
   }
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Test
   public void testFileStatusWithECPolicy() throws Exception {
 // test directory doesn't have an EC policy
@@ -76,8 +82,7 @@ public class TestFileStatusWithECPolicy {
 ContractTestUtils.assertNotErasureCoded(fs, file);
 fs.delete(file, true);
 
-final ErasureCodingPolicy ecPolicy1 =
-StripedFileTestUtil.getDefaultECPolicy();
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
 // set EC policy on dir
 fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
 ContractTestUtils.assertErasureCoded(fs, dir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
new file mode 100644
index 000..18902a7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestFileStatusWithDefaultECPolicy to use a random
+ * (non-default) EC policy.
+ */
+public class TestFileStatusWithRandomECPolicy extends
+TestFileStatusWithDefaultECPolicy {
+  private static final Logger LOG = 

[34/50] hadoop git commit: HDFS-12156. TestFSImage fails without -Pnative

2018-03-13 Thread aengineer
HDFS-12156. TestFSImage fails without -Pnative


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/319defaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/319defaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/319defaf

Branch: refs/heads/HDFS-7240
Commit: 319defafc105c0d0b69b83828b578d9c453036f5
Parents: 4afd50b
Author: Akira Ajisaka 
Authored: Tue Mar 13 11:26:48 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 13 11:26:56 2018 +0900

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFSImage.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/319defaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 38a6dab..ba08f73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -74,6 +75,7 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assume;
 import org.junit.Test;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -99,6 +101,13 @@ public class TestFSImage {
 setCompressCodec(conf, "org.apache.hadoop.io.compress.DefaultCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.GzipCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.BZip2Codec");
+  }
+
+  @Test
+  public void testNativeCompression() throws IOException {
+Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
 setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec");
   }
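
The new test moves the Lz4Codec case behind a JUnit assumption so it is reported as skipped rather than failed when the native libraries were not built with -Pnative. A small self-contained sketch of that pattern; the capability check here is a stand-in for NativeCodeLoader.isNativeCodeLoaded():

  import org.junit.Assume;
  import org.junit.Test;

  public class NativeGuardExample {
    // Stand-in for NativeCodeLoader.isNativeCodeLoaded(): any capability
    // probe that may legitimately be false in some build environments.
    private static boolean nativeAvailable() {
      return "true".equals(System.getProperty("example.native.enabled"));
    }

    @Test
    public void testNeedsNativeCode() {
      // When the assumption fails, JUnit marks the test as skipped, not failed.
      Assume.assumeTrue(nativeAvailable());
      // ... exercise the native-backed codec here ...
    }
  }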
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] hadoop git commit: HDFS-13141. WebHDFS: Add support for getting snapshottable directory list. Contributed by Lokesh Jain.

2018-03-13 Thread aengineer
HDFS-13141. WebHDFS: Add support for getting snapshottable directory list. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0355ec20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0355ec20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0355ec20

Branch: refs/heads/HDFS-7240
Commit: 0355ec20ebeb988679c7192c7024bef7a2a3bced
Parents: 45d1b0f
Author: Xiaoyu Yao 
Authored: Mon Mar 12 16:37:29 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon Mar 12 20:41:37 2018 -0700

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |  1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 ++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 42 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 20 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 75 +++-
 8 files changed, 184 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index bbd1bd7..3dcf13b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -88,6 +88,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
 SET_XATTR("op_set_xattr"),
 GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
+GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 264e3f4..cb05c75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,7 +50,32 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED
+SNAPSHOT_ENABLED;
+
+/**
+ * Generates an enum set of Flags from a set of attr flags.
+ * @param attr Set of attr flags
+ * @return EnumSet of Flags
+ */
+public static EnumSet<Flags> convert(Set<AttrFlags> attr) {
+  if (attr.isEmpty()) {
+return EnumSet.noneOf(Flags.class);
+  }
+  EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
+  if (attr.contains(AttrFlags.HAS_ACL)) {
+flags.add(Flags.HAS_ACL);
+  }
+  if (attr.contains(AttrFlags.HAS_EC)) {
+flags.add(Flags.HAS_EC);
+  }
+  if (attr.contains(AttrFlags.HAS_CRYPT)) {
+flags.add(Flags.HAS_CRYPT);
+  }
+  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
+flags.add(Flags.SNAPSHOT_ENABLED);
+  }
+  return flags;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 2725e9c..aa79dc4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import 
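
The convert() helper added above translates one flag enum into another by probing a Set and accumulating an EnumSet. A self-contained sketch of the same pattern with two illustrative enums standing in for AttrFlags and Flags (not the real HdfsFileStatus types):

  import java.util.EnumSet;
  import java.util.Set;

  public class FlagConvertExample {
    enum SourceFlag { HAS_ACL, HAS_CRYPT }   // stand-in for AttrFlags
    enum TargetFlag { HAS_ACL, HAS_CRYPT }   // stand-in for Flags

    static EnumSet<TargetFlag> convert(Set<SourceFlag> attr) {
      EnumSet<TargetFlag> flags = EnumSet.noneOf(TargetFlag.class);
      if (attr.contains(SourceFlag.HAS_ACL)) {
        flags.add(TargetFlag.HAS_ACL);
      }
      if (attr.contains(SourceFlag.HAS_CRYPT)) {
        flags.add(TargetFlag.HAS_CRYPT);
      }
      return flags;
    }

    public static void main(String[] args) {
      System.out.println(convert(EnumSet.of(SourceFlag.HAS_ACL)));  // [HAS_ACL]
    }
  }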

[35/50] hadoop git commit: HDFS-13253. RBF: Quota management incorrect parent-child relationship judgement. Contributed by Yiqun Lin.

2018-03-13 Thread aengineer
HDFS-13253. RBF: Quota management incorrect parent-child relationship 
judgement. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fab787d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fab787d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fab787d

Branch: refs/heads/HDFS-7240
Commit: 7fab787de72756863a91c2358da5c611afdb80e9
Parents: 319defa
Author: Yiqun Lin 
Authored: Tue Mar 13 10:30:20 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 13 10:30:20 2018 +0800

--
 .../federation/resolver/MountTableResolver.java | 14 ++
 .../federation/router/FederationUtil.java   | 20 
 .../federation/router/RouterQuotaManager.java   | 14 +-
 .../router/TestRouterQuotaManager.java  | 12 
 4 files changed, 47 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fab787d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 2c7d1f8..27b43e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -239,7 +240,7 @@ public class MountTableResolver
   PathLocation loc = entry.getValue();
   String src = loc.getSourcePath();
   if (src != null) {
-if (src.startsWith(path)) {
+if(isParentEntry(src, path)) {
   LOG.debug("Removing {}", src);
   it.remove();
 }
@@ -530,17 +531,6 @@ public class MountTableResolver
 return this.defaultNameService;
   }
 
-  private boolean isParentEntry(final String path, final String parent) {
-if (!path.startsWith(parent)) {
-  return false;
-}
-if (path.equals(parent)) {
-  return true;
-}
-return path.charAt(parent.length()) == Path.SEPARATOR_CHAR
-|| parent.equals(Path.SEPARATOR);
-  }
-
   /**
* Find the deepest mount point for a path.
* @param path Path to look for.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fab787d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 8d631e9..3dfd998 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -26,6 +26,7 @@ import java.net.URL;
 import java.net.URLConnection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -186,4 +187,23 @@ public final class FederationUtil {
 ActiveNamenodeResolver.class);
 return newInstance(conf, stateStore, StateStoreService.class, clazz);
   }
+
+  /**
+   * Check if the given path is the child of parent path.
+   * @param path Path to be check.
+   * @param parent Parent path.
+   * @return True if parent path is parent entry for given path.
+   */
+  public static boolean isParentEntry(final String path, final String parent) {
+if (!path.startsWith(parent)) {
+  return false;
+}
+
+if (path.equals(parent)) {
+  return true;
+}
+
+return path.charAt(parent.length()) == 
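
The extracted isParentEntry() above fixes a parent/child test that previously relied on startsWith() alone: "/ab" starts with "/a" yet is not a child of "/a". A standalone sketch of the same check (the separator constants mirror org.apache.hadoop.fs.Path) with the cases it distinguishes:

  public class ParentEntryExample {
    static final char SEPARATOR_CHAR = '/';   // Path.SEPARATOR_CHAR
    static final String SEPARATOR = "/";      // Path.SEPARATOR

    static boolean isParentEntry(String path, String parent) {
      if (!path.startsWith(parent)) {
        return false;
      }
      if (path.equals(parent)) {
        return true;
      }
      // Only a real child if the next character is a separator,
      // or the parent is the root itself.
      return path.charAt(parent.length()) == SEPARATOR_CHAR
          || parent.equals(SEPARATOR);
    }

    public static void main(String[] args) {
      System.out.println(isParentEntry("/a/b", "/a"));  // true
      System.out.println(isParentEntry("/ab", "/a"));   // false; startsWith() alone says true
      System.out.println(isParentEntry("/a", "/a"));    // true
      System.out.println(isParentEntry("/x", "/"));     // true; root is parent of everything
    }
  }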

[46/50] hadoop git commit: YARN-5764. NUMA awareness support for launching containers. Contributed by Devaraj K.

2018-03-13 Thread aengineer
YARN-5764. NUMA awareness support for launching containers. Contributed by 
Devaraj K.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a82d4a2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a82d4a2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a82d4a2e

Branch: refs/heads/HDFS-7240
Commit: a82d4a2e3a6a5448e371cef0cb86d5dbe4871ccd
Parents: 45cccad
Author: Miklos Szegedi 
Authored: Tue Mar 13 11:03:27 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue Mar 13 12:36:57 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  27 ++
 .../src/main/resources/yarn-default.xml |  51 +++
 .../nodemanager/LinuxContainerExecutor.java |  18 +-
 .../linux/privileged/PrivilegedOperation.java   |   3 +-
 .../linux/resources/ResourceHandlerModule.java  |  10 +
 .../linux/resources/numa/NumaNodeResource.java  | 204 +++
 .../resources/numa/NumaResourceAllocation.java  |  69 
 .../resources/numa/NumaResourceAllocator.java   | 342 +++
 .../resources/numa/NumaResourceHandlerImpl.java | 108 ++
 .../linux/resources/numa/package-info.java  |  28 ++
 .../numa/TestNumaResourceAllocator.java | 281 +++
 .../numa/TestNumaResourceHandlerImpl.java   | 181 ++
 12 files changed, 1318 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82d4a2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6677478..2afff43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3585,6 +3585,22 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
   DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
 
+  /**
+   * Settings for NUMA awareness.
+   */
+  public static final String NM_NUMA_AWARENESS_ENABLED = NM_PREFIX
+  + "numa-awareness.enabled";
+  public static final boolean DEFAULT_NM_NUMA_AWARENESS_ENABLED = false;
+  public static final String NM_NUMA_AWARENESS_READ_TOPOLOGY = NM_PREFIX
+  + "numa-awareness.read-topology";
+  public static final boolean DEFAULT_NM_NUMA_AWARENESS_READ_TOPOLOGY = false;
+  public static final String NM_NUMA_AWARENESS_NODE_IDS = NM_PREFIX
+  + "numa-awareness.node-ids";
+  public static final String NM_NUMA_AWARENESS_NUMACTL_CMD = NM_PREFIX
+  + "numa-awareness.numactl.cmd";
+  public static final String DEFAULT_NM_NUMA_AWARENESS_NUMACTL_CMD =
+  "/usr/bin/numactl";
+
   public YarnConfiguration() {
 super();
   }
@@ -3791,6 +3807,17 @@ public class YarnConfiguration extends Configuration {
 YarnConfiguration.DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED);
   }
 
+  /**
+   * Returns whether the NUMA awareness is enabled.
+   *
+   * @param conf the configuration
+   * @return whether the NUMA awareness is enabled.
+   */
+  public static boolean numaAwarenessEnabled(Configuration conf) {
+return conf.getBoolean(NM_NUMA_AWARENESS_ENABLED,
+DEFAULT_NM_NUMA_AWARENESS_ENABLED);
+  }
+
   /* For debugging. mp configurations to system output as XML format. */
   public static void main(String[] args) throws Exception {
 new YarnConfiguration(new Configuration()).writeXml(System.out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82d4a2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index adf8d8a..e192a0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3711,4 +3711,55 @@
 
   
 
+  <property>
+    <description>
+    Whether to enable the NUMA awareness for containers in Node Manager.
+    </description>
+    <name>yarn.nodemanager.numa-awareness.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Whether to read the NUMA topology from the system or from the
+    configurations. If the value is true then NM reads the NUMA topology from
+system 
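
A minimal sketch of how a node manager component might consult the new settings; it uses only the keys and the numaAwarenessEnabled() helper introduced in this patch, and the values set below are illustrative (normally they come from yarn-site.xml):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class NumaConfigExample {
    public static void main(String[] args) {
      Configuration conf = new YarnConfiguration();
      // Normally configured in yarn-site.xml; set here only for illustration.
      conf.setBoolean(YarnConfiguration.NM_NUMA_AWARENESS_ENABLED, true);
      conf.setBoolean(YarnConfiguration.NM_NUMA_AWARENESS_READ_TOPOLOGY, true);

      if (YarnConfiguration.numaAwarenessEnabled(conf)) {
        String numactl = conf.get(
            YarnConfiguration.NM_NUMA_AWARENESS_NUMACTL_CMD,
            YarnConfiguration.DEFAULT_NM_NUMA_AWARENESS_NUMACTL_CMD);
        System.out.println("NUMA awareness enabled, numactl at " + numactl);
      }
    }
  }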

[50/50] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-03-13 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5dfae69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5dfae69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5dfae69

Branch: refs/heads/HDFS-7240
Commit: a5dfae69f5843cd613ad82a8a31545ee14f652ef
Parents: faa01f3 9714fc1
Author: Anu Engineer 
Authored: Tue Mar 13 18:16:12 2018 -0700
Committer: Anu Engineer 
Committed: Tue Mar 13 18:16:12 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +-
 .../src/main/conf/log4j.properties  |   3 +
 .../org/apache/hadoop/http/HttpServer2.java |  12 +-
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 124 -
 .../src/main/resources/core-default.xml |  11 +
 .../org/apache/hadoop/http/TestHttpServer.java  |  23 +-
 .../hadoop/http/TestHttpServerWithSpengo.java   | 494 ---
 .../org/apache/hadoop/log/TestLogLevel.java |   4 +-
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 ---
 .../apache/hadoop/test/GenericTestUtils.java|  68 ++-
 .../hadoop/crypto/key/kms/server/TestKMS.java   |   6 +-
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   1 +
 .../hadoop/hdfs/protocol/DatanodeInfo.java  |  31 +-
 .../hadoop/hdfs/protocol/LocatedBlocks.java |  12 +-
 .../protocol/SnapshottableDirectoryStatus.java  |   8 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   5 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  34 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  14 +
 .../hadoop/hdfs/web/resources/GetOpParam.java   |   3 +-
 .../src/main/proto/hdfs.proto   |   1 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +-
 .../federation/resolver/MountTableResolver.java |  18 +-
 .../federation/router/ConnectionManager.java|  59 ++-
 .../federation/router/ConnectionPool.java   |   2 +-
 .../federation/router/ConnectionPoolId.java |   6 +
 .../federation/router/FederationUtil.java   |  20 +
 .../federation/router/RouterQuotaManager.java   |  14 +-
 .../federation/store/records/BaseRecord.java|  16 +-
 .../store/records/MembershipState.java  |  29 +-
 .../federation/store/records/MountTable.java|  42 +-
 .../federation/store/records/RouterState.java   |   9 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   1 +
 .../hdfs/server/namenode/INodeDirectory.java|  10 +-
 .../hdfs/server/namenode/INodeReference.java|   4 -
 .../snapshot/DirectorySnapshottableFeature.java |   5 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 131 ++---
 .../snapshot/FSImageFormatPBSnapshot.java   |   6 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  11 +-
 .../snapshot/SnapshotDiffListingInfo.java   |  15 +-
 .../snapshot/SnapshotFSImageFormat.java |   4 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |   7 +
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  12 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  | 131 +++--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  23 +
 .../src/main/resources/hdfs-default.xml |   6 +-
 .../src/main/webapps/hdfs/dfshealth.html|   3 +
 .../src/main/webapps/hdfs/explorer.html |   3 +
 .../src/site/markdown/HDFSDiskbalancer.md   |   6 +-
 .../src/site/markdown/HDFSRouterFederation.md   |  12 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  92 
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |  15 +
 .../hdfs/TestFileStatusWithDefaultECPolicy.java | 107 
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 102 
 .../hdfs/TestFileStatusWithRandomECPolicy.java  |  49 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java |  48 +-
 ...econstructStripedFileWithRandomECPolicy.java |  49 ++
 .../TestBalancerWithMultipleNameNodes.java  |  10 +-
 .../blockmanagement/TestBlockInfoStriped.java   |  33 +-
 .../blockmanagement/TestBlockStatsMXBean.java   |   5 +
 .../TestLowRedundancyBlockQueues.java   |  19 +-
 .../TestPendingReconstruction.java  |  10 +-
 .../datanode/TestDataNodeVolumeFailure.java |  37 +-
 .../resolver/TestMountTableResolver.java|  69 +++
 .../router/TestConnectionManager.java   | 157 ++
 .../federation/router/TestRouterAdminCLI.java   |  38 +-
 .../federation/router/TestRouterQuota.java  |   2 +-
 .../router/TestRouterQuotaManager.java  |  12 +
 .../federation/router/TestRouterSafemode.java   |   9 +
 .../store/records/TestMountTable.java   |  43 ++
 .../hdfs/server/namenode/TestFSImage.java   |   9 +
 

[39/50] hadoop git commit: HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with HdfsFileStatus as argument. Contributed by Lokesh Jain

2018-03-13 Thread aengineer
HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with 
HdfsFileStatus as argument. Contributed by Lokesh Jain


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2b9ce58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2b9ce58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2b9ce58

Branch: refs/heads/HDFS-7240
Commit: b2b9ce585984a1791a8af3e2287c75c75b95586f
Parents: 0355ec2
Author: Chris Douglas 
Authored: Tue Mar 13 09:43:22 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 09:43:22 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 +---
 .../protocol/SnapshottableDirectoryStatus.java  |  8 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 10 +---
 3 files changed, 10 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index cb05c75..264e3f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,32 +50,7 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED;
-
-/**
- * Generates an enum set of Flags from a set of attr flags.
- * @param attr Set of attr flags
- * @return EnumSet of Flags
- */
-public static EnumSet<Flags> convert(Set<AttrFlags> attr) {
-  if (attr.isEmpty()) {
-return EnumSet.noneOf(Flags.class);
-  }
-  EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
-  if (attr.contains(AttrFlags.HAS_ACL)) {
-flags.add(Flags.HAS_ACL);
-  }
-  if (attr.contains(AttrFlags.HAS_EC)) {
-flags.add(Flags.HAS_EC);
-  }
-  if (attr.contains(AttrFlags.HAS_CRYPT)) {
-flags.add(Flags.HAS_CRYPT);
-  }
-  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
-flags.add(Flags.SNAPSHOT_ENABLED);
-  }
-  return flags;
-}
+SNAPSHOT_ENABLED
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 6cdb2ee..0d35238 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -79,6 +79,14 @@ public class SnapshottableDirectoryStatus {
 this.parentFullPath = parentFullPath;
   }
 
+  public SnapshottableDirectoryStatus(HdfsFileStatus dirStatus,
+  int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+this.dirStatus = dirStatus;
+this.snapshotNumber = snapshotNumber;
+this.snapshotQuota = snapshotQuota;
+this.parentFullPath = parentFullPath;
+  }
+
   /**
* @return Number of snapshots that have been taken for the directory
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index aa79dc4..13c5226 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -65,7 +65,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -772,15 +771,8 @@ class JsonUtilClient {
 byte[] parentFullPath = toByteArray((String) 
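
A hedged sketch of the new constructor in use: it wraps an HdfsFileStatus obtained elsewhere instead of re-specifying every file attribute. The snapshot number, quota, and parent path below are placeholders, and DFSUtilClient.string2Bytes() is assumed as the usual string-to-bytes helper:

  import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
  import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

  public class SnapshottableStatusExample {
    static SnapshottableDirectoryStatus describe(HdfsFileStatus dirStatus) {
      return new SnapshottableDirectoryStatus(
          dirStatus,
          2,        // snapshotNumber: snapshots already taken (placeholder)
          65536,    // snapshotQuota: maximum snapshots allowed (placeholder)
          DFSUtilClient.string2Bytes("/"));  // parentFullPath
    }
  }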

[42/50] hadoop git commit: HDFS-12587. Use Parameterized tests in TestBlockInfoStriped and TestLowRedundancyBlockQueues to test all EC policies. Contributed by Takanobu Asanuma.

2018-03-13 Thread aengineer
HDFS-12587. Use Parameterized tests in TestBlockInfoStriped and 
TestLowRedundancyBlockQueues to test all EC policies. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b8dbc2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b8dbc2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b8dbc2c

Branch: refs/heads/HDFS-7240
Commit: 3b8dbc2cb766ba9fc1d655c891d32f5b4e4aa9c8
Parents: d6931c3
Author: Xiao Chen 
Authored: Tue Mar 13 10:12:52 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:14:05 2018 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 15 +
 .../blockmanagement/TestBlockInfoStriped.java   | 33 ++--
 .../TestLowRedundancyBlockQueues.java   | 19 +--
 3 files changed, 55 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b8dbc2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 13ca390..35edab9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -46,6 +46,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -591,4 +592,18 @@ public class StripedFileTestUtil {
 .getPolicies();
 return policies.get(1 + rand.nextInt(policies.size() - 1));
   }
+
+  /**
+   * Get all Erasure Coding Policies for Parameterized tests.
+   * @return Collection<Object[]>
+   */
+  public static Collection<Object[]> getECPolicies() {
+    ArrayList<Object[]> params = new ArrayList<>();
+    List<ErasureCodingPolicy> policies =
+        SystemErasureCodingPolicies.getPolicies();
+for (ErasureCodingPolicy policy: policies) {
+  params.add(new Object[]{policy});
+}
+return params;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b8dbc2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 1040d21..becf868 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -25,29 +25,42 @@ import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 /**
- * Test {@link BlockInfoStriped}
+ * Test {@link BlockInfoStriped}.
  */
+@RunWith(Parameterized.class)
 public class TestBlockInfoStriped {
   private static final long BASE_ID = -1600;
   private final Block baseBlock = new Block(BASE_ID);
-  private final ErasureCodingPolicy testECPolicy
-  = StripedFileTestUtil.getDefaultECPolicy();
-  private final int totalBlocks = testECPolicy.getNumDataUnits() +
-  testECPolicy.getNumParityUnits();
-  private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
-  testECPolicy);
+  private final ErasureCodingPolicy testECPolicy;
+  private final int totalBlocks;
+  private final BlockInfoStriped info;
+
+  public TestBlockInfoStriped(ErasureCodingPolicy policy) {
+testECPolicy = policy;
+totalBlocks = testECPolicy.getNumDataUnits()
++ testECPolicy.getNumParityUnits();
+info = new BlockInfoStriped(baseBlock, testECPolicy);
+  }
+
+  @Parameterized.Parameters(name = "{index}: {0}")
+  public static Collection<Object[]> policies() {
+return StripedFileTestUtil.getECPolicies();
+  }
 
   private 
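
The change above turns every test in the class into one run per erasure coding policy. A minimal, self-contained sketch of the same JUnit 4 Parameterized mechanics, using plain strings in place of ErasureCodingPolicy objects:

  import java.util.Arrays;
  import java.util.Collection;

  import org.junit.Test;
  import org.junit.runner.RunWith;
  import org.junit.runners.Parameterized;

  import static org.junit.Assert.assertFalse;

  @RunWith(Parameterized.class)
  public class ParameterizedExample {
    private final String policyName;

    public ParameterizedExample(String policyName) {
      this.policyName = policyName;
    }

    // One Object[] per run; each array is handed to the constructor.
    @Parameterized.Parameters(name = "{index}: {0}")
    public static Collection<Object[]> policies() {
      return Arrays.asList(new Object[][]{{"RS-6-3-1024k"}, {"XOR-2-1-1024k"}});
    }

    @Test
    public void testPolicyNameIsSet() {
      assertFalse(policyName.isEmpty());
    }
  }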

[44/50] hadoop git commit: HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma.

2018-03-13 Thread aengineer
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8211a3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8211a3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8211a3d4

Branch: refs/heads/HDFS-7240
Commit: 8211a3d4693fea46cff11c5883c16a9b4df7b4de
Parents: f82d38d
Author: Xiao Chen 
Authored: Tue Mar 13 10:48:35 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:48:45 2018 -0700

--
 .../hdfs/TestFileStatusWithDefaultECPolicy.java | 107 +++
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 102 --
 .../hdfs/TestFileStatusWithRandomECPolicy.java  |  49 +
 3 files changed, 156 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8211a3d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
new file mode 100644
index 000..a5a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private DFSClient client;
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(30);
+
+  @Before
+  public void before() throws IOException {
+HdfsConfiguration conf = new HdfsConfiguration();
+cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+client = fs.getClient();
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
+  }
+
+  @After
+  public void after() {
+if (cluster != null) {
+  cluster.shutdown();
+  cluster = null;
+}
+  }
+
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  @Test
+  public void testFileStatusWithECPolicy() throws Exception {
+// test directory doesn't have an EC policy
+final Path dir = new Path("/foo");
+assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
+ContractTestUtils.assertNotErasureCoded(fs, dir);
+assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
+// test file doesn't have an EC policy
+final Path file = new Path(dir, "foo");
+fs.create(file).close();
+assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
+ContractTestUtils.assertNotErasureCoded(fs, file);
+fs.delete(file, true);
+
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+// set EC policy on dir
+fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
+ContractTestUtils.assertErasureCoded(fs, dir);
+final ErasureCodingPolicy ecPolicy2 =
+client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy2);
+
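
The getEcPolicy() hook introduced above is what allows a subclass to rerun the whole suite against a different policy, which is the point of TestFileStatusWithRandomECPolicy. A sketch of such a subclass; getRandomNonDefaultECPolicy() is assumed to be the StripedFileTestUtil helper used by the related random-policy tests:

  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

  public class TestFileStatusWithRandomECPolicy extends
      TestFileStatusWithDefaultECPolicy {
    @Override
    public ErasureCodingPolicy getEcPolicy() {
      // Assumed helper: returns any system policy other than the default one.
      return StripedFileTestUtil.getRandomNonDefaultECPolicy();
    }
  }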

[47/50] hadoop git commit: HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. Contributed by Erik Krogen

2018-03-13 Thread aengineer
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. 
Contributed by Erik Krogen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6994da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6994da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6994da

Branch: refs/heads/HDFS-7240
Commit: 9d6994da1964c1125a33b3a65e7a7747e2d0bc59
Parents: a82d4a2
Author: Chris Douglas 
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 13:55:18 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 12 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 23 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
   "hadoop.http.socket.backlog.size";
   public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_ACCEPTOR_COUNT_KEY =
+  "hadoop.http.acceptor.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+  public static final String HTTP_SELECTOR_COUNT_KEY =
+  "hadoop.http.selector.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
   public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
 
 private ServerConnector createHttpChannelConnector(
 Server server, HttpConfiguration httpConfig) {
-  ServerConnector conn = new ServerConnector(server);
+  ServerConnector conn = new ServerConnector(server,
+  conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+  conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
   ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
   conn.addConnectionFactory(connFactory);
   configureChannelConnector(conn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
 Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
 server = createTestServer(conf);
 server.addServlet("echo", "/echo", EchoServlet.class);
 server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 ready.await();
 start.countDown();
   }
+
+  /**
+   * Test that the number of acceptors and selectors can be configured by
+   * trying to configure more of them than would be allowed based on the
+   * maximum thread count.
+   */
+  @Test
+  public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, MAX_THREADS - 2);
+HttpServer2 badserver = createTestServer(conf);
+try {
+  
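
A short sketch of how the new knobs are intended to be used: set the acceptor and selector counts next to the thread cap before the server is created, keeping acceptors + selectors comfortably below the pool size so worker threads remain (the failure mode the new test provokes). The numbers are illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.http.HttpServer2;

  public class HttpServer2TuningExample {
    public static Configuration tunedConf() {
      Configuration conf = new Configuration();
      conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 16);
      // Leave these at -1 (the default) to size them from the CPU core count.
      conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, 2);
      conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, 4);
      return conf;
    }
  }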

[43/50] hadoop git commit: Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma."

2018-03-13 Thread aengineer
Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma."

This reverts commit 84c10955863eca1e300aeeac1d9cd7a1186144b6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82d38dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82d38dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82d38dc

Branch: refs/heads/HDFS-7240
Commit: f82d38dcb3259dda6275c75765738fb9b249ee73
Parents: 3b8dbc2
Author: Xiao Chen 
Authored: Tue Mar 13 10:30:07 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:36:16 2018 -0700

--
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 ++
 .../hdfs/TestFileStatusWithRandomECPolicy.java  | 49 
 2 files changed, 5 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index a5a..077cf3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,10 +34,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-/**
- * This test ensures the statuses of EC files with the default policy.
- */
-public class TestFileStatusWithDefaultECPolicy {
+public class TestFileStatusWithECPolicy {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DFSClient client;
@@ -53,7 +50,8 @@ public class TestFileStatusWithDefaultECPolicy {
 cluster.waitActive();
 fs = cluster.getFileSystem();
 client = fs.getClient();
-fs.enableErasureCodingPolicy(getEcPolicy().getName());
+fs.enableErasureCodingPolicy(
+StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After
@@ -64,10 +62,6 @@ public class TestFileStatusWithDefaultECPolicy {
 }
   }
 
-  public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
   @Test
   public void testFileStatusWithECPolicy() throws Exception {
 // test directory doesn't have an EC policy
@@ -82,7 +76,8 @@ public class TestFileStatusWithDefaultECPolicy {
 ContractTestUtils.assertNotErasureCoded(fs, file);
 fs.delete(file, true);
 
-final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+final ErasureCodingPolicy ecPolicy1 =
+StripedFileTestUtil.getDefaultECPolicy();
 // set EC policy on dir
 fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
 ContractTestUtils.assertErasureCoded(fs, dir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
deleted file mode 100644
index 18902a7..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This test extends TestFileStatusWithDefaultECPolicy to use a random
- * (non-default) EC policy.
- */
-public class TestFileStatusWithRandomECPolicy 

[45/50] hadoop git commit: HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei Jiang

2018-03-13 Thread aengineer
HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei 
Jiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cccadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cccadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cccadd

Branch: refs/heads/HDFS-7240
Commit: 45cccadd2e84b99ec56f1cc0e2248dc8fc844f38
Parents: 8211a3d
Author: Chris Douglas 
Authored: Tue Mar 13 11:08:11 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 11:08:11 2018 -0700

--
 .../src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cccadd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 2b3b529..eba4bee 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -527,7 +527,7 @@ public class DistCpUtils {
   /**
* Utility to compare checksums for the paths specified.
*
-   * If checksums's can't be retrieved, it doesn't fail the test
+   * If checksums can't be retrieved, it doesn't fail the test
* Only time the comparison would fail is when checksums are
* available and they don't match
*


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[48/50] hadoop git commit: HDFS-13235. DiskBalancer: Update Documentation to add newly added options. Contributed by Bharat Viswanadham.

2018-03-13 Thread aengineer
HDFS-13235. DiskBalancer: Update Documentation to add newly added options. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39537b7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39537b7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39537b7c

Branch: refs/heads/HDFS-7240
Commit: 39537b7c84dddfa8084308459565ab77fd24abd3
Parents: 9d6994d
Author: Arpit Agarwal 
Authored: Tue Mar 13 16:35:51 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Mar 13 16:35:51 2018 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 6 +++---
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md  | 6 +-
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2d3c5e7..f90daba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4651,9 +4651,9 @@
 dfs.disk.balancer.plan.valid.interval
 1d
 
-  Maximum number of hours the disk balancer plan is valid.
-  This setting supports multiple time unit suffixes as described
-  in dfs.heartbeat.interval. If no suffix is specified then milliseconds
+  Maximum amount of time disk balancer plan is valid. This setting
+  supports multiple time unit suffixes as described in
+  dfs.heartbeat.interval. If no suffix is specified then milliseconds
   is assumed.
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 6e1bd41..ed0233a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -79,6 +79,10 @@ Execute command takes a plan command executes it against the 
datanode that plan
 
 This executes the plan by reading datanode’s address from the plan file.
 
+| COMMAND\_OPTION| Description |
+|: |: |
+| `-skipDateCheck` |  Skip date check and force execute the plan.|
+
 ### Query
 
 Query command gets the current status of the diskbalancer from a datanode.
@@ -122,7 +126,7 @@ There is a set of diskbalancer settings that can be 
controlled via hdfs-site.xml
 |`dfs.disk.balancer.max.disk.errors`| sets the value of maximum number of 
errors we can ignore for a specific move between two disks before it is 
abandoned. For example, if a plan has 3 pair of disks to copy between , and the 
first disk set encounters more than 5 errors, then we abandon the first copy 
and start the second copy in the plan. The default value of max errors is set 
to 5.|
 |`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies 
when we have reached a good enough value for any copy step. For example, if you 
specify 10% then getting close to 10% of the target value is good enough.|
 |`dfs.disk.balancer.plan.threshold.percent`| The percentage threshold value 
for volume Data Density in a plan. If the absolute value of volume Data Density 
which is out of threshold value in a node, it means that the volumes 
corresponding to the disks should do the balancing in the plan. The default 
value is 10.|
-
+|`dfs.disk.balancer.plan.valid.interval`| Maximum amount of time disk balancer 
plan is valid. Supports the following suffixes (case insensitive): ms(millis), 
s(sec), m(min), h(hour), d(day) to specify the time (such as 2s, 2m, 1h, etc.). 
If no suffix is specified then milliseconds is assumed. Default value is 1d|
  Debugging
 -
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] hadoop git commit: HDFS-10803. TestBalancerWithMultipleNameNodes#testBalancing2OutOf3Blockpools fails intermittently due to no free space available. Contributed by Yiqun Lin.

2018-03-13 Thread aengineer
HDFS-10803. TestBalancerWithMultipleNameNodes#testBalancing2OutOf3Blockpools 
fails intermittently due to no free space available. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4afd50b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4afd50b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4afd50b1

Branch: refs/heads/HDFS-7240
Commit: 4afd50b10650a72162c40cf86dea44676013f262
Parents: 91c82c9
Author: Yiqun Lin 
Authored: Tue Mar 13 10:15:51 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 13 10:15:51 2018 +0800

--
 .../balancer/TestBalancerWithMultipleNameNodes.java   | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4afd50b1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index cf4c86f..c8929d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -416,13 +416,19 @@ public class TestBalancerWithMultipleNameNodes {
 }
 
 conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
+// Adjust the capacity of each DN since it will redistribute blocks
+// nNameNodes times in the following operations.
+long[] newCapacities = new long[nDataNodes];
+for (int i = 0; i < nDataNodes; i++) {
+  newCapacities[i] = capacities[i] * nNameNodes;
+}
 {
   LOG.info("UNEVEN 10");
   final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
   .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
   .numDataNodes(nDataNodes)
   .racks(racks)
-  .simulatedCapacities(capacities)
+  .simulatedCapacities(newCapacities)
   .format(false)
   .build();
   LOG.info("UNEVEN 11");
@@ -450,7 +456,7 @@ public class TestBalancerWithMultipleNameNodes {
   LOG.info("UNEVEN 13: n=" + n);
 }
 
-final long totalCapacity = TestBalancer.sum(capacities);
+final long totalCapacity = TestBalancer.sum(newCapacities);
 final long totalUsed = nNameNodes*usedSpacePerNN;
 LOG.info("UNEVEN 14");
 runBalancer(s, totalUsed, totalCapacity);


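As a concrete, hypothetical instance of the capacity adjustment made above — the figures below are illustrative, not taken from the test:

public class CapacityScalingExample {
  public static void main(String[] args) {
    // With 3 federated NameNodes and two simulated volumes of 1 GB each, every
    // volume is scaled to 3 GB so that all three block pools can each consume
    // usedSpacePerNN without exhausting the DataNode's simulated capacity.
    int nNameNodes = 3;
    long[] capacities = {1L << 30, 1L << 30};           // 1 GB per simulated volume
    long[] newCapacities = new long[capacities.length];
    for (int i = 0; i < capacities.length; i++) {
      newCapacities[i] = capacities[i] * nNameNodes;    // 3 GB per volume
    }
    System.out.println("Scaled capacity of volume 0: " + newCapacities[0] + " bytes");
  }
}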



[36/50] hadoop git commit: HDFS-13226. RBF: Throw the exception if mount table entry validated failed. Contributed by maobaolong.

2018-03-13 Thread aengineer
HDFS-13226. RBF: Throw the exception if mount table entry validated failed. 
Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19292bc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19292bc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19292bc2

Branch: refs/heads/HDFS-7240
Commit: 19292bc264cada5117ec76063d36cc88159afdf4
Parents: 7fab787
Author: Yiqun Lin 
Authored: Tue Mar 13 11:03:31 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 13 11:03:31 2018 +0800

--
 .../federation/store/records/BaseRecord.java| 16 ++--
 .../store/records/MembershipState.java  | 29 -
 .../federation/store/records/MountTable.java| 42 +++
 .../federation/store/records/RouterState.java   |  9 ++--
 .../federation/router/TestRouterAdminCLI.java   | 38 +++--
 .../store/records/TestMountTable.java   | 43 
 6 files changed, 137 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index 79f99c8..d5e60ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.util.Time;
  * 
  */
 public abstract class BaseRecord implements Comparable {
+  public static final String ERROR_MSG_CREATION_TIME_NEGATIVE =
+  "The creation time for the record cannot be negative.";
+  public static final String ERROR_MSG_MODIFICATION_TIME_NEGATIVE =
+  "The modification time for the record cannot be negative.";
 
   /**
* Set the modification time for the record.
@@ -193,11 +197,15 @@ public abstract class BaseRecord implements 
Comparable {
 
   /**
* Validates the record. Called when the record is created, populated from 
the
-   * state store, and before committing to the state store.
-   * @return If the record is valid.
+   * state store, and before committing to the state store. If validation
+   * fails, an exception is thrown.
*/
-  public boolean validate() {
-return getDateCreated() > 0 && getDateModified() > 0;
+  public void validate() {
+if (getDateCreated() <= 0) {
+  throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE);
+} else if (getDateModified() <= 0) {
+  throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);
+}
   }
 
   @Override
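
Callers that previously branched on the boolean return value now have to catch the exception instead. A minimal caller-side sketch, assuming a BaseRecord instance; the handling shown is illustrative, not part of this patch.

import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;

public class RecordValidationExample {
  // Hypothetical caller: validate a record before committing it to the State Store.
  static void commitIfValid(BaseRecord record) {
    try {
      record.validate();  // now throws IllegalArgumentException instead of returning false
    } catch (IllegalArgumentException e) {
      // The message is one of the ERROR_MSG_* constants introduced above.
      System.err.println("Rejecting record: " + e.getMessage());
      throw e;
    }
    // ... proceed with the State Store write ...
  }
}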

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
index ac0b22e..e33dedf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
@@ -37,6 +37,14 @@ import 
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerialize
  */
 public abstract class MembershipState extends BaseRecord
 implements FederationNamenodeContext {
+  public static final String ERROR_MSG_NO_NS_SPECIFIED =
+  "Invalid registration, no nameservice specified ";
+  public static final String ERROR_MSG_NO_WEB_ADDR_SPECIFIED =
+  "Invalid registration, no web address specified ";
+  public static final String ERROR_MSG_NO_RPC_ADDR_SPECIFIED =
+  "Invalid registration, no rpc address specified ";
+  public static final String ERROR_MSG_NO_BP_SPECIFIED =
+  "Invalid registration, no block pool specified ";
 
   /** Expiration time in ms for this entry. */
   private static long expirationMs;
@@ -226,26 +234,25 @@ public abstract class MembershipState extends BaseRecord
* is missing required information.
*/
   @Override
-  public boolean validate() {
-boolean ret = 

[37/50] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-13 Thread aengineer
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d1b0fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d1b0fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d1b0fd

Branch: refs/heads/HDFS-7240
Commit: 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b
Parents: 19292bc
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:05:39 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 078a943..49d3575 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -1015,6 +998,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index cdde48c..61b0271 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -349,7 +349,7 @@ public abstract class GenericTestUtils {
   StringUtils.stringifyException(t)),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -499,18 +499,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
-
+
 private final CountDownLatch fireLatch = new CountDownLatch(1);
 private final CountDownLatch waitLatch = new CountDownLatch(1);
 private final CountDownLatch resultLatch = new CountDownLatch(1);
-
+
 private final AtomicInteger fireCounter = new 

[41/50] hadoop git commit: HDFS-13239. Fix non-empty dir warning message when setting default EC policy. Contributed by Bharat Viswanadham.

2018-03-13 Thread aengineer
HDFS-13239. Fix non-empty dir warning message when setting default EC policy. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6931c30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6931c30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6931c30

Branch: refs/heads/HDFS-7240
Commit: d6931c30c5a643ca192109b05a9c44da42a6318c
Parents: 84c1095
Author: Xiao Chen 
Authored: Tue Mar 13 10:06:55 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:07:26 2018 -0700

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 12 -
 .../test/resources/testErasureCodingConf.xml| 28 +---
 2 files changed, 30 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6931c30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index e30b083..9b9fe14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,17 +358,15 @@ public class ECAdmin extends Configured implements Tool {
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
 if (ecPolicyName == null){
-  System.out.println("Set default erasure coding policy" +
-  " on " + path);
-} else {
-  System.out.println("Set erasure coding policy " + ecPolicyName +
-  " on " + path);
+  ecPolicyName = "default";
 }
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" 
+
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
-  "non-empty directory will not automatically convert existing" +
-  " files to " + ecPolicyName);
+  "non-empty directory will not automatically convert existing " +
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));
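
The messages above correspond to DistributedFileSystem#setErasureCodingPolicy, which the -setPolicy command wraps. A short, hedged sketch of the programmatic equivalent; the path and the explicit policy name are placeholders:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetEcPolicyExample {
  static void applyEcPolicy(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("/ecdir");
    // Passing null selects the cluster's default policy, which the CLI now
    // reports simply as "default" in its confirmation message.
    dfs.setErasureCodingPolicy(dir, null);
    // Or name a policy explicitly, e.g. RS-6-3-1024k.
    dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
  }
}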

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6931c30/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index fc0c060..2f7a6a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -214,7 +214,7 @@
   
 
   SubstringComparator
-  Set erasure coding policy RS-6-3-1024k on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -232,7 +232,7 @@
   
 
   SubstringComparator
-  Set erasure coding policy RS-6-3-1024k on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -311,7 +311,7 @@
   
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
 
   
 
@@ -696,6 +696,28 @@
 
 
 
+  setPolicy : set erasure coding policy without given a 
specific policy name on a non empty directory
+  
+-fs NAMENODE -mkdir /ecdir
+-fs NAMENODE -mkdir /ecdir/ecsubdir
+-fs NAMENODE -setPolicy -path 
/ecdir
+  
+  
+-fs NAMENODE -rm -R /ecdir
+  
+  
+
+  SubstringComparator
+  Set default erasure coding policy on 
/ecdir
+
+
+  SubstringComparator
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
+
+  
+
+
+
   getPolicy: get the default policy after setPolicy without 
given a specific policy name
   
 -fs NAMENODE -mkdir /ecdir



[49/50] hadoop git commit: HDFS-336. dfsadmin -report should report number of blocks from datanode. Contributed by Bharat Viswanadham.

2018-03-13 Thread aengineer
HDFS-336. dfsadmin -report should report number of blocks from datanode. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9714fc1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9714fc1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9714fc1d

Branch: refs/heads/HDFS-7240
Commit: 9714fc1dd48edb1c40d96d69ae82ed3b0fab7748
Parents: 39537b7
Author: Arpit Agarwal 
Authored: Tue Mar 13 16:39:17 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Mar 13 16:39:17 2018 -0700

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 31 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  5 ++-
 .../src/main/proto/hdfs.proto   |  1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  1 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 38 
 .../src/test/resources/testHDFSConf.xml |  4 +++
 6 files changed, 77 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9714fc1d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 0a8c915..c140d06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -56,6 +56,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
   public static final DatanodeInfo[] EMPTY_ARRAY = {};
+  private int numBlocks;
 
   // Datanode administrative states
   public enum AdminStates {
@@ -106,6 +107,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.upgradeDomain = from.getUpgradeDomain();
 this.lastBlockReportTime = from.getLastBlockReportTime();
 this.lastBlockReportMonotonic = from.getLastBlockReportMonotonic();
+this.numBlocks = from.getNumBlocks();
   }
 
   protected DatanodeInfo(DatanodeID nodeID) {
@@ -123,6 +125,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.adminState = null;
 this.lastBlockReportTime = 0L;
 this.lastBlockReportMonotonic = 0L;
+this.numBlocks = 0;
   }
 
   protected DatanodeInfo(DatanodeID nodeID, String location) {
@@ -139,7 +142,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   final long lastUpdate, final long lastUpdateMonotonic,
   final int xceiverCount, final String networkLocation,
   final AdminStates adminState, final String upgradeDomain,
-  final long lastBlockReportTime, final long lastBlockReportMonotonic) {
+  final long lastBlockReportTime, final long lastBlockReportMonotonic,
+   final int blockCount) {
 super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
 ipcPort);
 this.capacity = capacity;
@@ -157,6 +161,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.upgradeDomain = upgradeDomain;
 this.lastBlockReportTime = lastBlockReportTime;
 this.lastBlockReportMonotonic = lastBlockReportMonotonic;
+this.numBlocks = blockCount;
   }
 
   /** Network location name. */
@@ -247,6 +252,13 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   public long getLastUpdateMonotonic() { return lastUpdateMonotonic;}
 
   /**
+   * @return Num of Blocks
+   */
+  public int getNumBlocks() {
+return numBlocks;
+  }
+
+  /**
* Set lastUpdate monotonic time
*/
   public void setLastUpdateMonotonic(long lastUpdateMonotonic) {
@@ -301,6 +313,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.xceiverCount = xceiverCount;
   }
 
+  /** Sets number of blocks. */
+  public void setNumBlocks(int blockCount) {
+this.numBlocks = blockCount;
+  }
+
   /** network location */
   @Override
   public String getNetworkLocation() {return location;}
@@ -351,6 +368,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 float cacheUsedPercent = getCacheUsedPercent();
 float cacheRemainingPercent = getCacheRemainingPercent();
 String lookupName = NetUtils.getHostNameOfIP(getName());
+int blockCount = getNumBlocks();
 
 buffer.append("Name: ").append(getName());
 if (lookupName != null) {
@@ -406,6 +424,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   

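A small sketch of reading the new block count programmatically; DistributedFileSystem#getDataNodeStats is an existing API, and only the iteration and printing here are illustrative.

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class BlockCountReportExample {
  static void printBlockCounts(DistributedFileSystem dfs) throws Exception {
    for (DatanodeInfo dn : dfs.getDataNodeStats()) {
      // getNumBlocks() is the field added by this change; dfsadmin -report
      // now prints the same per-DataNode value.
      System.out.println(dn.getName() + " blocks: " + dn.getNumBlocks());
    }
  }
}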
[24/50] hadoop git commit: Updated timeline reader to use AuthenticationFilter

2018-03-13 Thread aengineer
Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea18e70a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea18e70a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea18e70a

Branch: refs/heads/HDFS-7240
Commit: ea18e70a74e811ffa48c7e18e68510dd37dda63d
Parents: fa6a8b7
Author: Wangda Tan 
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Wangda Tan 
Committed: Fri Mar 9 22:51:08 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea18e70a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package 
org.apache.hadoop.yarn.server.timelineservice.reader.security;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import 
org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
 
 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
 TimelineAuthenticationFilterInitializer{
 
   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
* 
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
* YARN configuration properties prefixed with
* {@value TimelineAuthenticationFilterInitializer#PREFIX}.
*
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
   public void initFilter(FilterContainer container, Configuration conf) {
 setAuthFilterConfig(conf);
 container.addGlobalFilter("Timeline Reader Authentication Filter",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 getFilterConfig());
   }
 }





[32/50] hadoop git commit: HDFS-13241. RBF: TestRouterSafemode failed if the port 8888 is in use. Contributed by maobaolong.

2018-03-13 Thread aengineer
HDFS-13241. RBF: TestRouterSafemode failed if the port 8888 is in use.
Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91c82c90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91c82c90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91c82c90

Branch: refs/heads/HDFS-7240
Commit: 91c82c90f05ea75fe59c6ffad3dc3fcac1429e9e
Parents: ff31d8a
Author: Inigo Goiri 
Authored: Mon Mar 12 17:28:15 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Mar 12 17:28:15 2018 -0700

--
 .../hdfs/server/federation/router/TestRouterSafemode.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91c82c90/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
index 9299f77..e05f727 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
@@ -32,6 +32,7 @@ import java.net.URISyntaxException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Time;
@@ -65,6 +66,14 @@ public class TestRouterSafemode {
 // 2 sec post cache update before entering safemode (2 intervals)
 conf.setTimeDuration(DFS_ROUTER_SAFEMODE_EXPIRATION,
 TimeUnit.SECONDS.toMillis(2), TimeUnit.MILLISECONDS);
+
+conf.set(DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY, "0.0.0.0");
+conf.set(DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY, "0.0.0.0");
+conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "127.0.0.1:0");
+
 // RPC + State Store + Safe Mode only
 conf = new RouterConfigBuilder(conf)
 .rpc()


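Binding to port 0 asks the OS for any free ephemeral port, which is why the fixed default of 8888 no longer matters for the test. A generic, self-contained Java illustration of that mechanism (plain sockets, not Router code):

import java.net.ServerSocket;

public class EphemeralPortExample {
  public static void main(String[] args) throws Exception {
    try (ServerSocket socket = new ServerSocket(0)) {  // 0 = let the OS pick a free port
      System.out.println("Bound to free port " + socket.getLocalPort());
    }
  }
}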



[17/50] hadoop git commit: HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.

2018-03-13 Thread aengineer
HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4eeff62f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4eeff62f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4eeff62f

Branch: refs/heads/HDFS-7240
Commit: 4eeff62f6925991bca725b1ede5308055817de80
Parents: 7b0dc31
Author: Hanisha Koneru 
Authored: Fri Mar 9 15:27:17 2018 -0800
Committer: Hanisha Koneru 
Committed: Fri Mar 9 15:27:17 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html  | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 3 +++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eeff62f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 96b1210..a928425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -43,6 +43,9 @@
   
 Browse the file system
 Logs
+Metrics
+Configuration
+Process Thread Dump
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eeff62f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3700a5e..29f114b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -44,6 +44,9 @@
   
 Browse the file system
 Logs
+Metrics
+Configuration
+Process Thread Dump
   
 
   





[02/50] hadoop git commit: Revert "YARN-7891. LogAggregationIndexedFileController should support read from HAR file. (Xuan Gong via wangda)"

2018-03-13 Thread aengineer
Revert "YARN-7891. LogAggregationIndexedFileController should support read from 
HAR file. (Xuan Gong via wangda)"

This reverts commit 4d53ef7eefb14661d824924e503a910de1ae997f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e718ac59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e718ac59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e718ac59

Branch: refs/heads/HDFS-7240
Commit: e718ac597f2225cb4946e1ac4b3986c336645643
Parents: 19ae442
Author: Wangda Tan 
Authored: Wed Mar 7 15:42:29 2018 -0800
Committer: Wangda Tan 
Committed: Wed Mar 7 15:46:47 2018 -0800

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   4 -
 .../LogAggregationIndexedFileController.java|  60 ++---
 .../TestLogAggregationIndexFileController.java  |  54 
 .../application_123456_0001.har/_SUCCESS|   0
 .../application_123456_0001.har/_index  |   3 -
 .../application_123456_0001.har/_masterindex|   2 -
 .../application_123456_0001.har/part-0  | Bin 4123 -> 0 bytes
 .../RegisterNodeManagerRequest.java |   5 -
 .../pb/RegisterNodeManagerRequestPBImpl.java|  79 --
 .../yarn_server_common_service_protos.proto |   1 -
 .../hadoop/yarn/server/nodemanager/Context.java |   4 +-
 .../yarn/server/nodemanager/NodeManager.java|  12 -
 .../nodemanager/NodeStatusUpdaterImpl.java  |  14 --
 .../containermanager/ContainerManagerImpl.java  |  15 --
 .../logaggregation/AppLogAggregatorImpl.java|  11 +-
 .../tracker/NMLogAggregationStatusTracker.java  | 244 ---
 .../amrmproxy/BaseAMRMProxyTest.java|   6 -
 .../TestNMLogAggregationStatusTracker.java  | 124 --
 .../resourcemanager/ResourceTrackerService.java |  17 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |   6 -
 .../rmnode/RMNodeStartedEvent.java  |  11 -
 21 files changed, 26 insertions(+), 646 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e718ac59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 5378072..a235478 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -249,10 +249,6 @@
 
src/test/resources/application_1440536969523_0001.har/part-0
 
src/test/resources/application_1440536969523_0001.har/_masterindex
 
src/test/resources/application_1440536969523_0001.har/_SUCCESS
-
src/test/resources/application_123456_0001.har/_index
-
src/test/resources/application_123456_0001.har/part-0
-
src/test/resources/application_123456_0001.har/_masterindex
-
src/test/resources/application_123456_0001.har/_SUCCESS
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e718ac59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 5bba2e0..56bae26 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -495,21 +495,16 @@ public class LogAggregationIndexedFileController
 boolean getAllContainers = (containerIdStr == null
 || containerIdStr.isEmpty());
 long size = logRequest.getBytes();
-RemoteIterator nodeFiles = LogAggregationUtils
-.getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner(),
+List nodeFiles = LogAggregationUtils
+.getRemoteNodeFileList(conf, appId, logRequest.getAppOwner(),
 this.remoteRootLogDir, this.remoteRootLogDirSuffix);
-if (!nodeFiles.hasNext()) {
+if (nodeFiles.isEmpty()) {
   throw new IOException("There is no available log fils for "
   + "application:" + appId);
 }
-List allFiles = getAllNodeFiles(nodeFiles, appId);
-if (allFiles.isEmpty()) {
-  throw 

[30/50] hadoop git commit: HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. Contributed by Takanobu Asanuma

2018-03-13 Thread aengineer
HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. 
Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39a5fbae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39a5fbae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39a5fbae

Branch: refs/heads/HDFS-7240
Commit: 39a5fbae479ecee3a563e2f4eb937471fbf666f8
Parents: ddb67ca
Author: Chris Douglas 
Authored: Mon Mar 12 14:29:44 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 14:29:44 2018 -0700

--
 .../hadoop/hdfs/TestReconstructStripedFile.java | 48 +++
 ...econstructStripedFileWithRandomECPolicy.java | 49 
 2 files changed, 78 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39a5fbae/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 7201e11..1e93a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -67,14 +68,13 @@ import org.junit.Test;
 public class TestReconstructStripedFile {
   public static final Log LOG = 
LogFactory.getLog(TestReconstructStripedFile.class);
 
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlkNum = ecPolicy.getNumDataUnits();
-  private final int parityBlkNum = ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 3;
-  private final int groupSize = dataBlkNum + parityBlkNum;
-  private final int dnNum = groupSize + parityBlkNum;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlkNum;
+  private int parityBlkNum;
+  private int cellSize;
+  private int blockSize;
+  private int groupSize;
+  private int dnNum;
 
   static {
 GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
@@ -95,8 +95,20 @@ public class TestReconstructStripedFile {
   private Map dnMap = new HashMap<>();
   private final Random random = new Random();
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+dataBlkNum = ecPolicy.getNumDataUnits();
+parityBlkNum = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+blockSize = cellSize * 3;
+groupSize = dataBlkNum + parityBlkNum;
+dnNum = groupSize + parityBlkNum;
+
 conf = new Configuration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setInt(
@@ -114,10 +126,8 @@ public class TestReconstructStripedFile {
 cluster.waitActive();
 
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
-fs.getClient().setErasureCodingPolicy("/",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(ecPolicy.getName());
+fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
 
 List datanodes = cluster.getDataNodes();
 for (int i = 0; i < dnNum; i++) {
@@ -432,7 +442,7 @@ public class TestReconstructStripedFile {
 
 BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
 new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, 
liveIndices,
-StripedFileTestUtil.getDefaultECPolicy());
+ecPolicy);
 List ecTasks = new ArrayList<>();
 ecTasks.add(invalidECInfo);
 dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
@@ -461,7 +471,8 @@ public class TestReconstructStripedFile {
 .numDataNodes(numDataNodes).build();
 cluster.waitActive();
 fs = cluster.getFileSystem();
-ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
+ErasureCodingPolicy policy = ecPolicy;
+fs.enableErasureCodingPolicy(policy.getName());
 fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
 final int fileLen = cellSize * 

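The new TestReconstructStripedFileWithRandomECPolicy class is not shown in this excerpt; presumably it only overrides getEcPolicy(). A hedged sketch of that pattern — the random-policy helper named below is an assumption, not something this diff confirms:

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class TestReconstructStripedFileWithRandomECPolicy
    extends TestReconstructStripedFile {

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Hypothetical helper: pick any enabled policy other than the default so
    // that setup() above derives a different block/cell geometry from it.
    return StripedFileTestUtil.getRandomNonDefaultECPolicy();
  }
}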
[14/50] hadoop git commit: HADOOP-15293. TestLogLevel fails on Java 9

2018-03-13 Thread aengineer
HADOOP-15293. TestLogLevel fails on Java 9

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99ab511c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99ab511c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99ab511c

Branch: refs/heads/HDFS-7240
Commit: 99ab511cbac570bea9d31a55898b95590a8e3159
Parents: 4f39506
Author: Takanobu Asanuma 
Authored: Fri Mar 9 10:20:35 2018 -0800
Committer: Akira Ajisaka 
Committed: Fri Mar 9 10:20:35 2018 -0800

--
 .../src/test/java/org/apache/hadoop/log/TestLogLevel.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99ab511c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
index 16b4071..fd30b50 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
@@ -358,7 +358,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
 } catch (SSLException e) {
   GenericTestUtils.assertExceptionContains("Error while authenticating "
   + "with endpoint", e);
-  GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e
+  GenericTestUtils.assertExceptionContains("recognized SSL message", e
   .getCause());
 }
   }
@@ -379,7 +379,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
 } catch (SSLException e) {
   GenericTestUtils.assertExceptionContains("Error while authenticating "
   + "with endpoint", e);
-  GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e
+  GenericTestUtils.assertExceptionContains("recognized SSL message", e
   .getCause());
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] hadoop git commit: HADOOP-15297. Make S3A etag => checksum feature optional. Contributed by Steve Loughran.

2018-03-13 Thread aengineer
HADOOP-15297. Make S3A etag => checksum feature optional.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd05871b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd05871b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd05871b

Branch: refs/heads/HDFS-7240
Commit: dd05871b8b57303fe0b0c652e03257b59c191802
Parents: e1f5251
Author: Steve Loughran 
Authored: Mon Mar 12 14:01:42 2018 +
Committer: Steve Loughran 
Committed: Mon Mar 12 14:01:42 2018 +

--
 .../src/main/resources/core-default.xml | 11 
 .../org/apache/hadoop/fs/s3a/Constants.java | 11 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 38 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java   |  1 +
 .../org/apache/hadoop/fs/s3a/Statistic.java |  2 +
 .../src/site/markdown/tools/hadoop-aws/index.md | 41 ++-
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   | 53 +---
 7 files changed, 136 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 6d6ed42..9074300 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1547,6 +1547,17 @@
   
 
 
+
+  fs.s3a.etag.checksum.enabled
+  false
+  
+Should calls to getFileChecksum() return the etag value of the remote
+object.
+WARNING: if enabled, distcp operations between HDFS and S3 will fail unless
+-skipcrccheck is set.
+  
+
+
 
 
   fs.wasb.impl

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index faec784..4c95843 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -542,4 +542,15 @@ public final class Constants {
*/
   public static final String RETRY_THROTTLE_INTERVAL_DEFAULT = "500ms";
 
+  /**
+   * Should etags be exposed as checksums?
+   */
+  public static final String ETAG_CHECKSUM_ENABLED =
+  "fs.s3a.etag.checksum.enabled";
+
+  /**
+   * Default value: false.
+   */
+  public static final boolean ETAG_CHECKSUM_ENABLED_DEFAULT = false;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index eb65cfa..4b0c208 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2993,17 +2993,21 @@ public class S3AFileSystem extends FileSystem 
implements StreamCapabilities {
   }
 
   /**
-   * Get the etag of a object at the path via HEAD request and return it
-   * as a checksum object. This has the whatever guarantees about equivalence
-   * the S3 implementation offers.
+   * When enabled, get the etag of a object at the path via HEAD request and
+   * return it as a checksum object.
* 
*   If a tag has not changed, consider the object unchanged.
*   Two tags being different does not imply the data is different.
* 
* Different S3 implementations may offer different guarantees.
+   *
+   * This check is (currently) only made if
+   * {@link Constants#ETAG_CHECKSUM_ENABLED} is set; turning it on
+   * has caused problems with Distcp (HADOOP-15273).
+   *
* @param f The file path
* @param length The length of the file range for checksum calculation
-   * @return The EtagChecksum or null if checksums are not supported.
+   * @return The EtagChecksum or null if checksums are not enabled or 
supported.
* @throws IOException IO failure
* @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html;>Common
 Response Headers
*/
@@ -3012,15 +3016,23 @@ public class S3AFileSystem extends 

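A short sketch of enabling the new switch and asking for a checksum; the property name and its false default come from the patch, while the bucket, key, and URI are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EtagChecksumExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("fs.s3a.etag.checksum.enabled", true);  // default: false
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    // Disabled: getFileChecksum() returns null. Enabled: the object's etag.
    FileChecksum checksum =
        fs.getFileChecksum(new Path("s3a://example-bucket/data/part-0000"));
    System.out.println("checksum: " + checksum);
  }
}

As the new description warns, with the flag on, distcp between HDFS and S3 needs -skipcrccheck, since an etag can never match an HDFS CRC checksum.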
[12/50] hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao

2018-03-13 Thread aengineer
MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are 
both present twice in mapred-default.xml. Contributed by Sen Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fa3a63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fa3a63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fa3a63

Branch: refs/heads/HDFS-7240
Commit: 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf
Parents: 3f7bd46
Author: Jason Lowe 
Authored: Fri Mar 9 10:41:16 2018 -0600
Committer: Jason Lowe 
Committed: Fri Mar 9 10:41:16 2018 -0600

--
 .../src/main/resources/mapred-default.xml   | 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fa3a63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d0e5a2d..cf8be33 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -422,22 +422,6 @@
 
 
 
-  mapreduce.map.cpu.vcores
-  1
-  
-  The number of virtual cores required for each map task.
-  
-
-
-
-  mapreduce.reduce.cpu.vcores
-  1
-  
-  The number of virtual cores required for each reduce task.
-  
-
-
-
   mapreduce.reduce.merge.inmem.threshold
   1000
   The threshold, in terms of the number of files





[11/50] hadoop git commit: HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran.

2018-03-13 Thread aengineer
HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log 
output.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f7bd467
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f7bd467
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f7bd467

Branch: refs/heads/HDFS-7240
Commit: 3f7bd467979042161897a7c91c5b094b83164f75
Parents: 122805b
Author: Steve Loughran 
Authored: Fri Mar 9 10:44:07 2018 +
Committer: Steve Loughran 
Committed: Fri Mar 9 10:44:07 2018 +

--
 .../hadoop-common/src/main/conf/log4j.properties  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f7bd467/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 5f4b22b..c31e54f 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -306,3 +306,6 @@ 
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize}
 #log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+# Log levels of third-party libraries
+log4j.logger.org.apache.commons.beanutils=WARN





[27/50] hadoop git commit: HDFS-11399. Many tests fails in Windows due to injecting disk failures. Contributed by Yiqun Lin.

2018-03-13 Thread aengineer
HDFS-11399. Many tests fails in Windows due to injecting disk failures. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac627f56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac627f56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac627f56

Branch: refs/heads/HDFS-7240
Commit: ac627f561f0946e98a650850fb507536cbd2f2c4
Parents: dd05871
Author: Inigo Goiri 
Authored: Mon Mar 12 09:58:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Mar 12 09:58:56 2018 -0700

--
 .../server/blockmanagement/TestBlockStatsMXBean.java|  5 +
 .../hdfs/server/datanode/TestDataNodeVolumeFailure.java | 12 
 2 files changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac627f56/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 64364cb..11bfff8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -160,6 +161,10 @@ public class TestBlockStatsMXBean {
 
   @Test
   public void testStorageTypeStatsWhenStorageFailed() throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
 DFSTestUtil.createFile(cluster.getFileSystem(),
 new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
 Map storageTypeStatsMap = cluster

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac627f56/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index e73337b..6385367 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -293,6 +293,10 @@ public class TestDataNodeVolumeFailure {
   @Test(timeout=1)
   public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
   throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
 // make both data directories to fail on dn0
 final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
 final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
@@ -314,6 +318,10 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testVolumeFailureRecoveredByHotSwappingVolume()
   throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
 final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
 final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
 final DataNode dn0 = cluster.getDataNodes().get(0);
@@ -354,6 +362,10 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
   throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
 final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
 final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
 final File dn0VolNew = new File(dataDir, "data_new");



[01/50] hadoop git commit: YARN-7891. LogAggregationIndexedFileController should support read from HAR file. (Xuan Gong via wangda)

2018-03-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 faa01f320 -> a5dfae69f


YARN-7891. LogAggregationIndexedFileController should support read from HAR 
file. (Xuan Gong via wangda)

Change-Id: I16e081f21c5f80160564c49cc49d103bd8eb7e16


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/583f4594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/583f4594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/583f4594

Branch: refs/heads/HDFS-7240
Commit: 583f4594314b3db25b57b1e46ea8026eab21f932
Parents: e718ac5
Author: Wangda Tan 
Authored: Wed Mar 7 15:46:22 2018 -0800
Committer: Wangda Tan 
Committed: Wed Mar 7 15:46:47 2018 -0800

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   4 ++
 .../LogAggregationIndexedFileController.java|  60 +--
 .../TestLogAggregationIndexFileController.java  |  54 +
 .../application_123456_0001.har/_SUCCESS|   0
 .../application_123456_0001.har/_index  |   3 +
 .../application_123456_0001.har/_masterindex|   2 +
 .../application_123456_0001.har/part-0  | Bin 0 -> 4123 bytes
 7 files changed, 104 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/583f4594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index a235478..5378072 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -249,6 +249,10 @@
 
src/test/resources/application_1440536969523_0001.har/part-0
 
src/test/resources/application_1440536969523_0001.har/_masterindex
 
src/test/resources/application_1440536969523_0001.har/_SUCCESS
+
src/test/resources/application_123456_0001.har/_index
+
src/test/resources/application_123456_0001.har/part-0
+
src/test/resources/application_123456_0001.har/_masterindex
+
src/test/resources/application_123456_0001.har/_SUCCESS
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/583f4594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 56bae26..5bba2e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -495,16 +495,21 @@ public class LogAggregationIndexedFileController
 boolean getAllContainers = (containerIdStr == null
 || containerIdStr.isEmpty());
 long size = logRequest.getBytes();
-List nodeFiles = LogAggregationUtils
-.getRemoteNodeFileList(conf, appId, logRequest.getAppOwner(),
+RemoteIterator nodeFiles = LogAggregationUtils
+.getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner(),
 this.remoteRootLogDir, this.remoteRootLogDirSuffix);
-if (nodeFiles.isEmpty()) {
+if (!nodeFiles.hasNext()) {
   throw new IOException("There is no available log fils for "
   + "application:" + appId);
 }
-Map<String, FileStatus> checkSumFiles = parseCheckSumFiles(nodeFiles);
+List<FileStatus> allFiles = getAllNodeFiles(nodeFiles, appId);
+if (allFiles.isEmpty()) {
+  throw new IOException("There is no available log fils for "
+  + "application:" + appId);
+}
+Map<String, FileStatus> checkSumFiles = parseCheckSumFiles(allFiles);
 List<FileStatus> fileToRead = getNodeLogFileToRead(
-nodeFiles, nodeIdStr, appId);
+allFiles, nodeIdStr, appId);
 byte[] buf = new byte[65535];
 for (FileStatus thisNodeFile : fileToRead) {
   String nodeName = thisNodeFile.getPath().getName();
@@ -609,16 +614,21 @@ public class LogAggregationIndexedFileController
 containerIdStr.isEmpty());
 String nodeIdStr = (nodeId == null || nodeId.isEmpty()) ? null
 : LogAggregationUtils.getNodeString(nodeId);
-
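For readers following the hunks above: the patch stops eagerly listing the remote node-file directory (`getRemoteNodeFileList`) and instead walks it with a `RemoteIterator`, materializing the entries before the existing checksum-parsing and read logic runs. A minimal, editor-added sketch of that drain step; the class and method names below are illustrative stand-ins, not the patch's own helper:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.RemoteIterator;

class NodeFileDrainSketch {
  // Materialize the lazily listed aggregated-log files so downstream code
  // that expects a List<FileStatus> keeps working unchanged.
  static List<FileStatus> drain(RemoteIterator<FileStatus> nodeFiles)
      throws IOException {
    List<FileStatus> allFiles = new ArrayList<>();
    while (nodeFiles.hasNext()) {
      allFiles.add(nodeFiles.next());
    }
    return allFiles;
  }
}
```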

[13/50] hadoop git commit: YARN-8000. Yarn Service component instance name shows up as component name in container record. Contributed by Chandni Singh

2018-03-13 Thread aengineer
YARN-8000. Yarn Service component instance name shows up as component name in 
container record. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f395063
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f395063
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f395063

Branch: refs/heads/HDFS-7240
Commit: 4f395063bbae1636d4c59bc962916d78694b50d3
Parents: 32fa3a6
Author: Billie Rinaldi 
Authored: Fri Mar 9 08:50:28 2018 -0800
Committer: Billie Rinaldi 
Committed: Fri Mar 9 08:50:28 2018 -0800

--
 .../main/resources/definition/YARN-Services-Examples.md |  4 ++--
 .../hadoop/yarn/service/api/records/Container.java  | 12 ++--
 .../src/site/markdown/yarn-service/YarnServiceAPI.md|  4 ++--
 3 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f395063/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index 00b21dd..e4cdc7b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -72,7 +72,7 @@ Note, lifetime value of -1 means unlimited lifetime.
 "state": "READY",
 "launch_time": 1504051512412,
 "bare_host": "10.22.8.143",
-"component_name": "hello-0"
+"component_instance_name": "hello-0"
 },
 {
 "id": "container_e03_1503963985568_0002_01_02",
@@ -81,7 +81,7 @@ Note, lifetime value of -1 means unlimited lifetime.
 "state": "READY",
 "launch_time": 1504051536450,
 "bare_host": "10.22.8.143",
-"component_name": "hello-1"
+"component_instance_name": "hello-1"
 }
 ],
 "launch_command": "./start_nginx.sh",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f395063/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java
index af06542..1ffd85f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java
@@ -173,20 +173,20 @@ public class Container extends BaseResource {
   }
 
   /**
-   * Name of the component that this container instance belongs to.
+   * Name of the component instance that this container instance belongs to.
**/
-  public Container componentName(String componentName) {
-this.componentInstanceName = componentName;
+  public Container componentInstanceName(String componentInstanceName) {
+this.componentInstanceName = componentInstanceName;
 return this;
   }
 
-  @ApiModelProperty(example = "null", value = "Name of the component that this 
container instance belongs to.")
-  @JsonProperty("component_name")
+  @ApiModelProperty(example = "null", value = "Name of the component instance 
that this container instance belongs to.")
+  @JsonProperty("component_instance_name")
   public String getComponentInstanceName() {
 return componentInstanceName;
   }
 
-  @XmlElement(name = "component_name")
+  @XmlElement(name = "component_instance_name")
   public void setComponentInstanceName(String componentInstanceName) {
 this.componentInstanceName = componentInstanceName;
   }


[31/50] hadoop git commit: YARN-8024. LOG in class MaxRunningAppsEnforcer is initialized with a faulty class. Contributed by Sen Zhao.

2018-03-13 Thread aengineer
YARN-8024. LOG in class MaxRunningAppsEnforcer is initialized with a faulty 
class. Contributed by Sen Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff31d8ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff31d8ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff31d8ae

Branch: refs/heads/HDFS-7240
Commit: ff31d8aefa0490ccf1d44fe8a738fdc002aa712c
Parents: 39a5fba
Author: Yufei Gu 
Authored: Mon Mar 12 16:35:18 2018 -0700
Committer: Yufei Gu 
Committed: Mon Mar 12 16:35:26 2018 -0700

--
 .../resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff31d8ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 02e2d97..3f1ad0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -37,7 +37,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicat
  * constraints
  */
 public class MaxRunningAppsEnforcer {
-  private static final Log LOG = LogFactory.getLog(FairScheduler.class);
+  private static final Log LOG = LogFactory.getLog(
+  MaxRunningAppsEnforcer.class);
   
   private final FairScheduler scheduler;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
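The one-line fix above restores the usual logger-per-class idiom. For context, a hedged sketch of that idiom with a hypothetical class name:

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LoggerIdiomSketch {
  // Initialize the logger with the enclosing class so log output is
  // attributed to the right component.
  private static final Log LOG = LogFactory.getLog(LoggerIdiomSketch.class);

  public static void main(String[] args) {
    LOG.info("logger name now matches the enclosing class");
  }
}
```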



[18/50] hadoop git commit: HDFS-13252. Code refactoring: Remove Diff.ListType.

2018-03-13 Thread aengineer
HDFS-13252. Code refactoring: Remove Diff.ListType.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba0da278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba0da278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba0da278

Branch: refs/heads/HDFS-7240
Commit: ba0da2785d251745969f88a50d33ce61876d91aa
Parents: 4eeff62
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Mar 9 15:25:41 2018 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Mar 9 15:50:26 2018 -0800

--
 .../hdfs/server/namenode/FSDirRenameOp.java |   3 +-
 .../hdfs/server/namenode/INodeDirectory.java|  10 +-
 .../hdfs/server/namenode/INodeReference.java|   4 -
 .../snapshot/DirectorySnapshottableFeature.java |   5 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 131 +++
 .../snapshot/FSImageFormatPBSnapshot.java   |   6 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  11 +-
 .../snapshot/SnapshotDiffListingInfo.java   |  15 +--
 .../snapshot/SnapshotFSImageFormat.java |   4 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  | 131 +--
 .../namenode/snapshot/SnapshotTestHelper.java   |  79 ++-
 .../snapshot/TestRenameWithSnapshots.java   | 129 +++---
 .../snapshot/TestSetQuotaWithSnapshot.java  |   6 +-
 13 files changed, 260 insertions(+), 274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index efc8da2..6162ceb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -588,8 +588,7 @@ class FSDirRenameOp {
 private INode srcChild;
 private INode oldDstChild;
 
-RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
-throws QuotaExceededException {
+RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP) 
{
   this.fsd = fsd;
   this.srcIIP = srcIIP;
   this.dstIIP = dstIIP;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6594a56..72ad9e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -39,7 +39,6 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFea
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -353,7 +352,7 @@ public class INodeDirectory extends 
INodeWithAdditionalFields
 // replace the instance in the created list of the diff list
 DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
 if (sf != null) {
-  sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+  sf.getDiffs().replaceCreatedChild(oldChild, newChild);
 }
 
 // update the inodeMap
@@ -746,8 +745,8 @@ public class INodeDirectory extends 
INodeWithAdditionalFields
   final INode newChild) throws QuotaExceededException {
 DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
 assert sf != null : "Directory does not have snapshot feature";
-sf.getDiffs().removeChild(ListType.DELETED, oldChild);
-sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+sf.getDiffs().removeDeletedChild(oldChild);
+sf.getDiffs().replaceCreatedChild(oldChild, newChild);
 addChild(newChild, true, Snapshot.CURRENT_STATE_ID);
   }
   
@@ -761,8 +760,7 
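The refactoring pattern visible in the hunks above replaces generic calls such as `removeChild(ListType.DELETED, ...)` and `replaceChild(ListType.CREATED, ...)` with purpose-named methods, which is what allows `Diff.ListType` to be deleted. A simplified, editor-added stand-in (not the real `DirectoryDiffList`) showing the resulting API shape:

```java
import java.util.ArrayList;
import java.util.List;

class DiffListSketch<T> {
  private final List<T> created = new ArrayList<>();
  private final List<T> deleted = new ArrayList<>();

  // Was: removeChild(ListType.DELETED, child)
  void removeDeletedChild(T child) {
    deleted.remove(child);
  }

  // Was: replaceChild(ListType.CREATED, oldChild, newChild)
  void replaceCreatedChild(T oldChild, T newChild) {
    int i = created.indexOf(oldChild);
    if (i >= 0) {
      created.set(i, newChild);
    }
  }
}
```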

[10/50] hadoop git commit: HDFS-13233. RBF: MountTableResolver doesn't return the correct mount point of the given path. Contributed by wangzhiyuan.

2018-03-13 Thread aengineer
HDFS-13233. RBF: MountTableResolver doesn't return the correct mount point of 
the given path. Contributed by wangzhiyuan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/122805b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/122805b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/122805b4

Branch: refs/heads/HDFS-7240
Commit: 122805b43acff2b094bd984fa76dbc8d2e110edd
Parents: 113f401
Author: Yiqun Lin 
Authored: Fri Mar 9 15:42:57 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Mar 9 15:42:57 2018 +0800

--
 .../federation/resolver/MountTableResolver.java | 13 ++-
 .../resolver/TestMountTableResolver.java| 23 
 .../federation/router/TestRouterQuota.java  |  2 +-
 3 files changed, 36 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/122805b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 374e3ba..dac6f7f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -521,6 +521,17 @@ public class MountTableResolver
 return this.defaultNameService;
   }
 
+  private boolean isParentEntry(final String path, final String parent) {
+if (!path.startsWith(parent)) {
+  return false;
+}
+if (path.equals(parent)) {
+  return true;
+}
+return path.charAt(parent.length()) == Path.SEPARATOR_CHAR
+|| parent.equals(Path.SEPARATOR);
+  }
+
   /**
* Find the deepest mount point for a path.
* @param path Path to look for.
@@ -530,7 +541,7 @@ public class MountTableResolver
 readLock.lock();
 try {
   Entry entry = this.tree.floorEntry(path);
-  while (entry != null && !path.startsWith(entry.getKey())) {
+  while (entry != null && !isParentEntry(path, entry.getKey())) {
 entry = this.tree.lowerEntry(entry.getKey());
   }
   if (entry == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/122805b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index fa2f89c..a09daf0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -179,6 +179,29 @@ public class TestMountTableResolver {
   }
 
   @Test
+  public void testGetMountPoint() throws IOException {
+// Check get the mount table entry for a path
+MountTable mtEntry;
+mtEntry = mountTable.getMountPoint("/");
+assertTrue(mtEntry.getSourcePath().equals("/"));
+
+mtEntry = mountTable.getMountPoint("/user");
+assertTrue(mtEntry.getSourcePath().equals("/user"));
+
+mtEntry = mountTable.getMountPoint("/user/a");
+assertTrue(mtEntry.getSourcePath().equals("/user/a"));
+
+mtEntry = mountTable.getMountPoint("/user/a/");
+assertTrue(mtEntry.getSourcePath().equals("/user/a"));
+
+mtEntry = mountTable.getMountPoint("/user/a/11");
+assertTrue(mtEntry.getSourcePath().equals("/user/a"));
+
+mtEntry = mountTable.getMountPoint("/user/a1");
+assertTrue(mtEntry.getSourcePath().equals("/user"));
+  }
+
+  @Test
   public void testGetMountPoints() throws IOException {
 
 // Check getting all mount points (virtual and real) beneath a path

http://git-wip-us.apache.org/repos/asf/hadoop/blob/122805b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
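To see why a plain `startsWith` check was wrong, here is a small standalone rendering of the new `isParentEntry` rule (editor-added; it mirrors the hunk above but hard-codes `'/'` in place of `Path.SEPARATOR_CHAR`):

```java
class ParentEntrySketch {
  static boolean isParentEntry(String path, String parent) {
    if (!path.startsWith(parent)) {
      return false;
    }
    if (path.equals(parent)) {
      return true;
    }
    // Require a path-separator boundary right after the parent prefix.
    return path.charAt(parent.length()) == '/' || parent.equals("/");
  }

  public static void main(String[] args) {
    System.out.println(isParentEntry("/user/a1", "/user/a"));   // false: sibling, not a child
    System.out.println(isParentEntry("/user/a/11", "/user/a")); // true
    System.out.println(isParentEntry("/user/a", "/user/a"));    // true: the mount point itself
  }
}
```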
 

[22/50] hadoop git commit: Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."

2018-03-13 Thread aengineer
Revert "HADOOP-14077. Add ability to access jmx via proxy.  Contributed by 
Yuanbo Liu."

This reverts commit 172b23af33554b7d58fd41b022d983bcc2433da7.

(cherry picked from commit d0d2d4c51e9534e08893ae14cf3fff7b2ee70b1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a8dade9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a8dade9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a8dade9

Branch: refs/heads/HDFS-7240
Commit: 3a8dade9b1bf01cf75fc68cecb351c23302cdee5
Parents: 4743d4a
Author: Owen O'Malley 
Authored: Thu Mar 1 09:59:08 2018 -0800
Committer: Wangda Tan 
Committed: Fri Mar 9 22:46:30 2018 -0800

--
 .../AuthenticationWithProxyUserFilter.java  |  43 ---
 .../hadoop/http/TestHttpServerWithSpengo.java   |  15 +--
 .../mapreduce/v2/app/webapp/AppController.java  |   7 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 113 ++-
 4 files changed, 85 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a8dade9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index c97f8ad..ea9b282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
@@ -42,9 +41,6 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
 
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class);
-
   /**
* Constant used in URL's query string to perform a proxy user request, the
* value of the DO_AS parameter is the user the request will be
@@ -70,30 +66,29 @@ public class AuthenticationWithProxyUserFilter extends 
AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
 
-final String proxyUser = getDoAs(request);
+// authorize proxy user before calling next filter.
+String proxyUser = getDoAs(request);
 if (proxyUser != null) {
+  UserGroupInformation realUser =
+  UserGroupInformation.createRemoteUser(request.getRemoteUser());
+  UserGroupInformation proxyUserInfo =
+  UserGroupInformation.createProxyUser(proxyUser, realUser);
 
-  // Change the remote user after proxy user is authorized.
-  final HttpServletRequest finalReq = request;
-  request = new HttpServletRequestWrapper(finalReq) {
-
-private String getRemoteOrProxyUser() throws AuthorizationException {
-  UserGroupInformation realUser =
-  UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
-  UserGroupInformation proxyUserInfo =
-  UserGroupInformation.createProxyUser(proxyUser, realUser);
-  ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
-  return proxyUserInfo.getUserName();
-}
+  try {
+ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
+  } catch (AuthorizationException ex) {
+HttpExceptionUtils.createServletExceptionResponse(response,
+HttpServletResponse.SC_FORBIDDEN, ex);
+// stop filter chain if there is an Authorization Exception.
+return;
+  }
 
+  final UserGroupInformation finalProxyUser = proxyUserInfo;
+  // Change the remote user after proxy user is authorized.
+  request = new HttpServletRequestWrapper(request) {
 @Override
 public String getRemoteUser() {
-  try {
-return getRemoteOrProxyUser();
-  } catch (AuthorizationException ex) {
-LOG.error("Unable to verify proxy user: " + ex.getMessage(), ex);
-  

[04/50] hadoop git commit: YARN-8011. TestOpportunisticContainerAllocatorAMService#testContainerPromoteAndDemoteBeforeContainerStart fails intermittently. Contributed by Tao Yang.

2018-03-13 Thread aengineer
YARN-8011. 
TestOpportunisticContainerAllocatorAMService#testContainerPromoteAndDemoteBeforeContainerStart
 fails intermittently. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b451889e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b451889e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b451889e

Branch: refs/heads/HDFS-7240
Commit: b451889e8e83f7977f2b76789c61e823e2d40487
Parents: 4cc9a6d
Author: Weiwei Yang 
Authored: Thu Mar 8 18:13:36 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Mar 8 18:13:36 2018 +0800

--
 ...pportunisticContainerAllocatorAMService.java | 29 ++--
 1 file changed, 15 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b451889e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index 1af930f..efa76bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -243,13 +243,13 @@ public class TestOpportunisticContainerAllocatorAMService 
{
 null, ExecutionType.GUARANTEED)));
 // Node on same host should not result in allocation
 sameHostDiffNode.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
 allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
 Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
 
 // Wait for scheduler to process all events
 dispatcher.waitForEventThreadToWait();
-Thread.sleep(1000);
+rm.drainEvents();
 // Verify Metrics After OPP allocation (Nothing should change again)
 verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
 
@@ -286,7 +286,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 
 // Ensure after correct node heartbeats, we should get the allocation
 allocNode.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
 allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
 Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
 Container uc =
@@ -303,7 +303,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 nm2.nodeHeartbeat(true);
 nm3.nodeHeartbeat(true);
 nm4.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
 
 // Verify that the container is still in ACQUIRED state wrt the RM.
 RMContainer rmContainer = ((CapacityScheduler) scheduler)
@@ -325,6 +325,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 
 // Wait for scheduler to finish processing events
 dispatcher.waitForEventThreadToWait();
+rm.drainEvents();
 // Verify Metrics After OPP allocation :
 // Everything should have reverted to what it was
 verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
@@ -396,7 +397,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 ContainerStatus.newInstance(container.getId(),
 ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
 true);
-Thread.sleep(200);
+rm.drainEvents();
 
 // Verify that container is actually running wrt the RM..
 RMContainer rmContainer = ((CapacityScheduler) scheduler)
@@ -434,7 +435,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 ContainerStatus.newInstance(container.getId(),
 ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
 true);
-Thread.sleep(200);
+rm.drainEvents();
 
 allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
 Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
@@ -521,7 +522,7 @@ public class TestOpportunisticContainerAllocatorAMService {
 ContainerStatus.newInstance(container.getId(),
 ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
   

[20/50] hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

2018-03-13 Thread aengineer
HDFS-13232. RBF: ConnectionPool should return first usable connection. 
Contributed by Ekanth S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8133cd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8133cd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8133cd53

Branch: refs/heads/HDFS-7240
Commit: 8133cd5305d7913453abb2d48da12f672c0ce334
Parents: afe1a3c
Author: Inigo Goiri 
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri 
Committed: Fri Mar 9 18:25:05 2018 -0800

--
 .../federation/router/ConnectionPool.java   |  2 +-
 .../router/TestConnectionManager.java   | 43 
 2 files changed, 44 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8133cd53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 5c77c59..5af8a86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
 for (int i=0; i poolMap = connManager.getPools();
+final int totalConns = 10;
+int activeConns = 5;
+
+ConnectionPool pool = new ConnectionPool(
+conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+addConnectionsToPool(pool, totalConns, activeConns);
+poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+// All remaining connections should be usable
+final int remainingSlots = totalConns - activeConns;
+for (int i = 0; i < remainingSlots; i++) {
+  ConnectionContext cc = pool.getConnection();
+  assertTrue(cc.isUsable());
+  cc.getClient();
+  

[16/50] hadoop git commit: HDFS-13190. Document WebHDFS support for snapshot diff

2018-03-13 Thread aengineer
HDFS-13190. Document WebHDFS support for snapshot diff

Signed-off-by: Akira Ajisaka 
Signed-off-by: Xiaoyu Yao 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b0dc310
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b0dc310
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b0dc310

Branch: refs/heads/HDFS-7240
Commit: 7b0dc310208ee5bc191c9accb3d1312513145653
Parents: 9a082fb
Author: Lokesh Jain 
Authored: Fri Mar 9 15:04:14 2018 -0800
Committer: Akira Ajisaka 
Committed: Fri Mar 9 15:06:15 2018 -0800

--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 92 
 1 file changed, 92 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b0dc310/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 4a1395e..057ca59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,6 +50,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 * [`CHECKACCESS`](#Check_access) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
 * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
 * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
+* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
 *   HTTP PUT
 * [`CREATE`](#Create_and_Write_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
 * [`MKDIRS`](#Make_a_Directory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1266,6 +1267,21 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSna
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot
 
+### Get Snapshot Diff
+
+* Submit a HTTP GET request.
+
curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTDIFF
   &oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
+
+The client receives a response with a [`SnapshotDiffReport` JSON 
object](#SnapshotDiffReport_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+
{"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
+
 Delegation Token Operations
 ---
 
@@ -2043,6 +2059,82 @@ A `BlockStoragePolicies` JSON object represents an array 
of `BlockStoragePolicy`
 }
 ```
 
+### SnapshotDiffReport JSON Schema
+
+```json
+{
+  "name": "SnapshotDiffReport",
+  "type": "object",
+  "properties":
+  {
+"SnapshotDiffReport":
+{
+  "type": "object",
+  "properties"  :
+  {
+"diffList":
+{
+  "description": "An array of DiffReportEntry",
+  "type": "array",
+  "items"   : diffReportEntries,
+  "required": true
+},
+"fromSnapshot":
+{
+  "description": "Source snapshot",
+  "type": "string",
+  "required": true
+},
+"snapshotRoot":
+{
+  "description" : "String representation of snapshot root path",
+  "type": "string",
+  "required": true
+},
+"toSnapshot":
+{
+  "description" : "Destination snapshot",
+  "type": "string",
+  "required": true
+}
+  }
+}
+  }
+}
+```
+
+#### DiffReport Entries
+
+JavaScript syntax is used to define `diffReportEntries` so that it can be 
referred to in the `SnapshotDiffReport` JSON schema.
+
+```javascript
+var diffReportEntries =
+{
+  "type": "object",
+  "properties":
+  {
+"sourcePath":
+{
+  "description" : "Source path name relative to snapshot root",
+  "type": "string",
+  "required": true
+},
+"targetPath":
+{
+  "description" : "Target path relative to snapshot root used for renames",
+  "type": "string",
+  "required": true
+},
+"type":
+{
+  "description" : "Type of diff report entry",
+  "enum": ["CREATE", "MODIFY", "DELETE", "RENAME"],
+  "required": true
+}
+  }
+}
+```
+
 HTTP Query Parameter Dictionary
 ---
 


-
To unsubscribe, e-mail: 
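A small editor-added Java equivalent of the curl call documented above; the host, port, file path and snapshot names are placeholders, and the `oldsnapshotname`/`snapshotname` query parameters are stated as assumptions:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SnapshotDiffRequestSketch {
  public static void main(String[] args) throws Exception {
    // GETSNAPSHOTDIFF against /foo, comparing snapshots s3 and s4.
    URL url = new URL("http://namenode.example.com:9870/webhdfs/v1/foo"
        + "?op=GETSNAPSHOTDIFF&oldsnapshotname=s3&snapshotname=s4");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        // e.g. {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3",...}}
        System.out.println(line);
      }
    }
  }
}
```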

[08/50] hadoop git commit: HADOOP-15280. TestKMS.testWebHDFSProxyUserKerb and TestKMS.testWebHDFSProxyUserSimple fail in trunk. Contributed by Bharat Viswanadham.

2018-03-13 Thread aengineer
HADOOP-15280. TestKMS.testWebHDFSProxyUserKerb and 
TestKMS.testWebHDFSProxyUserSimple fail in trunk. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a906a226
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a906a226
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a906a226

Branch: refs/heads/HDFS-7240
Commit: a906a226458a0b4c4b2df61d9bcf375a1d194925
Parents: 0c2b969
Author: Xiao Chen 
Authored: Thu Mar 8 10:16:37 2018 -0800
Committer: Xiao Chen 
Committed: Thu Mar 8 10:17:02 2018 -0800

--
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a906a226/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index f7ecf44..1189fbf 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -2667,7 +2667,11 @@ public class TestKMS {
   kp.createKey("kbb", new KeyProvider.Options(conf));
   Assert.fail();
 } catch (Exception ex) {
-  Assert.assertTrue(ex.getMessage(), 
ex.getMessage().contains("Forbidden"));
+  GenericTestUtils.assertExceptionContains("Error while " +
+  "authenticating with endpoint", ex);
+  GenericTestUtils.assertExceptionContains("Forbidden", ex
+  .getCause().getCause());
+
 }
 return null;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] hadoop git commit: YARN-7944. [UI2] Remove master node link from headers of application pages. Contributed by Yesha Vora.

2018-03-13 Thread aengineer
YARN-7944. [UI2] Remove master node link from headers of application pages. 
Contributed by Yesha Vora.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/113f401f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/113f401f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/113f401f

Branch: refs/heads/HDFS-7240
Commit: 113f401f41ee575cb303ceb647bc243108d93a04
Parents: a906a22
Author: Sunil G 
Authored: Thu Mar 8 23:52:38 2018 +0530
Committer: Sunil G 
Committed: Thu Mar 8 23:53:36 2018 +0530

--
 .../src/main/webapp/app/models/yarn-app-timeline.js | 1 -
 .../hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js   | 5 -
 .../src/main/webapp/app/serializers/yarn-app-timeline.js| 1 -
 .../hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js  | 1 -
 .../hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs   | 2 --
 5 files changed, 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/113f401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-timeline.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-timeline.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-timeline.js
index fa5223f..8b2702f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-timeline.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-timeline.js
@@ -30,7 +30,6 @@ export default DS.Model.extend({
   finishedTime: DS.attr('finishedTime'),
   progress: DS.attr('number'),
   diagnostics: DS.attr('string'),
-  amContainerLogs: DS.attr('string'),
   amHostHttpAddress: DS.attr('string'),
   logAggregationStatus: DS.attr('string'),
   unmanagedApplication: DS.attr('string'),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/113f401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
index 5d0f23b..fcc8490 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
@@ -30,7 +30,6 @@ export default DS.Model.extend({
   finishedTime: DS.attr("finishedTime"),
   progress: DS.attr("number"),
   diagnostics: DS.attr("string"),
-  amContainerLogs: DS.attr("string"),
   amHostHttpAddress: DS.attr("string"),
   masterNodeId: DS.attr("string"),
   logAggregationStatus: DS.attr("string"),
@@ -97,10 +96,6 @@ export default DS.Model.extend({
 );
   }.property("memorySeconds", "vcoreSeconds"),
 
-  masterNodeURL: function() {
-return 
`#/yarn-node/${this.get("masterNodeId")}/${this.get("amHostHttpAddress")}/info/`;
-  }.property("masterNodeId", "amHostHttpAddress"),
-
   progressStyle: function() {
 return "width: " + this.get("progress") + "%";
   }.property("progress"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/113f401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-timeline.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-timeline.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-timeline.js
index 680fe8c..0496d77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-timeline.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-timeline.js
@@ -45,7 +45,6 @@ export default DS.JSONAPISerializer.extend({
 progress: 100,
 applicationType: payload.info.YARN_APPLICATION_TYPE,
 diagnostics: (diagnostics && diagnostics !== 'null')? diagnostics : '',
-amContainerLogs: '',
 amHostHttpAddress: '',
 logAggregationStatus: '',
 unmanagedApplication: 
payload.info.YARN_APPLICATION_UNMANAGED_APPLICATION || 'N/A',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/113f401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
 

[19/50] hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

2018-03-13 Thread aengineer
HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afe1a3cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afe1a3cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afe1a3cc

Branch: refs/heads/HDFS-7240
Commit: afe1a3ccd56a12fec900360a8a2855c080728e65
Parents: ba0da27
Author: Inigo Goiri 
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri 
Committed: Fri Mar 9 17:18:51 2018 -0800

--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java| 46 
 2 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
   Entry entry = it.next();
   PathLocation loc = entry.getValue();
   String src = loc.getSourcePath();
-  if (src.startsWith(path)) {
-LOG.debug("Removing {}", src);
-it.remove();
+  if (src != null) {
+if (src.startsWith(path)) {
+  LOG.debug("Removing {}", src);
+  it.remove();
+}
+  } else {
+String dest = loc.getDefaultLocation().getDest();
+if (dest.startsWith(path)) {
+  LOG.debug("Removing default cache {}", dest);
+  it.remove();
+}
   }
 }
 
@@ -287,6 +295,7 @@ public class MountTableResolver
 if (!oldEntries.contains(srcPath)) {
   // Add node, it does not exist
   this.tree.put(srcPath, entry);
+  invalidateLocationCache(srcPath);
   LOG.info("Added new mount point {} to resolver", srcPath);
 } else {
   // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
 Configuration conf = new Configuration();
 conf.setInt(
 FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
 mountTable = new MountTableResolver(conf);
 
 // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
 long cacheSize = mountTable.getCacheSize();
 assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+List entries = new ArrayList<>();
+
+// Add entry and test location cache
+Map map1 = getMountTableEntry("1", "/testlocationcache");
+MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+entries.add(entry1);
+
+Map map2 = getMountTableEntry("2",
+"/anothertestlocationcache");
+MountTable entry2 = MountTable.newInstance("/anothertestlocationcache",
+map2);
+entries.add(entry2);
+mountTable.refreshEntries(entries);
+
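The essence of the cache fix above is that any cached path resolution at or under a changed mount point must be dropped, including entries resolved through the default nameservice, which have no source path. A simplified, editor-added sketch of prefix-based invalidation; it is not the router's actual data structure:

```java
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LocationCacheSketch {
  private final Map<String, String> locationCache = new ConcurrentHashMap<>();

  // Drop every cached resolution whose path sits at or under the updated mount point.
  void invalidateLocationCache(String mountPoint) {
    Iterator<Map.Entry<String, String>> it = locationCache.entrySet().iterator();
    while (it.hasNext()) {
      if (it.next().getKey().startsWith(mountPoint)) {
        it.remove();
      }
    }
  }
}
```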

[23/50] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu."

2018-03-13 Thread aengineer
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.  
Contribute by Yuanbo Liu."

This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.

Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa6a8b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa6a8b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa6a8b78

Branch: refs/heads/HDFS-7240
Commit: fa6a8b78d481d3b4d355e1bf078f30dd5e09850d
Parents: 3a8dade
Author: Owen O'Malley 
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Wangda Tan 
Committed: Fri Mar 9 22:46:41 2018 -0800

--
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 119 -
 .../hadoop/http/TestHttpServerWithSpengo.java   | 481 ---
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 ---
 5 files changed, 13 insertions(+), 688 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 /**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
  * 
  * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO
  * authentication  for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends 
FilterInitializer {
   public void initFilter(FilterContainer container, Configuration conf) {
 Map filterConfig = getFilterConfigMap(conf, PREFIX);
 
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
 container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 filterConfig);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
-import 

[07/50] hadoop git commit: HDFS-13232. RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns. Contributed by Chao Sun.

2018-03-13 Thread aengineer
HDFS-13232. RBF: ConnectionManager's cleanup task will compare each pool's own 
active conns with its total conns. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c2b969e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c2b969e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c2b969e

Branch: refs/heads/HDFS-7240
Commit: 0c2b969e0161a068bf9ae013c4b95508dfb90a8a
Parents: 7ef4d94
Author: Inigo Goiri 
Authored: Thu Mar 8 09:32:05 2018 -0800
Committer: Inigo Goiri 
Committed: Thu Mar 8 09:32:05 2018 -0800

--
 .../federation/router/ConnectionManager.java|  59 +-
 .../federation/router/ConnectionPoolId.java |   6 +
 .../router/TestConnectionManager.java   | 114 +++
 3 files changed, 153 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2b969e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 2e45280..594f489 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -32,6 +32,7 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -303,6 +304,38 @@ public class ConnectionManager {
 return JSON.toString(info);
   }
 
+  @VisibleForTesting
+  Map getPools() {
+return this.pools;
+  }
+
+  /**
+   * Clean the unused connections for this pool.
+   *
+   * @param pool Connection pool to cleanup.
+   */
+  @VisibleForTesting
+  void cleanup(ConnectionPool pool) {
+if (pool.getNumConnections() > pool.getMinSize()) {
+  // Check if the pool hasn't been active in a while or not 50% are used
+  long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
+  int total = pool.getNumConnections();
+  int active = pool.getNumActiveConnections();
+  if (timeSinceLastActive > connectionCleanupPeriodMs ||
+  active < MIN_ACTIVE_RATIO * total) {
+// Remove and close 1 connection
+List conns = pool.removeConnections(1);
+for (ConnectionContext conn : conns) {
+  conn.close();
+}
+LOG.debug("Removed connection {} used {} seconds ago. " +
+"Pool has {}/{} connections", pool.getConnectionPoolId(),
+TimeUnit.MILLISECONDS.toSeconds(timeSinceLastActive),
+pool.getNumConnections(), pool.getMaxSize());
+  }
+}
+  }
+
   /**
* Removes stale connections not accessed recently from the pool. This is
* invoked periodically.
@@ -350,32 +383,6 @@ public class ConnectionManager {
 }
   }
 }
-
-/**
- * Clean the unused connections for this pool.
- *
- * @param pool Connection pool to cleanup.
- */
-private void cleanup(ConnectionPool pool) {
-  if (pool.getNumConnections() > pool.getMinSize()) {
-// Check if the pool hasn't been active in a while or not 50% are used
-long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
-int total = pool.getNumConnections();
-int active = getNumActiveConnections();
-if (timeSinceLastActive > connectionCleanupPeriodMs ||
-active < MIN_ACTIVE_RATIO * total) {
-  // Remove and close 1 connection
-  List conns = pool.removeConnections(1);
-  for (ConnectionContext conn : conns) {
-conn.close();
-  }
-  LOG.debug("Removed connection {} used {} seconds ago. " +
-  "Pool has {}/{} connections", pool.getConnectionPoolId(),
-  TimeUnit.MILLISECONDS.toSeconds(timeSinceLastActive),
-  pool.getNumConnections(), pool.getMaxSize());
-}
-  }
-}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2b969e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java

[29/50] hadoop git commit: HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by Gera Shegalov

2018-03-13 Thread aengineer
HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by 
Gera Shegalov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddb67ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddb67ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddb67ca7

Branch: refs/heads/HDFS-7240
Commit: ddb67ca707de896cd0ba5cda3c0d1a2d9edca968
Parents: cceb68f
Author: Chris Douglas 
Authored: Mon Mar 12 13:42:38 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 13:43:27 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 +++
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddb67ca7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 1008583..f851ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -180,6 +180,145 @@ Recall that one cannot rename files or directories across 
namenodes or clusters
 
 This will NOT work in the new world if `/user` and `/data` are actually stored 
on different namenodes within a cluster.
 
Multi-Filesystem I/O with Nfly Mount Points
+-
+
+HDFS and other distributed filesystems provide data resilience via some sort of
+redundancy such as block replication or more sophisticated distributed 
encoding.
+However, modern setups may be comprised of multiple Hadoop clusters, enterprise
+filers, hosted on and off premise. Nfly mount points make it possible for a
+single logical file to be synchronously replicated by multiple filesystems.
+It is designed for relatively small files, up to a gigabyte. In general, throughput is a
+function of single-core / single-network-link performance, since the logic
+resides in a single client JVM using ViewFs, such as FsShell or a
+MapReduce task.
+
+### Basic Configuration
+
+Consider the following example to understand the basic configuration of Nfly.
+Suppose we want to keep the directory `ads` replicated on three filesystems
+represented by URIs: `uri1`, `uri2` and `uri3`.
+
+```xml
+  <property>
+    <name>fs.viewfs.mounttable.global.linkNfly../ads</name>
+    <value>uri1,uri2,uri3</value>
+  </property>
+```
+Note 2 consecutive `..` in the property name. They arise because of empty
+settings for advanced tweaking of the mount point which we will show in
+subsequent sections. The property value is a comma-separated list of URIs.
+
+URIs may point to different clusters in different regions
+`hdfs://datacenter-east/ads`, `s3a://models-us-west/ads`, 
`hdfs://datacenter-west/ads`
+or in the simplest case to different directories under the same filesystem,
+e.g., `file:/tmp/ads1`, `file:/tmp/ads2`, `file:/tmp/ads3`
+
+All *modifications* performed under the global path `viewfs://global/ads` are
+propagated to all destination URIs if the underlying system is available.
+
+For instance if we create a file via hadoop shell
+```bash
+hadoop fs -touchz viewfs://global/ads/z1
+```
+
+We will find it via local filesystem in the latter configuration
+```bash
+ls -al /tmp/ads*/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads1/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads2/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads3/z1
+```
+
+A read from the global path is processed by the first filesystem that does not
+result in an exception. The order in which filesystems are accessed depends on
+whether they are available at this moment and whether a topological order
+exists.
+
+### Advanced Configuration
+
+Mount points `linkNfly` can be further configured using parameters passed as a
+comma-separated list of key=value pairs. The following parameters are currently
+supported.
+
+`minReplication=int` determines the minimum number of destinations that have to
+process a write modification without exceptions; if fewer succeed, the Nfly
+write fails. It is a configuration error to set minReplication higher than the
+number of target URIs. The default is 2.
+
+If minReplication is lower than the number of target URIs, some target URIs may
+miss the latest writes. This can be compensated for by employing more expensive
+read operations controlled by the following settings:
+
+`readMostRecent=boolean`, if set to `true`, causes the Nfly client to check the
+path under all target URIs instead of just the first one in the topology order.
+Among all replicas available at the moment, the one with the most recent
+modification time is processed.
+
+`repairOnRead=boolean`, if set to `true`, causes Nfly to copy the most recent
+replica to stale targets such that 
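
As an illustration (an editorial sketch, not part of the committed document), the
advanced parameters above would fill the empty segment between the two dots noted
in the basic example, e.g. requiring all three targets to accept each write and
serving reads from the most recent replica:

```xml
  <property>
    <!-- key=value parameters are assumed to occupy the segment between the dots -->
    <name>fs.viewfs.mounttable.global.linkNfly.minReplication=3,readMostRecent=true./ads</name>
    <value>uri1,uri2,uri3</value>
  </property>
```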

[21/50] hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

2018-03-13 Thread aengineer
HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4743d4a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4743d4a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4743d4a2

Branch: refs/heads/HDFS-7240
Commit: 4743d4a2c70a213a41804a24c776e6db00e1b90d
Parents: 8133cd5
Author: Yiqun Lin 
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin 
Committed: Sat Mar 10 11:28:55 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md   | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4743d4a2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5412aae..fdaaa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 
 
 A natural extension to this partitioned federation is to add a layer of 
software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets 
subclusters manage their own block pools independently, and supports 
rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets
+subclusters manage their own block pools independently, and will later support
+rebalancing of data across subclusters (see
+[HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123) for more
+information). The subclusters in RBF are not required to be independent HDFS
+clusters; a normal federation cluster (with multiple block pools) or a mixed
+setup of federated and independent clusters is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the 
proper subcluster, maintains the state of the namespaces, and provides 
mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
 
@@ -324,8 +326,8 @@ The connection to the State Store and the internal caching 
at the Router.
 | Property | Default | Description|
 |: |: |: |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects 
to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | 
Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class 
to implement the State Store. |
+| dfs.federation.router.store.serializer | 
`org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl`
 | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | 
`org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl`
 | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for 
the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State 
Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time 
in milliseconds for a membership record. |
@@ -336,8 +338,8 @@ Forwarding client requests to the right subcluster.
 
 | Property | Default | Description|
 |: |: |: |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | 
Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | 
MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | 
`org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class 
to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | 
`org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` 
| Class to resolve the namenode for a subcluster. |
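
For illustration (not part of the commit), overriding these properties in
`hdfs-site.xml` with the fully qualified class names documented above would look
roughly like:

```xml
<property>
  <name>dfs.federation.router.store.driver.class</name>
  <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
</property>
<property>
  <name>dfs.federation.router.file.resolver.client.class</name>
  <value>org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver</value>
</property>
```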
 
 ### Namenode monitoring
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] hadoop git commit: HADOOP-15292. Distcp's use of pread is slowing it down. Contributed by Virajith Jalaparti.

2018-03-13 Thread aengineer
HADOOP-15292. Distcp's use of pread is slowing it down.
Contributed by Virajith Jalaparti.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bd6b1fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bd6b1fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bd6b1fd

Branch: refs/heads/HDFS-7240
Commit: 3bd6b1fd85c44354c777ef4fda6415231505b2a4
Parents: b451889
Author: Steve Loughran 
Authored: Thu Mar 8 11:15:46 2018 +
Committer: Steve Loughran 
Committed: Thu Mar 8 11:15:46 2018 +

--
 .../tools/mapred/RetriableFileCopyCommand.java  | 24 ++
 .../hadoop/tools/util/ThrottledInputStream.java | 48 +++-
 .../hadoop/tools/mapred/TestCopyMapper.java | 24 +-
 3 files changed, 66 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd6b1fd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 21f621a..0311061 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -260,7 +260,8 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 boolean finished = false;
 try {
   inStream = getInputStream(source, context.getConfiguration());
-  int bytesRead = readBytes(inStream, buf, sourceOffset);
+  seekIfRequired(inStream, sourceOffset);
+  int bytesRead = readBytes(inStream, buf);
   while (bytesRead >= 0) {
 if (chunkLength > 0 &&
 (totalBytesRead + bytesRead) >= chunkLength) {
@@ -276,7 +277,7 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 if (finished) {
   break;
 }
-bytesRead = readBytes(inStream, buf, sourceOffset);
+bytesRead = readBytes(inStream, buf);
   }
   outStream.close();
   outStream = null;
@@ -299,13 +300,20 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 context.setStatus(message.toString());
   }
 
-  private static int readBytes(ThrottledInputStream inStream, byte buf[],
-  long position) throws IOException {
+  private static int readBytes(ThrottledInputStream inStream, byte buf[])
+  throws IOException {
+try {
+  return inStream.read(buf);
+} catch (IOException e) {
+  throw new CopyReadException(e);
+}
+  }
+
+  private static void seekIfRequired(ThrottledInputStream inStream,
+  long sourceOffset) throws IOException {
 try {
-  if (position == 0) {
-return inStream.read(buf);
-  } else {
-return inStream.read(position, buf, 0, buf.length);
+  if (sourceOffset != inStream.getPos()) {
+inStream.seek(sourceOffset);
   }
 } catch (IOException e) {
   throw new CopyReadException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd6b1fd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
index 2d2f10c..4d3676a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.tools.util;
 
-import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,7 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short 
interval,
  * the average tends towards the specified maximum, overall.)
  */
-public class ThrottledInputStream extends InputStream {
+public class ThrottledInputStream extends InputStream implements Seekable {
 
   private final InputStream rawStream;
   private final float maxBytesPerSec;
@@ -95,25 +95,6 @@ public class ThrottledInputStream extends InputStream {
 return readLen;
   }
 
-  /**
-   * Read bytes starting from the specified position. This requires rawStream 
is
-   * an instance of {@link PositionedReadable}.
-   */
-  public int 

[06/50] hadoop git commit: HADOOP-15273.distcp can't handle remote stores with different checksum algorithms. Contributed by Steve Loughran.

2018-03-13 Thread aengineer
HADOOP-15273.distcp can't handle remote stores with different checksum 
algorithms.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef4d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef4d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef4d942

Branch: refs/heads/HDFS-7240
Commit: 7ef4d942dd96232b0743a40ed25f77065254f94d
Parents: 3bd6b1f
Author: Steve Loughran 
Authored: Thu Mar 8 11:24:06 2018 +
Committer: Steve Loughran 
Committed: Thu Mar 8 11:24:06 2018 +

--
 .../org/apache/hadoop/tools/DistCpOptions.java  |  5 
 .../tools/mapred/RetriableFileCopyCommand.java  | 29 +++-
 .../hadoop/tools/mapred/TestCopyMapper.java | 14 +-
 3 files changed, 29 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ece1a94..f33f7fd 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -534,11 +534,6 @@ public final class DistCpOptions {
 + "mutually exclusive");
   }
 
-  if (!syncFolder && skipCRC) {
-throw new IllegalArgumentException(
-"Skip CRC is valid only with update options");
-  }
-
   if (!syncFolder && append) {
 throw new IllegalArgumentException(
 "Append is valid only with update options");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 0311061..55f90d0 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -210,15 +210,30 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
   throws IOException {
 if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
 targetFS, target)) {
-  StringBuilder errorMessage = new StringBuilder("Check-sum mismatch 
between ")
-  .append(source).append(" and ").append(target).append(".");
-  if (sourceFS.getFileStatus(source).getBlockSize() !=
+  StringBuilder errorMessage =
+  new StringBuilder("Checksum mismatch between ")
+  .append(source).append(" and ").append(target).append(".");
+  boolean addSkipHint = false;
+  String srcScheme = sourceFS.getScheme();
+  String targetScheme = targetFS.getScheme();
+  if (!srcScheme.equals(targetScheme)
+  && !(srcScheme.contains("hdfs") && targetScheme.contains("hdfs"))) {
+// the filesystems are different and they aren't both hdfs connectors
+errorMessage.append("Source and destination filesystems are of"
++ " different types\n")
+.append("Their checksum algorithms may be incompatible");
+addSkipHint = true;
+  } else if (sourceFS.getFileStatus(source).getBlockSize() !=
   targetFS.getFileStatus(target).getBlockSize()) {
-errorMessage.append(" Source and target differ in block-size.")
-.append(" Use -pb to preserve block-sizes during copy.")
-.append(" Alternatively, skip checksum-checks altogether, using 
-skipCrc.")
+errorMessage.append(" Source and target differ in block-size.\n")
+.append(" Use -pb to preserve block-sizes during copy.");
+addSkipHint = true;
+  }
+  if (addSkipHint) {
+errorMessage.append(" You can skip checksum-checks altogether "
++ " with -skipcrccheck.\n")
 .append(" (NOTE: By skipping checksums, one runs the risk of " +
-"masking data-corruption during file-transfer.)");
+"masking data-corruption during file-transfer.)\n");
   }
   throw new IOException(errorMessage.toString());
 }
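
A usage sketch tied to the hint above (editorial, not from the commit; paths are
placeholders): with the `-update`-only restriction on skipping CRC checks removed
in `DistCpOptions`, a copy between stores whose checksum algorithms differ can
disable the cross-store checksum comparison directly:

```bash
# Sketch: copy from HDFS to an object store with a different checksum type,
# skipping the checksum comparison that would otherwise fail.
hadoop distcp -skipcrccheck hdfs://source-cluster/data s3a://example-bucket/data
```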


[25/50] hadoop git commit: YARN-7523. Introduce description and version field in Service record. Contributed by Chandni Singh

2018-03-13 Thread aengineer
YARN-7523. Introduce description and version field in Service record. 
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1f5251f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1f5251f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1f5251f

Branch: refs/heads/HDFS-7240
Commit: e1f5251f3c0d6e74af1b52eda6633b728804fe2a
Parents: ea18e70
Author: Billie Rinaldi 
Authored: Sat Mar 10 07:49:10 2018 -0800
Committer: Billie Rinaldi 
Committed: Sat Mar 10 07:49:10 2018 -0800

--
 .../hadoop/yarn/service/webapp/ApiServer.java   |  4 +-
 .../definition/YARN-Services-Examples.md|  6 +++
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  7 +++
 .../hadoop/yarn/service/TestApiServer.java  |  1 +
 .../src/test/resources/example-app.json |  1 +
 .../examples/httpd-no-dns/httpd-no-dns.json |  1 +
 .../examples/httpd/httpd.json   |  1 +
 .../examples/sleeper/sleeper.json   |  1 +
 .../yarn/service/api/records/Service.java   | 46 +++-
 .../exceptions/RestApiErrorMessages.java|  2 +
 .../yarn/service/utils/ServiceApiUtil.java  |  6 +++
 .../hadoop/yarn/service/ServiceTestUtils.java   |  1 +
 .../hadoop/yarn/service/TestServiceApiUtil.java | 13 ++
 .../yarn/service/TestYarnNativeServices.java|  7 +++
 .../src/test/resources/example-app.json |  1 +
 .../service/conf/examples/app-override.json |  1 +
 .../hadoop/yarn/service/conf/examples/app.json  |  1 +
 .../yarn/service/conf/examples/default.json |  1 +
 .../yarn/service/conf/examples/external0.json   |  1 +
 .../yarn/service/conf/examples/external1.json   |  1 +
 .../yarn/service/conf/examples/external2.json   |  1 +
 .../markdown/yarn-service/YarnServiceAPI.md |  8 
 22 files changed, 108 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1f5251f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 0deeae7..e7979b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -123,8 +123,8 @@ public class ApiServer {
 return null;
   }
 });
-serviceStatus.setDiagnostics("Service "+service.getName() +
-" saved.");
+serviceStatus.setDiagnostics("Service " + service.getName() +
+" version " + service.getVersion() + " saved.");
   } else {
 ApplicationId applicationId = ugi
 .doAs(new PrivilegedExceptionAction() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1f5251f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index e4cdc7b..22f941e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -21,6 +21,8 @@ POST URL - http://localhost:9191/ws/v1/services
 ```json
 {
   "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example",
   "components" :
 [
   {
@@ -48,6 +50,8 @@ Note, lifetime value of -1 means unlimited lifetime.
 ```json
 {
 "name": "hello-world",
+"version": "1.0.0",
+"description": "hello world example",
 "id": "application_1503963985568_0002",
 "lifetime": -1,
 "components": [
@@ -154,6 +158,8 @@ POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
 ```json
 {
   "name": "hbase-app-1",
+  "version": "1.0.0",
+  "description": "hbase 

[03/50] hadoop git commit: HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu Asanuma.

2018-03-13 Thread aengineer
HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9a6d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9a6d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9a6d9

Branch: refs/heads/HDFS-7240
Commit: 4cc9a6d9bb34329d6de30706d5432c7cb675bb88
Parents: 583f459
Author: Yiqun Lin 
Authored: Thu Mar 8 16:02:34 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Mar 8 16:02:34 2018 +0800

--
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9a6d9/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 9b2d9de..8b9cfda 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -223,7 +223,7 @@ functionality, except the mount table is managed on the 
server-side by the
 routing layer rather than on the client. This simplifies access to a federated
 cluster for existing HDFS clients.
 
-See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
 HDFS Router-based Federation
 [documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
 more details.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] hadoop git commit: HDFS-10618. TestPendingReconstruction#testPendingAndInvalidate is flaky due to race condition. Contributed by Eric Badger.

2018-03-13 Thread aengineer
HDFS-10618. TestPendingReconstruction#testPendingAndInvalidate is flaky due to 
race condition. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cceb68ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cceb68ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cceb68ff

Branch: refs/heads/HDFS-7240
Commit: cceb68ffeaebe31c11012faa294fe027d04706a8
Parents: ac627f5
Author: Anu Engineer 
Authored: Mon Mar 12 12:07:22 2018 -0700
Committer: Anu Engineer 
Committed: Mon Mar 12 12:07:22 2018 -0700

--
 .../server/blockmanagement/TestPendingReconstruction.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cceb68ff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 29ee953..dc37ec06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -456,14 +456,14 @@ public class TestPendingReconstruction {
 "STORAGE_ID", "TEST");
 bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
 "STORAGE_ID", "TEST");
+BlockManagerTestUtil.computeAllPendingWork(bm);
+BlockManagerTestUtil.updateState(bm);
+assertEquals(bm.getPendingReconstructionBlocksCount(), 1L);
+BlockInfo storedBlock = 
bm.getStoredBlock(block.getBlock().getLocalBlock());
+assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
   } finally {
 cluster.getNamesystem().writeUnlock();
   }
-  BlockManagerTestUtil.computeAllPendingWork(bm);
-  BlockManagerTestUtil.updateState(bm);
-  assertEquals(bm.getPendingReconstructionBlocksCount(), 1L);
-  BlockInfo storedBlock = 
bm.getStoredBlock(block.getBlock().getLocalBlock());
-  assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
 
   // 4. delete the file
   fs.delete(filePath, true);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] hadoop git commit: HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.

2018-03-13 Thread aengineer
HDFS-11394. Support for getting erasure coding policy through 
WebHDFS#FileStatus.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a082fbe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a082fbe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a082fbe

Branch: refs/heads/HDFS-7240
Commit: 9a082fbe6e302df7139b65a23be9a39acd87715d
Parents: 99ab511
Author: Hanisha Koneru 
Authored: Fri Mar 9 13:20:32 2018 -0800
Committer: Hanisha Koneru 
Committed: Fri Mar 9 13:20:32 2018 -0800

--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   3 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 111 +++
 2 files changed, 114 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a082fbe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 095b9ac..83fbc6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -134,6 +134,9 @@ public class JsonUtil {
 }
 if (status.isErasureCoded()) {
   m.put("ecBit", true);
+  if (status.getErasureCodingPolicy() != null) {
+m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+  }
 }
 if (status.isSnapshotEnabled()) {
   m.put("snapshotEnabled", status.isSnapshotEnabled());
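
With this change, a GETFILESTATUS response for a file under an erasure-coded
directory is expected to carry the policy name next to the existing `ecBit` flag.
A rough sketch of the relevant JSON fields (editorial; values are illustrative,
not taken from the commit):

```json
{
  "FileStatus": {
    "type": "FILE",
    "length": 1024,
    "replication": 1,
    "ecBit": true,
    "ecPolicy": "RS-3-2-1024k"
  }
}
```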

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a082fbe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index c94122e..8571d82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -44,6 +44,7 @@ import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Random;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -109,6 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.log4j.Level;
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -1578,6 +1581,114 @@ public class TestWebHDFS {
 }
   }
 
+  /**
+   * Tests that the LISTSTATUS and GETFILESTATUS WebHDFS calls return the
+   * ecPolicy for EC files.
+   */
+  @Test(timeout=30)
+  public void testECPolicyInFileStatus() throws Exception {
+final Configuration conf = WebHdfsTestUtil.createConf();
+final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+final String ecPolicyName = ecPolicy.getName();
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.build();
+cluster.waitActive();
+final DistributedFileSystem fs = cluster.getFileSystem();
+
+// Create an EC dir and write a test file in it
+final Path ecDir = new Path("/ec");
+Path ecFile = new Path(ecDir, "ec_file.txt");
+Path nonEcFile = new Path(ecDir, "non_ec_file.txt");
+fs.mkdirs(ecDir);
+
+// Create a non-EC file before enabling ec policy
+DFSTestUtil.createFile(fs, nonEcFile, 1024, (short) 1, 0);
+
+fs.enableErasureCodingPolicy(ecPolicyName);
+fs.setErasureCodingPolicy(ecDir, ecPolicyName);
+
+// Create a EC file
+DFSTestUtil.createFile(fs, ecFile, 1024, (short) 1, 0);
+
+// Query webhdfs REST API 

[2/2] hadoop git commit: YARN-7657. Queue Mapping could provide options to provide 'user' specific auto-created queues under a specified group parent queue. (Suma Shivaprasad via wangda)

2018-03-13 Thread wangda
YARN-7657. Queue Mapping could provide options to provide 'user' specific 
auto-created queues under a specified group parent queue. (Suma Shivaprasad via 
wangda)

Change-Id: I32d566e8727840e43c0d66e39a77edef017e3a83


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b167d607
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b167d607
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b167d607

Branch: refs/heads/trunk
Commit: b167d60763f4b9ac69976df2da5d988e5faa85e0
Parents: a5b27b3
Author: Wangda Tan 
Authored: Tue Mar 13 17:55:51 2018 -0700
Committer: Wangda Tan 
Committed: Tue Mar 13 17:55:51 2018 -0700

--
 .../UserGroupMappingPlacementRule.java  |  7 +++
 .../server/resourcemanager/TestAppManager.java  |  4 +-
 .../TestUserGroupMappingPlacementRule.java  | 21 +
 .../capacity/TestCapacityScheduler.java | 34 ++
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 47 ++--
 .../TestCapacitySchedulerAutoQueueCreation.java | 43 --
 .../TestQueueManagementDynamicEditPolicy.java   |  4 +-
 7 files changed, 139 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b167d607/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
index d03b832..1f95a59 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
@@ -104,6 +104,10 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   return parentQueue;
 }
 
+public boolean hasParentQueue() {
+  return parentQueue != null;
+}
+
 public MappingType getType() {
   return type;
 }
@@ -164,6 +168,9 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   if (mapping.type == MappingType.GROUP) {
 for (String userGroups : groups.getGroups(user)) {
   if (userGroups.equals(mapping.source)) {
+if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
+  return getPlacementContext(mapping, user);
+}
 return getPlacementContext(mapping);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b167d607/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 7c2ce64..1a1b527 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -410,12 +410,12 @@ public class TestAppManager{
 new ClientToAMTokenSecretManagerInRM(), 
newMockRMContext.getScheduler(),
 masterService, new ApplicationACLsManager(conf), conf);
 
-//only user test has permission to submit to 'test' queue
+//only user test has permission to submit to 'user1' queue
 newAppMonitor.submitApplication(asContext, "user1");
 
 try {
   //should fail since user does not have permission to submit to queue
-  // 'test'
+  // 'managedparent'
   asContext.setApplicationId(appId = MockApps.newAppID(2));
   newAppMonitor.submitApplication(asContext, "user2");
 } catch (YarnException e) {


[1/2] hadoop git commit: YARN-5015. Support sliding window retry capability for container restart. (Chandni Singh via wangda)

2018-03-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9714fc1dd -> b167d6076


YARN-5015. Support sliding window retry capability for container restart. 
(Chandni Singh via wangda)

Change-Id: I07addd3e4ba8d98456ee2ff1d5c540a38fe61dea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5b27b3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5b27b3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5b27b3c

Branch: refs/heads/trunk
Commit: a5b27b3c678ad2f5cb8dbfa1b60ef5cd365f8bde
Parents: 9714fc1
Author: Wangda Tan 
Authored: Tue Mar 13 17:55:17 2018 -0700
Committer: Wangda Tan 
Committed: Tue Mar 13 17:55:17 2018 -0700

--
 .../yarn/api/records/ContainerRetryContext.java |  21 ++-
 .../src/main/proto/yarn_protos.proto|   1 +
 .../distributedshell/ApplicationMaster.java |  10 +-
 .../applications/distributedshell/Client.java   |   7 +
 .../impl/pb/ContainerRetryContextPBImpl.java|  15 ++
 .../container/ContainerImpl.java|  86 +-
 .../container/SlidingWindowRetryPolicy.java | 165 +++
 .../recovery/NMLeveldbStateStoreService.java|  24 +++
 .../recovery/NMNullStateStoreService.java   |   5 +
 .../recovery/NMStateStoreService.java   |  21 +++
 .../container/TestContainer.java|  37 +
 .../container/TestSlidingWindowRetryPolicy.java |  77 +
 .../recovery/NMMemoryStateStoreService.java |   9 +
 .../TestNMLeveldbStateStoreService.java |  16 ++
 14 files changed, 451 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5b27b3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerRetryContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerRetryContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerRetryContext.java
index ef8bd17..7fb0036 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerRetryContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerRetryContext.java
@@ -49,6 +49,13 @@ import java.util.Set;
  *   
  *   retryInterval specifies delaying some time before relaunch
  *   container, the unit is millisecond.
+ *   
+ * failuresValidityInterval: default value is -1.
+ * When failuresValidityInterval in milliseconds is set to {@literal >} 0,
+ * failures that happened outside the failuresValidityInterval window are
+ * not counted towards the failure count. If the failure count reaches
+ * maxRetries, the container is failed.
+ *   
  * 
  */
 @Public
@@ -63,16 +70,25 @@ public abstract class ContainerRetryContext {
   @Unstable
   public static ContainerRetryContext newInstance(
   ContainerRetryPolicy retryPolicy, Set<Integer> errorCodes,
-  int maxRetries, int retryInterval) {
+  int maxRetries, int retryInterval, long failuresValidityInterval) {
 ContainerRetryContext containerRetryContext =
 Records.newRecord(ContainerRetryContext.class);
 containerRetryContext.setRetryPolicy(retryPolicy);
 containerRetryContext.setErrorCodes(errorCodes);
 containerRetryContext.setMaxRetries(maxRetries);
 containerRetryContext.setRetryInterval(retryInterval);
+
containerRetryContext.setFailuresValidityInterval(failuresValidityInterval);
 return containerRetryContext;
   }
 
+  @Private
+  @Unstable
+  public static ContainerRetryContext newInstance(
+  ContainerRetryPolicy retryPolicy, Set<Integer> errorCodes,
+  int maxRetries, int retryInterval) {
+return newInstance(retryPolicy, errorCodes, maxRetries, retryInterval, -1);
+  }
+
   public abstract ContainerRetryPolicy getRetryPolicy();
   public abstract void setRetryPolicy(ContainerRetryPolicy retryPolicy);
   public abstract Set<Integer> getErrorCodes();
@@ -81,4 +97,7 @@ public abstract class ContainerRetryContext {
   public abstract void setMaxRetries(int maxRetries);
   public abstract int getRetryInterval();
   public abstract void setRetryInterval(int retryInterval);
+  public abstract long getFailuresValidityInterval();
+  public abstract void setFailuresValidityInterval(
+  long failuresValidityInterval);
 }
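
A brief editorial sketch (not part of the commit) of how an application might use
the extended factory method above to request sliding-window retries; `clc` is an
assumed `ContainerLaunchContext`, and the usual org.apache.hadoop.yarn.api.records
imports are omitted:

```java
// Sketch: allow up to 3 restarts, but only count failures that occurred
// within the last 10 minutes (failuresValidityInterval = 600000 ms).
ContainerRetryContext retryContext = ContainerRetryContext.newInstance(
    ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, // retry regardless of exit code
    null,       // no specific error codes needed for this policy
    3,          // maxRetries
    1000,       // retryInterval in milliseconds between relaunches
    600000L);   // failuresValidityInterval: sliding window in milliseconds

clc.setContainerRetryContext(retryContext); // clc: assumed ContainerLaunchContext
```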

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5b27b3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 

hadoop git commit: HDFS-13137. Ozone: Ozonefs read fails because ChunkGroupInputStream#read does not iterate through all the blocks in the key. Contributed by Mukul Kumar Singh.

2018-03-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 6690ae738 -> faa01f320


HDFS-13137. Ozone: Ozonefs read fails because ChunkGroupInputStream#read does 
not iterate through all the blocks in the key.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faa01f32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faa01f32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faa01f32

Branch: refs/heads/HDFS-7240
Commit: faa01f32027de4593026941f0a36cae644f6d436
Parents: 6690ae7
Author: Anu Engineer 
Authored: Tue Mar 13 17:27:57 2018 -0700
Committer: Anu Engineer 
Committed: Tue Mar 13 17:27:57 2018 -0700

--
 .../hadoop/ozone/client/OzoneClientFactory.java |   7 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  10 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |   5 +-
 .../apache/hadoop/ozone/ksm/KeyManagerImpl.java |   6 +-
 ...ceManagerProtocolServerSideTranslatorPB.java |  12 ++
 .../scm/pipelines/ratis/RatisManagerImpl.java   |   2 +-
 .../standalone/StandaloneManagerImpl.java   |   3 +-
 .../src/main/resources/ozone-default.xml|   9 ++
 .../hadoop/fs/ozone/OzoneFSInputStream.java |   5 +
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   4 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java | 155 +++
 .../fs/ozone/TestOzoneFileInterfaces.java   |   4 +-
 12 files changed, 204 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa01f32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 193d80d..5069220 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -27,6 +27,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Proxy;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys
@@ -285,15 +286,19 @@ public final class OzoneClientFactory {
   Class protocolClass, Configuration config)
   throws IOException {
 try {
-  LOG.info("Using {} as client protocol.",
+  LOG.debug("Using {} as client protocol.",
   protocolClass.getCanonicalName());
   Constructor ctor =
   protocolClass.getConstructor(Configuration.class);
   return ctor.newInstance(config);
 } catch (Exception e) {
   final String message = "Couldn't create protocol " + protocolClass;
+  LOG.error(message + " exception:" + e);
   if (e.getCause() instanceof IOException) {
 throw (IOException) e.getCause();
+  } else if (e instanceof InvocationTargetException) {
+throw new IOException(message,
+((InvocationTargetException) e).getTargetException());
   } else {
 throw new IOException(message, e);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa01f32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 0e3bc47..afe5e45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -89,13 +89,11 @@ public class ChunkGroupInputStream extends InputStream 
implements Seekable {
 
   @Override
   public synchronized int read() throws IOException {
-checkNotClosed();
-if (streamEntries.size() <= currentStreamIndex) {
+byte[] buf = new byte[1];
+if (read(buf, 0, 1) == EOF) {
   return EOF;
 }
-ChunkInputStreamEntry entry = streamEntries.get(currentStreamIndex);
-int data = entry.read();
-return data;
+return Byte.toUnsignedInt(buf[0]);
   }
 
   @Override
@@ -120,7 +118,7 @@ public class ChunkGroupInputStream extends InputStream 
implements Seekable {
   int actualLen = 

hadoop git commit: HDFS-13108. Ozone: OzoneFileSystem: Simplified url schema for Ozone File System. Contributed by Elek, Marton.

2018-03-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 0becabcef -> 6690ae738


HDFS-13108. Ozone: OzoneFileSystem: Simplified url schema for Ozone File 
System. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6690ae73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6690ae73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6690ae73

Branch: refs/heads/HDFS-7240
Commit: 6690ae7385359180330b92251493cadee91d2c56
Parents: 0becabc
Author: Anu Engineer 
Authored: Tue Mar 13 17:02:53 2018 -0700
Committer: Anu Engineer 
Committed: Tue Mar 13 17:02:53 2018 -0700

--
 .../ozone/web/interfaces/StorageHandler.java|   3 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |  53 +++---
 .../fs/ozone/TestOzoneFileInterfaces.java   | 160 +++
 .../hadoop/fs/ozone/contract/OzoneContract.java |   4 +-
 4 files changed, 153 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6690ae73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
index 67d2c87..6336c90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.ozone.web.response.VolumeInfo;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.OutputStream;
 
@@ -45,7 +46,7 @@ import java.io.OutputStream;
  * and another which will point to the HDFS backend.
  */
 @InterfaceAudience.Private
-public interface StorageHandler {
+public interface StorageHandler extends Closeable{
 
   /**
* Creates a Storage Volume.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6690ae73/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 34d6d3a..9f78f2d 100644
--- 
a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -27,6 +27,12 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.Objects;
 import java.util.Iterator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -47,9 +53,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.ReplicationFactor;
 import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.http.client.utils.URIBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -90,6 +93,9 @@ public class OzoneFileSystem extends FileSystem {
   private ReplicationType replicationType;
   private ReplicationFactor replicationFactor;
 
+  private static final Pattern URL_SCHEMA_PATTERN =
+  Pattern.compile("(.+)\\.([^\\.]+)");
+
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
 super.initialize(name, conf);
@@ -97,29 +103,20 @@ public class OzoneFileSystem extends FileSystem {
 Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name);
 assert getScheme().equals(name.getScheme());
 
-Path path = new Path(name.getPath());
-String hostStr = name.getAuthority();
-String volumeStr = null;
-String bucketStr = null;
+String authority = name.getAuthority();
 
-while (path != null && !path.isRoot()) {
-  bucketStr = volumeStr;
-  volumeStr = path.getName();
-  path = path.getParent();
-}
+Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
 
-if (hostStr == null) {
-  throw new IllegalArgumentException("No host 

hadoop git commit: HDFS-336. dfsadmin -report should report number of blocks from datanode. Contributed by Bharat Viswanadham.

2018-03-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 39537b7c8 -> 9714fc1dd


HDFS-336. dfsadmin -report should report number of blocks from datanode. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9714fc1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9714fc1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9714fc1d

Branch: refs/heads/trunk
Commit: 9714fc1dd48edb1c40d96d69ae82ed3b0fab7748
Parents: 39537b7
Author: Arpit Agarwal 
Authored: Tue Mar 13 16:39:17 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Mar 13 16:39:17 2018 -0700

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 31 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  5 ++-
 .../src/main/proto/hdfs.proto   |  1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  1 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 38 
 .../src/test/resources/testHDFSConf.xml |  4 +++
 6 files changed, 77 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9714fc1d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 0a8c915..c140d06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -56,6 +56,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private List<String> dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
   public static final DatanodeInfo[] EMPTY_ARRAY = {};
+  private int numBlocks;
 
   // Datanode administrative states
   public enum AdminStates {
@@ -106,6 +107,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.upgradeDomain = from.getUpgradeDomain();
 this.lastBlockReportTime = from.getLastBlockReportTime();
 this.lastBlockReportMonotonic = from.getLastBlockReportMonotonic();
+this.numBlocks = from.getNumBlocks();
   }
 
   protected DatanodeInfo(DatanodeID nodeID) {
@@ -123,6 +125,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.adminState = null;
 this.lastBlockReportTime = 0L;
 this.lastBlockReportMonotonic = 0L;
+this.numBlocks = 0;
   }
 
   protected DatanodeInfo(DatanodeID nodeID, String location) {
@@ -139,7 +142,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   final long lastUpdate, final long lastUpdateMonotonic,
   final int xceiverCount, final String networkLocation,
   final AdminStates adminState, final String upgradeDomain,
-  final long lastBlockReportTime, final long lastBlockReportMonotonic) {
+  final long lastBlockReportTime, final long lastBlockReportMonotonic,
+   final int blockCount) {
 super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
 ipcPort);
 this.capacity = capacity;
@@ -157,6 +161,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.upgradeDomain = upgradeDomain;
 this.lastBlockReportTime = lastBlockReportTime;
 this.lastBlockReportMonotonic = lastBlockReportMonotonic;
+this.numBlocks = blockCount;
   }
 
   /** Network location name. */
@@ -247,6 +252,13 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   public long getLastUpdateMonotonic() { return lastUpdateMonotonic;}
 
   /**
+   * @return Num of Blocks
+   */
+  public int getNumBlocks() {
+return numBlocks;
+  }
+
+  /**
* Set lastUpdate monotonic time
*/
   public void setLastUpdateMonotonic(long lastUpdateMonotonic) {
@@ -301,6 +313,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.xceiverCount = xceiverCount;
   }
 
+  /** Sets number of blocks. */
+  public void setNumBlocks(int blockCount) {
+this.numBlocks = blockCount;
+  }
+
   /** network location */
   @Override
   public String getNetworkLocation() {return location;}
@@ -351,6 +368,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 float cacheUsedPercent = getCacheUsedPercent();
 float cacheRemainingPercent = getCacheRemainingPercent();
 String lookupName = NetUtils.getHostNameOfIP(getName());
+int blockCount = getNumBlocks();
 
 buffer.append("Name: ").append(getName());
 if (lookupName != null) {
@@ -406,6 

hadoop git commit: HDFS-13235. DiskBalancer: Update Documentation to add newly added options. Contributed by Bharat Viswanadham.

2018-03-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9d6994da1 -> 39537b7c8


HDFS-13235. DiskBalancer: Update Documentation to add newly added options. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39537b7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39537b7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39537b7c

Branch: refs/heads/trunk
Commit: 39537b7c84dddfa8084308459565ab77fd24abd3
Parents: 9d6994d
Author: Arpit Agarwal 
Authored: Tue Mar 13 16:35:51 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Mar 13 16:35:51 2018 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 6 +++---
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md  | 6 +-
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2d3c5e7..f90daba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4651,9 +4651,9 @@
   <name>dfs.disk.balancer.plan.valid.interval</name>
   <value>1d</value>
   <description>
-  Maximum number of hours the disk balancer plan is valid.
-  This setting supports multiple time unit suffixes as described
-  in dfs.heartbeat.interval. If no suffix is specified then milliseconds
+  Maximum amount of time disk balancer plan is valid. This setting
+  supports multiple time unit suffixes as described in
+  dfs.heartbeat.interval. If no suffix is specified then milliseconds
     is assumed.
   </description>
 </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 6e1bd41..ed0233a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -79,6 +79,10 @@ Execute command takes a plan command executes it against the 
datanode that plan
 
 This executes the plan by reading datanode’s address from the plan file.
 
+| COMMAND\_OPTION| Description |
+|: |: |
+| `-skipDateCheck` |  Skip date check and force execute the plan.|
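
An invocation sketch for the new option (editorial; the plan file path is a
placeholder produced earlier by the `-plan` step):

```bash
# Sketch: execute a previously generated plan even if it is older than
# dfs.disk.balancer.plan.valid.interval, bypassing the date check.
hdfs diskbalancer -execute /system/diskbalancer/node1.plan.json -skipDateCheck
```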
+
 ### Query
 
 Query command gets the current status of the diskbalancer from a datanode.
@@ -122,7 +126,7 @@ There is a set of diskbalancer settings that can be 
controlled via hdfs-site.xml
 |`dfs.disk.balancer.max.disk.errors`| sets the value of maximum number of 
errors we can ignore for a specific move between two disks before it is 
abandoned. For example, if a plan has 3 pair of disks to copy between , and the 
first disk set encounters more than 5 errors, then we abandon the first copy 
and start the second copy in the plan. The default value of max errors is set 
to 5.|
 |`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies 
when we have reached a good enough value for any copy step. For example, if you 
specify 10% then getting close to 10% of the target value is good enough.|
 |`dfs.disk.balancer.plan.threshold.percent`| The percentage threshold value 
for volume Data Density in a plan. If the absolute value of volume Data Density 
which is out of threshold value in a node, it means that the volumes 
corresponding to the disks should do the balancing in the plan. The default 
value is 10.|
-
+|`dfs.disk.balancer.plan.valid.interval`| Maximum amount of time disk balancer 
plan is valid. Supports the following suffixes (case insensitive): ms(millis), 
s(sec), m(min), h(hour), d(day) to specify the time (such as 2s, 2m, 1h, etc.). 
If no suffix is specified then milliseconds is assumed. Default value is 1d|
  Debugging
 -
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] hadoop git commit: Updated timeline reader to use AuthenticationFilter

2018-03-13 Thread arp
Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea18e70a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea18e70a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea18e70a

Branch: refs/heads/HDFS-12996
Commit: ea18e70a74e811ffa48c7e18e68510dd37dda63d
Parents: fa6a8b7
Author: Wangda Tan 
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Wangda Tan 
Committed: Fri Mar 9 22:51:08 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea18e70a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package 
org.apache.hadoop.yarn.server.timelineservice.reader.security;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import 
org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
 
 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
 TimelineAuthenticationFilterInitializer{
 
   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
* 
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
* YARN configuration properties prefixed with
* {@value TimelineAuthenticationFilterInitializer#PREFIX}.
*
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
   public void initFilter(FilterContainer container, Configuration conf) {
 setAuthFilterConfig(conf);
 container.addGlobalFilter("Timeline Reader Authentication Filter",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 getFilterConfig());
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] hadoop git commit: HDFS-10618. TestPendingReconstruction#testPendingAndInvalidate is flaky due to race condition. Contributed by Eric Badger.

2018-03-13 Thread arp
HDFS-10618. TestPendingReconstruction#testPendingAndInvalidate is flaky due to 
race condition. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cceb68ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cceb68ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cceb68ff

Branch: refs/heads/HDFS-12996
Commit: cceb68ffeaebe31c11012faa294fe027d04706a8
Parents: ac627f5
Author: Anu Engineer 
Authored: Mon Mar 12 12:07:22 2018 -0700
Committer: Anu Engineer 
Committed: Mon Mar 12 12:07:22 2018 -0700

--
 .../server/blockmanagement/TestPendingReconstruction.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cceb68ff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 29ee953..dc37ec06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -456,14 +456,14 @@ public class TestPendingReconstruction {
 "STORAGE_ID", "TEST");
 bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
 "STORAGE_ID", "TEST");
+BlockManagerTestUtil.computeAllPendingWork(bm);
+BlockManagerTestUtil.updateState(bm);
+assertEquals(bm.getPendingReconstructionBlocksCount(), 1L);
+BlockInfo storedBlock = 
bm.getStoredBlock(block.getBlock().getLocalBlock());
+assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
   } finally {
 cluster.getNamesystem().writeUnlock();
   }
-  BlockManagerTestUtil.computeAllPendingWork(bm);
-  BlockManagerTestUtil.updateState(bm);
-  assertEquals(bm.getPendingReconstructionBlocksCount(), 1L);
-  BlockInfo storedBlock = 
bm.getStoredBlock(block.getBlock().getLocalBlock());
-  assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
 
   // 4. delete the file
   fs.delete(filePath, true);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] hadoop git commit: Revert "YARN-7891. LogAggregationIndexedFileController should support read from HAR file. (Xuan Gong via wangda)"

2018-03-13 Thread arp
Revert "YARN-7891. LogAggregationIndexedFileController should support read from 
HAR file. (Xuan Gong via wangda)"

This reverts commit 4d53ef7eefb14661d824924e503a910de1ae997f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e718ac59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e718ac59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e718ac59

Branch: refs/heads/HDFS-12996
Commit: e718ac597f2225cb4946e1ac4b3986c336645643
Parents: 19ae442
Author: Wangda Tan 
Authored: Wed Mar 7 15:42:29 2018 -0800
Committer: Wangda Tan 
Committed: Wed Mar 7 15:46:47 2018 -0800

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   4 -
 .../LogAggregationIndexedFileController.java|  60 ++---
 .../TestLogAggregationIndexFileController.java  |  54 
 .../application_123456_0001.har/_SUCCESS|   0
 .../application_123456_0001.har/_index  |   3 -
 .../application_123456_0001.har/_masterindex|   2 -
 .../application_123456_0001.har/part-0  | Bin 4123 -> 0 bytes
 .../RegisterNodeManagerRequest.java |   5 -
 .../pb/RegisterNodeManagerRequestPBImpl.java|  79 --
 .../yarn_server_common_service_protos.proto |   1 -
 .../hadoop/yarn/server/nodemanager/Context.java |   4 +-
 .../yarn/server/nodemanager/NodeManager.java|  12 -
 .../nodemanager/NodeStatusUpdaterImpl.java  |  14 --
 .../containermanager/ContainerManagerImpl.java  |  15 --
 .../logaggregation/AppLogAggregatorImpl.java|  11 +-
 .../tracker/NMLogAggregationStatusTracker.java  | 244 ---
 .../amrmproxy/BaseAMRMProxyTest.java|   6 -
 .../TestNMLogAggregationStatusTracker.java  | 124 --
 .../resourcemanager/ResourceTrackerService.java |  17 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |   6 -
 .../rmnode/RMNodeStartedEvent.java  |  11 -
 21 files changed, 26 insertions(+), 646 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e718ac59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 5378072..a235478 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -249,10 +249,6 @@
 
src/test/resources/application_1440536969523_0001.har/part-0
 
src/test/resources/application_1440536969523_0001.har/_masterindex
 
src/test/resources/application_1440536969523_0001.har/_SUCCESS
-
src/test/resources/application_123456_0001.har/_index
-
src/test/resources/application_123456_0001.har/part-0
-
src/test/resources/application_123456_0001.har/_masterindex
-
src/test/resources/application_123456_0001.har/_SUCCESS
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e718ac59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 5bba2e0..56bae26 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -495,21 +495,16 @@ public class LogAggregationIndexedFileController
 boolean getAllContainers = (containerIdStr == null
 || containerIdStr.isEmpty());
 long size = logRequest.getBytes();
-RemoteIterator nodeFiles = LogAggregationUtils
-.getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner(),
+List nodeFiles = LogAggregationUtils
+.getRemoteNodeFileList(conf, appId, logRequest.getAppOwner(),
 this.remoteRootLogDir, this.remoteRootLogDirSuffix);
-if (!nodeFiles.hasNext()) {
+if (nodeFiles.isEmpty()) {
   throw new IOException("There is no available log fils for "
   + "application:" + appId);
 }
-List allFiles = getAllNodeFiles(nodeFiles, appId);
-if (allFiles.isEmpty()) {
-  

[09/50] hadoop git commit: HDFS-13232. RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns. Contributed by Chao Sun.

2018-03-13 Thread arp
HDFS-13232. RBF: ConnectionManager's cleanup task will compare each pool's own 
active conns with its total conns. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c2b969e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c2b969e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c2b969e

Branch: refs/heads/HDFS-12996
Commit: 0c2b969e0161a068bf9ae013c4b95508dfb90a8a
Parents: 7ef4d94
Author: Inigo Goiri 
Authored: Thu Mar 8 09:32:05 2018 -0800
Committer: Inigo Goiri 
Committed: Thu Mar 8 09:32:05 2018 -0800

--
 .../federation/router/ConnectionManager.java|  59 +-
 .../federation/router/ConnectionPoolId.java |   6 +
 .../router/TestConnectionManager.java   | 114 +++
 3 files changed, 153 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2b969e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 2e45280..594f489 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -32,6 +32,7 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -303,6 +304,38 @@ public class ConnectionManager {
 return JSON.toString(info);
   }
 
+  @VisibleForTesting
+  Map getPools() {
+return this.pools;
+  }
+
+  /**
+   * Clean the unused connections for this pool.
+   *
+   * @param pool Connection pool to cleanup.
+   */
+  @VisibleForTesting
+  void cleanup(ConnectionPool pool) {
+if (pool.getNumConnections() > pool.getMinSize()) {
+  // Check if the pool hasn't been active in a while or not 50% are used
+  long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
+  int total = pool.getNumConnections();
+  int active = pool.getNumActiveConnections();
+  if (timeSinceLastActive > connectionCleanupPeriodMs ||
+  active < MIN_ACTIVE_RATIO * total) {
+// Remove and close 1 connection
+List conns = pool.removeConnections(1);
+for (ConnectionContext conn : conns) {
+  conn.close();
+}
+LOG.debug("Removed connection {} used {} seconds ago. " +
+"Pool has {}/{} connections", pool.getConnectionPoolId(),
+TimeUnit.MILLISECONDS.toSeconds(timeSinceLastActive),
+pool.getNumConnections(), pool.getMaxSize());
+  }
+}
+  }
+
   /**
* Removes stale connections not accessed recently from the pool. This is
* invoked periodically.
@@ -350,32 +383,6 @@ public class ConnectionManager {
 }
   }
 }
-
-/**
- * Clean the unused connections for this pool.
- *
- * @param pool Connection pool to cleanup.
- */
-private void cleanup(ConnectionPool pool) {
-  if (pool.getNumConnections() > pool.getMinSize()) {
-// Check if the pool hasn't been active in a while or not 50% are used
-long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
-int total = pool.getNumConnections();
-int active = getNumActiveConnections();
-if (timeSinceLastActive > connectionCleanupPeriodMs ||
-active < MIN_ACTIVE_RATIO * total) {
-  // Remove and close 1 connection
-  List conns = pool.removeConnections(1);
-  for (ConnectionContext conn : conns) {
-conn.close();
-  }
-  LOG.debug("Removed connection {} used {} seconds ago. " +
-  "Pool has {}/{} connections", pool.getConnectionPoolId(),
-  TimeUnit.MILLISECONDS.toSeconds(timeSinceLastActive),
-  pool.getNumConnections(), pool.getMaxSize());
-}
-  }
-}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c2b969e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java

[08/50] hadoop git commit: HADOOP-15273. distcp can't handle remote stores with different checksum algorithms. Contributed by Steve Loughran.

2018-03-13 Thread arp
HADOOP-15273. distcp can't handle remote stores with different checksum 
algorithms.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef4d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef4d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef4d942

Branch: refs/heads/HDFS-12996
Commit: 7ef4d942dd96232b0743a40ed25f77065254f94d
Parents: 3bd6b1f
Author: Steve Loughran 
Authored: Thu Mar 8 11:24:06 2018 +
Committer: Steve Loughran 
Committed: Thu Mar 8 11:24:06 2018 +

--
 .../org/apache/hadoop/tools/DistCpOptions.java  |  5 
 .../tools/mapred/RetriableFileCopyCommand.java  | 29 +++-
 .../hadoop/tools/mapred/TestCopyMapper.java | 14 +-
 3 files changed, 29 insertions(+), 19 deletions(-)
--
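
The hunk below adds a hint whenever checksums from the source and target stores cannot be meaningfully compared. A minimal sketch of that comparability idea, independent of DistCp itself — the paths are placeholders and the decision logic is illustrative, not the patch's exact code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumComparability {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path src = new Path(args[0]);   // e.g. an HDFS path
    Path dst = new Path(args[1]);   // e.g. a path on an object store

    FileChecksum srcSum = src.getFileSystem(conf).getFileChecksum(src);
    FileChecksum dstSum = dst.getFileSystem(conf).getFileChecksum(dst);

    // A store may return null, or an algorithm different from HDFS's
    // block-based CRC; in either case equality tells us nothing useful.
    if (srcSum == null || dstSum == null
        || !srcSum.getAlgorithmName().equals(dstSum.getAlgorithmName())) {
      System.out.println("Checksums are not comparable;"
          + " -pb or -skipcrccheck may be needed for this copy.");
    } else {
      System.out.println("Checksums equal: " + srcSum.equals(dstSum));
    }
  }
}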


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ece1a94..f33f7fd 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -534,11 +534,6 @@ public final class DistCpOptions {
 + "mutually exclusive");
   }
 
-  if (!syncFolder && skipCRC) {
-throw new IllegalArgumentException(
-"Skip CRC is valid only with update options");
-  }
-
   if (!syncFolder && append) {
 throw new IllegalArgumentException(
 "Append is valid only with update options");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 0311061..55f90d0 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -210,15 +210,30 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
   throws IOException {
 if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
 targetFS, target)) {
-  StringBuilder errorMessage = new StringBuilder("Check-sum mismatch 
between ")
-  .append(source).append(" and ").append(target).append(".");
-  if (sourceFS.getFileStatus(source).getBlockSize() !=
+  StringBuilder errorMessage =
+  new StringBuilder("Checksum mismatch between ")
+  .append(source).append(" and ").append(target).append(".");
+  boolean addSkipHint = false;
+  String srcScheme = sourceFS.getScheme();
+  String targetScheme = targetFS.getScheme();
+  if (!srcScheme.equals(targetScheme)
+  && !(srcScheme.contains("hdfs") && targetScheme.contains("hdfs"))) {
+// the filesystems are different and they aren't both hdfs connectors
+errorMessage.append("Source and destination filesystems are of"
++ " different types\n")
+.append("Their checksum algorithms may be incompatible");
+addSkipHint = true;
+  } else if (sourceFS.getFileStatus(source).getBlockSize() !=
   targetFS.getFileStatus(target).getBlockSize()) {
-errorMessage.append(" Source and target differ in block-size.")
-.append(" Use -pb to preserve block-sizes during copy.")
-.append(" Alternatively, skip checksum-checks altogether, using 
-skipCrc.")
+errorMessage.append(" Source and target differ in block-size.\n")
+.append(" Use -pb to preserve block-sizes during copy.");
+addSkipHint = true;
+  }
+  if (addSkipHint) {
+errorMessage.append(" You can skip checksum-checks altogether "
++ " with -skipcrccheck.\n")
 .append(" (NOTE: By skipping checksums, one runs the risk of " +
-"masking data-corruption during file-transfer.)");
+"masking data-corruption during file-transfer.)\n");
   }
   throw new IOException(errorMessage.toString());
 }


[01/50] hadoop git commit: HDFS-13176. WebHdfs file path gets truncated when having semicolon (; ) inside. Contributed by Zsolt Venczel.

2018-03-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12996 85a0ed79b -> 8d4aa014d


HDFS-13176. WebHdfs file path gets truncated when having semicolon (;) inside. 
Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46d29e3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46d29e3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46d29e3d

Branch: refs/heads/HDFS-12996
Commit: 46d29e3d7ee8dc9bb1818b886d9cc5336b1d67a4
Parents: 037d783
Author: Sean Mackrory 
Authored: Wed Mar 7 12:11:52 2018 -0700
Committer: Sean Mackrory 
Committed: Wed Mar 7 13:33:41 2018 -0700

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 28 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java|  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  5 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  | 57 
 4 files changed, 90 insertions(+), 3 deletions(-)
--
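
The change below decodes the incoming path once and then re-encodes it segment by segment, so characters such as ';' no longer truncate the WebHDFS URL. A self-contained sketch of that round trip, using only the JDK calls the patch relies on (the sample path is illustrative):

import java.net.URLDecoder;
import java.net.URLEncoder;

public class SegmentEncodingRoundTrip {
  public static void main(String[] args) throws Exception {
    String path = "/user/test/semi;colon.txt";

    // Encode every '/'-separated segment on its own so the separators survive.
    StringBuilder encoded = new StringBuilder();
    for (String segment : path.split("/")) {
      encoded.append("/").append(URLEncoder.encode(segment, "UTF-8"));
    }
    // The empty element before the first '/' adds an extra slash; drop it.
    String encodedPath = encoded.substring(1);

    System.out.println(encodedPath);
    // -> /user/test/semi%3Bcolon.txt
    System.out.println(URLDecoder.decode(encodedPath, "UTF-8"));
    // -> /user/test/semi;colon.txt
  }
}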


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d29e3d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 45260f3..1c919c4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -37,6 +37,8 @@ import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -598,8 +600,32 @@ public class WebHdfsFileSystem extends FileSystem
   URL toUrl(final HttpOpParam.Op op, final Path fspath,
   final Param... parameters) throws IOException {
 //initialize URI path and query
+
+Path encodedFSPath = fspath;
+if (fspath != null) {
+  URI fspathUri = fspath.toUri();
+  String fspathUriDecoded = fspathUri.getPath();
+  try {
+fspathUriDecoded = URLDecoder.decode(fspathUri.getPath(), "UTF-8");
+  } catch (IllegalArgumentException ex) {
+LOG.trace("Cannot decode URL encoded file", ex);
+  }
+  String[] fspathItems = fspathUriDecoded.split("/");
+
+  if (fspathItems.length > 0) {
+StringBuilder fsPathEncodedItems = new StringBuilder();
+for (String fsPathItem : fspathItems) {
+  fsPathEncodedItems.append("/");
+  fsPathEncodedItems.append(URLEncoder.encode(fsPathItem, "UTF-8"));
+}
+encodedFSPath = new Path(fspathUri.getScheme(),
+fspathUri.getAuthority(), fsPathEncodedItems.substring(1));
+  }
+}
+
 final String path = PATH_PREFIX
-+ (fspath == null? "/": makeQualified(fspath).toUri().getRawPath());
++ (encodedFSPath == null ? "/" :
+makeQualified(encodedFSPath).toUri().getRawPath());
 final String query = op.toQueryString()
 + Param.toSortedString("&", getAuthParameters(op))
 + Param.toSortedString("&", parameters);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d29e3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index c5fc7ea..9a4b670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -58,6 +58,7 @@ import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.URLDecoder;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
@@ -127,7 +128,7 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 params = new ParameterParser(queryString, conf);
 DataNodeUGIProvider ugiProvider = new DataNodeUGIProvider(params);
 ugi = ugiProvider.ugi();
-path = params.path();

[40/50] hadoop git commit: HDFS-13141. WebHDFS: Add support for getting snapshottable directory list. Contributed by Lokesh Jain.

2018-03-13 Thread arp
HDFS-13141. WebHDFS: Add support for getting snapshottable directory list. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0355ec20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0355ec20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0355ec20

Branch: refs/heads/HDFS-12996
Commit: 0355ec20ebeb988679c7192c7024bef7a2a3bced
Parents: 45d1b0f
Author: Xiaoyu Yao 
Authored: Mon Mar 12 16:37:29 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon Mar 12 20:41:37 2018 -0700

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |  1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 ++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 42 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 20 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 75 +++-
 8 files changed, 184 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index bbd1bd7..3dcf13b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -88,6 +88,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
 SET_XATTR("op_set_xattr"),
 GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
+GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 264e3f4..cb05c75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,7 +50,32 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED
+SNAPSHOT_ENABLED;
+
+/**
+ * Generates an enum set of Flags from a set of attr flags.
+ * @param attr Set of attr flags
+ * @return EnumSet of Flags
+ */
+public static EnumSet convert(Set attr) {
+  if (attr.isEmpty()) {
+return EnumSet.noneOf(Flags.class);
+  }
+  EnumSet flags = EnumSet.noneOf(Flags.class);
+  if (attr.contains(AttrFlags.HAS_ACL)) {
+flags.add(Flags.HAS_ACL);
+  }
+  if (attr.contains(AttrFlags.HAS_EC)) {
+flags.add(Flags.HAS_EC);
+  }
+  if (attr.contains(AttrFlags.HAS_CRYPT)) {
+flags.add(Flags.HAS_CRYPT);
+  }
+  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
+flags.add(Flags.SNAPSHOT_ENABLED);
+  }
+  return flags;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0355ec20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 2725e9c..aa79dc4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import 

[25/50] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu."

2018-03-13 Thread arp
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.  
Contribute by Yuanbo Liu."

This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.

Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa6a8b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa6a8b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa6a8b78

Branch: refs/heads/HDFS-12996
Commit: fa6a8b78d481d3b4d355e1bf078f30dd5e09850d
Parents: 3a8dade
Author: Owen O'Malley 
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Wangda Tan 
Committed: Fri Mar 9 22:46:41 2018 -0800

--
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 119 -
 .../hadoop/http/TestHttpServerWithSpengo.java   | 481 ---
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 ---
 5 files changed, 13 insertions(+), 688 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 /**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
  * 
  * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO
  * authentication  for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends 
FilterInitializer {
   public void initFilter(FilterContainer container, Configuration conf) {
 Map filterConfig = getFilterConfigMap(conf, PREFIX);
 
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
 container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 filterConfig);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
-import 

[07/50] hadoop git commit: HADOOP-15292. Distcp's use of pread is slowing it down. Contributed by Virajith Jalaparti.

2018-03-13 Thread arp
HADOOP-15292. Distcp's use of pread is slowing it down.
Contributed by Virajith Jalaparti.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bd6b1fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bd6b1fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bd6b1fd

Branch: refs/heads/HDFS-12996
Commit: 3bd6b1fd85c44354c777ef4fda6415231505b2a4
Parents: b451889
Author: Steve Loughran 
Authored: Thu Mar 8 11:15:46 2018 +
Committer: Steve Loughran 
Committed: Thu Mar 8 11:15:46 2018 +

--
 .../tools/mapred/RetriableFileCopyCommand.java  | 24 ++
 .../hadoop/tools/util/ThrottledInputStream.java | 48 +++-
 .../hadoop/tools/mapred/TestCopyMapper.java | 24 +-
 3 files changed, 66 insertions(+), 30 deletions(-)
--
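
The patch switches the copy loop from positioned reads to a single seek followed by ordinary sequential reads. A small sketch of the two access patterns on an FSDataInputStream — the file path and offset are placeholders, and this only illustrates the API difference, not DistCp's internals:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadPatternSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]);
    FileSystem fs = file.getFileSystem(conf);
    byte[] buf = new byte[8192];
    long offset = 4096L;

    try (FSDataInputStream in = fs.open(file)) {
      // Positioned read (pread): does not move the stream's current position.
      int fromPread = in.read(offset, buf, 0, buf.length);

      // Seek once, then read sequentially: later read() calls continue from
      // where the previous one stopped, which is what the copy loop now does.
      in.seek(offset);
      int fromSequential = in.read(buf);

      System.out.println("pread returned " + fromPread
          + " bytes, sequential read returned " + fromSequential + " bytes");
    }
  }
}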


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd6b1fd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 21f621a..0311061 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -260,7 +260,8 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 boolean finished = false;
 try {
   inStream = getInputStream(source, context.getConfiguration());
-  int bytesRead = readBytes(inStream, buf, sourceOffset);
+  seekIfRequired(inStream, sourceOffset);
+  int bytesRead = readBytes(inStream, buf);
   while (bytesRead >= 0) {
 if (chunkLength > 0 &&
 (totalBytesRead + bytesRead) >= chunkLength) {
@@ -276,7 +277,7 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 if (finished) {
   break;
 }
-bytesRead = readBytes(inStream, buf, sourceOffset);
+bytesRead = readBytes(inStream, buf);
   }
   outStream.close();
   outStream = null;
@@ -299,13 +300,20 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 context.setStatus(message.toString());
   }
 
-  private static int readBytes(ThrottledInputStream inStream, byte buf[],
-  long position) throws IOException {
+  private static int readBytes(ThrottledInputStream inStream, byte buf[])
+  throws IOException {
+try {
+  return inStream.read(buf);
+} catch (IOException e) {
+  throw new CopyReadException(e);
+}
+  }
+
+  private static void seekIfRequired(ThrottledInputStream inStream,
+  long sourceOffset) throws IOException {
 try {
-  if (position == 0) {
-return inStream.read(buf);
-  } else {
-return inStream.read(position, buf, 0, buf.length);
+  if (sourceOffset != inStream.getPos()) {
+inStream.seek(sourceOffset);
   }
 } catch (IOException e) {
   throw new CopyReadException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd6b1fd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
index 2d2f10c..4d3676a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.tools.util;
 
-import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,7 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short 
interval,
  * the average tends towards the specified maximum, overall.)
  */
-public class ThrottledInputStream extends InputStream {
+public class ThrottledInputStream extends InputStream implements Seekable {
 
   private final InputStream rawStream;
   private final float maxBytesPerSec;
@@ -95,25 +95,6 @@ public class ThrottledInputStream extends InputStream {
 return readLen;
   }
 
-  /**
-   * Read bytes starting from the specified position. This requires rawStream 
is
-   * an instance of {@link PositionedReadable}.
-   */
-  public int 

[14/50] hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao

2018-03-13 Thread arp
MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are 
both present twice in mapred-default.xml. Contributed by Sen Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fa3a63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fa3a63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fa3a63

Branch: refs/heads/HDFS-12996
Commit: 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf
Parents: 3f7bd46
Author: Jason Lowe 
Authored: Fri Mar 9 10:41:16 2018 -0600
Committer: Jason Lowe 
Committed: Fri Mar 9 10:41:16 2018 -0600

--
 .../src/main/resources/mapred-default.xml   | 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fa3a63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d0e5a2d..cf8be33 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -422,22 +422,6 @@
 
 
 
-  mapreduce.map.cpu.vcores
-  1
-  
-  The number of virtual cores required for each map task.
-  
-
-
-
-  mapreduce.reduce.cpu.vcores
-  1
-  
-  The number of virtual cores required for each reduce task.
-  
-
-
-
   mapreduce.reduce.merge.inmem.threshold
   1000
   The threshold, in terms of the number of files


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] hadoop git commit: HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran.

2018-03-13 Thread arp
HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log 
output.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f7bd467
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f7bd467
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f7bd467

Branch: refs/heads/HDFS-12996
Commit: 3f7bd467979042161897a7c91c5b094b83164f75
Parents: 122805b
Author: Steve Loughran 
Authored: Fri Mar 9 10:44:07 2018 +
Committer: Steve Loughran 
Committed: Fri Mar 9 10:44:07 2018 +

--
 .../hadoop-common/src/main/conf/log4j.properties  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f7bd467/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 5f4b22b..c31e54f 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -306,3 +306,6 @@ 
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize}
 #log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+# Log levels of third-party libraries
+log4j.logger.org.apache.commons.beanutils=WARN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
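
For completeness, the same suppression shown in the log4j.properties line above can also be applied programmatically through the log4j 1.x API that Hadoop bundles; only the logger name comes from the patch, the rest is an illustrative sketch:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class QuietBeanUtils {
  public static void main(String[] args) {
    // Equivalent to: log4j.logger.org.apache.commons.beanutils=WARN
    Logger.getLogger("org.apache.commons.beanutils").setLevel(Level.WARN);

    Logger log = Logger.getLogger(QuietBeanUtils.class);
    log.info("FluentPropertyBeanIntrospector INFO noise is now filtered out");
  }
}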



[46/50] hadoop git commit: HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma.

2018-03-13 Thread arp
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8211a3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8211a3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8211a3d4

Branch: refs/heads/HDFS-12996
Commit: 8211a3d4693fea46cff11c5883c16a9b4df7b4de
Parents: f82d38d
Author: Xiao Chen 
Authored: Tue Mar 13 10:48:35 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:48:45 2018 -0700

--
 .../hdfs/TestFileStatusWithDefaultECPolicy.java | 107 +++
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 102 --
 .../hdfs/TestFileStatusWithRandomECPolicy.java  |  49 +
 3 files changed, 156 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8211a3d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
new file mode 100644
index 000..a5a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private DFSClient client;
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(30);
+
+  @Before
+  public void before() throws IOException {
+HdfsConfiguration conf = new HdfsConfiguration();
+cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+client = fs.getClient();
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
+  }
+
+  @After
+  public void after() {
+if (cluster != null) {
+  cluster.shutdown();
+  cluster = null;
+}
+  }
+
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  @Test
+  public void testFileStatusWithECPolicy() throws Exception {
+// test directory doesn't have an EC policy
+final Path dir = new Path("/foo");
+assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
+ContractTestUtils.assertNotErasureCoded(fs, dir);
+assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
+// test file doesn't have an EC policy
+final Path file = new Path(dir, "foo");
+fs.create(file).close();
+assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
+ContractTestUtils.assertNotErasureCoded(fs, file);
+fs.delete(file, true);
+
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+// set EC policy on dir
+fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
+ContractTestUtils.assertErasureCoded(fs, dir);
+final ErasureCodingPolicy ecPolicy2 =
+client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy2);
+

[48/50] hadoop git commit: YARN-5764. NUMA awareness support for launching containers. Contributed by Devaraj K.

2018-03-13 Thread arp
YARN-5764. NUMA awareness support for launching containers. Contributed by 
Devaraj K.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a82d4a2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a82d4a2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a82d4a2e

Branch: refs/heads/HDFS-12996
Commit: a82d4a2e3a6a5448e371cef0cb86d5dbe4871ccd
Parents: 45cccad
Author: Miklos Szegedi 
Authored: Tue Mar 13 11:03:27 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue Mar 13 12:36:57 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  27 ++
 .../src/main/resources/yarn-default.xml |  51 +++
 .../nodemanager/LinuxContainerExecutor.java |  18 +-
 .../linux/privileged/PrivilegedOperation.java   |   3 +-
 .../linux/resources/ResourceHandlerModule.java  |  10 +
 .../linux/resources/numa/NumaNodeResource.java  | 204 +++
 .../resources/numa/NumaResourceAllocation.java  |  69 
 .../resources/numa/NumaResourceAllocator.java   | 342 +++
 .../resources/numa/NumaResourceHandlerImpl.java | 108 ++
 .../linux/resources/numa/package-info.java  |  28 ++
 .../numa/TestNumaResourceAllocator.java | 281 +++
 .../numa/TestNumaResourceHandlerImpl.java   | 181 ++
 12 files changed, 1318 insertions(+), 4 deletions(-)
--
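
The YarnConfiguration hunk below introduces the NUMA-related keys and a numaAwarenessEnabled helper. A minimal sketch of toggling them in code, assuming a build that already contains this patch (the values chosen are illustrative, not recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NumaAwarenessToggle {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();

    // Keys and the helper method come from the patch; values are examples.
    conf.setBoolean(YarnConfiguration.NM_NUMA_AWARENESS_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_NUMA_AWARENESS_READ_TOPOLOGY, true);
    conf.set(YarnConfiguration.NM_NUMA_AWARENESS_NUMACTL_CMD, "/usr/bin/numactl");

    if (YarnConfiguration.numaAwarenessEnabled(conf)) {
      System.out.println("NodeManager would start the NUMA resource handler");
    }
  }
}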


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82d4a2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6677478..2afff43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3585,6 +3585,22 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
   DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
 
+  /**
+   * Settings for NUMA awareness.
+   */
+  public static final String NM_NUMA_AWARENESS_ENABLED = NM_PREFIX
+  + "numa-awareness.enabled";
+  public static final boolean DEFAULT_NM_NUMA_AWARENESS_ENABLED = false;
+  public static final String NM_NUMA_AWARENESS_READ_TOPOLOGY = NM_PREFIX
+  + "numa-awareness.read-topology";
+  public static final boolean DEFAULT_NM_NUMA_AWARENESS_READ_TOPOLOGY = false;
+  public static final String NM_NUMA_AWARENESS_NODE_IDS = NM_PREFIX
+  + "numa-awareness.node-ids";
+  public static final String NM_NUMA_AWARENESS_NUMACTL_CMD = NM_PREFIX
+  + "numa-awareness.numactl.cmd";
+  public static final String DEFAULT_NM_NUMA_AWARENESS_NUMACTL_CMD =
+  "/usr/bin/numactl";
+
   public YarnConfiguration() {
 super();
   }
@@ -3791,6 +3807,17 @@ public class YarnConfiguration extends Configuration {
 YarnConfiguration.DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED);
   }
 
+  /**
+   * Returns whether the NUMA awareness is enabled.
+   *
+   * @param conf the configuration
+   * @return whether the NUMA awareness is enabled.
+   */
+  public static boolean numaAwarenessEnabled(Configuration conf) {
+return conf.getBoolean(NM_NUMA_AWARENESS_ENABLED,
+DEFAULT_NM_NUMA_AWARENESS_ENABLED);
+  }
+
   /* For debugging. mp configurations to system output as XML format. */
   public static void main(String[] args) throws Exception {
 new YarnConfiguration(new Configuration()).writeXml(System.out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82d4a2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index adf8d8a..e192a0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3711,4 +3711,55 @@
 
   
 
+  
+
+Whether to enable the NUMA awareness for containers in Node Manager.
+
+yarn.nodemanager.numa-awareness.enabled
+false
+  
+
+  
+
+Whether to read the NUMA topology from the system or from the
+configurations. If the value is true then NM reads the NUMA topology from
+system 

[21/50] hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

2018-03-13 Thread arp
HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afe1a3cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afe1a3cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afe1a3cc

Branch: refs/heads/HDFS-12996
Commit: afe1a3ccd56a12fec900360a8a2855c080728e65
Parents: ba0da27
Author: Inigo Goiri 
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri 
Committed: Fri Mar 9 17:18:51 2018 -0800

--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java| 46 
 2 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
   Entry entry = it.next();
   PathLocation loc = entry.getValue();
   String src = loc.getSourcePath();
-  if (src.startsWith(path)) {
-LOG.debug("Removing {}", src);
-it.remove();
+  if (src != null) {
+if (src.startsWith(path)) {
+  LOG.debug("Removing {}", src);
+  it.remove();
+}
+  } else {
+String dest = loc.getDefaultLocation().getDest();
+if (dest.startsWith(path)) {
+  LOG.debug("Removing default cache {}", dest);
+  it.remove();
+}
   }
 }
 
@@ -287,6 +295,7 @@ public class MountTableResolver
 if (!oldEntries.contains(srcPath)) {
   // Add node, it does not exist
   this.tree.put(srcPath, entry);
+  invalidateLocationCache(srcPath);
   LOG.info("Added new mount point {} to resolver", srcPath);
 } else {
   // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
 Configuration conf = new Configuration();
 conf.setInt(
 FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
 mountTable = new MountTableResolver(conf);
 
 // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
 long cacheSize = mountTable.getCacheSize();
 assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+List entries = new ArrayList<>();
+
+// Add entry and test location cache
+Map map1 = getMountTableEntry("1", "/testlocationcache");
+MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+entries.add(entry1);
+
+Map map2 = getMountTableEntry("2",
+"/anothertestlocationcache");
+MountTable entry2 = MountTable.newInstance("/anothertestlocationcache",
+map2);
+entries.add(entry2);
+mountTable.refreshEntries(entries);
+

[27/50] hadoop git commit: YARN-7523. Introduce description and version field in Service record. Contributed by Chandni Singh

2018-03-13 Thread arp
YARN-7523. Introduce description and version field in Service record. 
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1f5251f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1f5251f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1f5251f

Branch: refs/heads/HDFS-12996
Commit: e1f5251f3c0d6e74af1b52eda6633b728804fe2a
Parents: ea18e70
Author: Billie Rinaldi 
Authored: Sat Mar 10 07:49:10 2018 -0800
Committer: Billie Rinaldi 
Committed: Sat Mar 10 07:49:10 2018 -0800

--
 .../hadoop/yarn/service/webapp/ApiServer.java   |  4 +-
 .../definition/YARN-Services-Examples.md|  6 +++
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  7 +++
 .../hadoop/yarn/service/TestApiServer.java  |  1 +
 .../src/test/resources/example-app.json |  1 +
 .../examples/httpd-no-dns/httpd-no-dns.json |  1 +
 .../examples/httpd/httpd.json   |  1 +
 .../examples/sleeper/sleeper.json   |  1 +
 .../yarn/service/api/records/Service.java   | 46 +++-
 .../exceptions/RestApiErrorMessages.java|  2 +
 .../yarn/service/utils/ServiceApiUtil.java  |  6 +++
 .../hadoop/yarn/service/ServiceTestUtils.java   |  1 +
 .../hadoop/yarn/service/TestServiceApiUtil.java | 13 ++
 .../yarn/service/TestYarnNativeServices.java|  7 +++
 .../src/test/resources/example-app.json |  1 +
 .../service/conf/examples/app-override.json |  1 +
 .../hadoop/yarn/service/conf/examples/app.json  |  1 +
 .../yarn/service/conf/examples/default.json |  1 +
 .../yarn/service/conf/examples/external0.json   |  1 +
 .../yarn/service/conf/examples/external1.json   |  1 +
 .../yarn/service/conf/examples/external2.json   |  1 +
 .../markdown/yarn-service/YarnServiceAPI.md |  8 
 22 files changed, 108 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1f5251f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 0deeae7..e7979b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -123,8 +123,8 @@ public class ApiServer {
 return null;
   }
 });
-serviceStatus.setDiagnostics("Service "+service.getName() +
-" saved.");
+serviceStatus.setDiagnostics("Service " + service.getName() +
+" version " + service.getVersion() + " saved.");
   } else {
 ApplicationId applicationId = ugi
 .doAs(new PrivilegedExceptionAction() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1f5251f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index e4cdc7b..22f941e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -21,6 +21,8 @@ POST URL - http://localhost:9191/ws/v1/services
 ```json
 {
   "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example",
   "components" :
 [
   {
@@ -48,6 +50,8 @@ Note, lifetime value of -1 means unlimited lifetime.
 ```json
 {
 "name": "hello-world",
+"version": "1.0.0",
+"description": "hello world example",
 "id": "application_1503963985568_0002",
 "lifetime": -1,
 "components": [
@@ -154,6 +158,8 @@ POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
 ```json
 {
   "name": "hbase-app-1",
+  "version": "1.0.0",
+  "description": "hbase 

[35/50] hadoop git commit: HDFS-10803. TestBalancerWithMultipleNameNodes#testBalancing2OutOf3Blockpools fails intermittently due to no free space available. Contributed by Yiqun Lin.

2018-03-13 Thread arp
HDFS-10803. TestBalancerWithMultipleNameNodes#testBalancing2OutOf3Blockpools 
fails intermittently due to no free space available. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4afd50b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4afd50b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4afd50b1

Branch: refs/heads/HDFS-12996
Commit: 4afd50b10650a72162c40cf86dea44676013f262
Parents: 91c82c9
Author: Yiqun Lin 
Authored: Tue Mar 13 10:15:51 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 13 10:15:51 2018 +0800

--
 .../balancer/TestBalancerWithMultipleNameNodes.java   | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4afd50b1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index cf4c86f..c8929d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -416,13 +416,19 @@ public class TestBalancerWithMultipleNameNodes {
 }
 
 conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
+// Adjust the capacity of each DN since it will redistribute blocks
+// nNameNodes times in the following operations.
+long[] newCapacities = new long[nDataNodes];
+for (int i = 0; i < nDataNodes; i++) {
+  newCapacities[i] = capacities[i] * nNameNodes;
+}
 {
   LOG.info("UNEVEN 10");
   final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
   .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
   .numDataNodes(nDataNodes)
   .racks(racks)
-  .simulatedCapacities(capacities)
+  .simulatedCapacities(newCapacities)
   .format(false)
   .build();
   LOG.info("UNEVEN 11");
@@ -450,7 +456,7 @@ public class TestBalancerWithMultipleNameNodes {
   LOG.info("UNEVEN 13: n=" + n);
 }
 
-final long totalCapacity = TestBalancer.sum(capacities);
+final long totalCapacity = TestBalancer.sum(newCapacities);
 final long totalUsed = nNameNodes*usedSpacePerNN;
 LOG.info("UNEVEN 14");
 runBalancer(s, totalUsed, totalCapacity);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/50] hadoop git commit: HDFS-13241. RBF: TestRouterSafemode failed if the port 8888 is in use. Contributed by maobaolong.

2018-03-13 Thread arp
HDFS-13241. RBF: TestRouterSafemode failed if the port 8888 is in use. 
Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91c82c90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91c82c90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91c82c90

Branch: refs/heads/HDFS-12996
Commit: 91c82c90f05ea75fe59c6ffad3dc3fcac1429e9e
Parents: ff31d8a
Author: Inigo Goiri 
Authored: Mon Mar 12 17:28:15 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Mar 12 17:28:15 2018 -0700

--
 .../hdfs/server/federation/router/TestRouterSafemode.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91c82c90/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
index 9299f77..e05f727 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
@@ -32,6 +32,7 @@ import java.net.URISyntaxException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Time;
@@ -65,6 +66,14 @@ public class TestRouterSafemode {
 // 2 sec post cache update before entering safemode (2 intervals)
 conf.setTimeDuration(DFS_ROUTER_SAFEMODE_EXPIRATION,
 TimeUnit.SECONDS.toMillis(2), TimeUnit.MILLISECONDS);
+
+conf.set(DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY, "0.0.0.0");
+conf.set(DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY, "0.0.0.0");
+conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+conf.set(DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "127.0.0.1:0");
+
 // RPC + State Store + Safe Mode only
 conf = new RouterConfigBuilder(conf)
 .rpc()
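
The fix replaces fixed default ports with "127.0.0.1:0" so each RPC/admin/HTTP endpoint binds to an ephemeral port chosen by the OS, which is why concurrent test runs stop colliding on 8888. A stand-alone sketch of the same idea (plain java.net, not a Hadoop API, for illustration only):

```java
import java.io.IOException;
import java.net.ServerSocket;

public class EphemeralPortExample {
  public static void main(String[] args) throws IOException {
    // Bind to port 0: the OS assigns a currently free port, so concurrent
    // test runs never race for a hard-coded port such as 8888.
    try (ServerSocket socket = new ServerSocket(0)) {
      int assignedPort = socket.getLocalPort();
      System.out.println("Listening on ephemeral port " + assignedPort);
    }
  }
}
```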


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] hadoop git commit: Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma."

2018-03-13 Thread arp
Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma."

This reverts commit 84c10955863eca1e300aeeac1d9cd7a1186144b6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82d38dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82d38dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82d38dc

Branch: refs/heads/HDFS-12996
Commit: f82d38dcb3259dda6275c75765738fb9b249ee73
Parents: 3b8dbc2
Author: Xiao Chen 
Authored: Tue Mar 13 10:30:07 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:36:16 2018 -0700

--
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 ++
 .../hdfs/TestFileStatusWithRandomECPolicy.java  | 49 
 2 files changed, 5 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index a5a..077cf3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,10 +34,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-/**
- * This test ensures the statuses of EC files with the default policy.
- */
-public class TestFileStatusWithDefaultECPolicy {
+public class TestFileStatusWithECPolicy {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DFSClient client;
@@ -53,7 +50,8 @@ public class TestFileStatusWithDefaultECPolicy {
 cluster.waitActive();
 fs = cluster.getFileSystem();
 client = fs.getClient();
-fs.enableErasureCodingPolicy(getEcPolicy().getName());
+fs.enableErasureCodingPolicy(
+StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After
@@ -64,10 +62,6 @@ public class TestFileStatusWithDefaultECPolicy {
 }
   }
 
-  public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
   @Test
   public void testFileStatusWithECPolicy() throws Exception {
 // test directory doesn't have an EC policy
@@ -82,7 +76,8 @@ public class TestFileStatusWithDefaultECPolicy {
 ContractTestUtils.assertNotErasureCoded(fs, file);
 fs.delete(file, true);
 
-final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+final ErasureCodingPolicy ecPolicy1 =
+StripedFileTestUtil.getDefaultECPolicy();
 // set EC policy on dir
 fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
 ContractTestUtils.assertErasureCoded(fs, dir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
deleted file mode 100644
index 18902a7..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This test extends TestFileStatusWithDefaultECPolicy to use a random
- * (non-default) EC policy.
- */
-public class TestFileStatusWithRandomECPolicy 

[47/50] hadoop git commit: HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei Jiang

2018-03-13 Thread arp
HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei 
Jiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cccadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cccadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cccadd

Branch: refs/heads/HDFS-12996
Commit: 45cccadd2e84b99ec56f1cc0e2248dc8fc844f38
Parents: 8211a3d
Author: Chris Douglas 
Authored: Tue Mar 13 11:08:11 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 11:08:11 2018 -0700

--
 .../src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cccadd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 2b3b529..eba4bee 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -527,7 +527,7 @@ public class DistCpUtils {
   /**
* Utility to compare checksums for the paths specified.
*
-   * If checksums's can't be retrieved, it doesn't fail the test
+   * If checksums can't be retrieved, it doesn't fail the test
* Only time the comparison would fail is when checksums are
* available and they don't match
*


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] hadoop git commit: HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu Asanuma.

2018-03-13 Thread arp
HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9a6d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9a6d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9a6d9

Branch: refs/heads/HDFS-12996
Commit: 4cc9a6d9bb34329d6de30706d5432c7cb675bb88
Parents: 583f459
Author: Yiqun Lin 
Authored: Thu Mar 8 16:02:34 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Mar 8 16:02:34 2018 +0800

--
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9a6d9/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 9b2d9de..8b9cfda 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -223,7 +223,7 @@ functionality, except the mount table is managed on the 
server-side by the
 routing layer rather than on the client. This simplifies access to a federated
 cluster for existing HDFS clients.
 
-See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
 HDFS Router-based Federation
 [documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
 more details.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] hadoop git commit: HDFS-13190. Document WebHDFS support for snapshot diff

2018-03-13 Thread arp
HDFS-13190. Document WebHDFS support for snapshot diff

Signed-off-by: Akira Ajisaka 
Signed-off-by: Xiaoyu Yao 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b0dc310
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b0dc310
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b0dc310

Branch: refs/heads/HDFS-12996
Commit: 7b0dc310208ee5bc191c9accb3d1312513145653
Parents: 9a082fb
Author: Lokesh Jain 
Authored: Fri Mar 9 15:04:14 2018 -0800
Committer: Akira Ajisaka 
Committed: Fri Mar 9 15:06:15 2018 -0800

--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 92 
 1 file changed, 92 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b0dc310/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 4a1395e..057ca59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,6 +50,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 * [`CHECKACCESS`](#Check_access) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
 * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
 * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
+* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
 *   HTTP PUT
 * [`CREATE`](#Create_and_Write_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
 * [`MKDIRS`](#Make_a_Directory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1266,6 +1267,21 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSna
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot
 
+### Get Snapshot Diff
+
+* Submit a HTTP GET request.
+
+curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTDIFF
+   &oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
+
+The client receives a response with a [`SnapshotDiffReport` JSON 
object](#SnapshotDiffReport_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+
{"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
+
 Delegation Token Operations
 ---
 
@@ -2043,6 +2059,82 @@ A `BlockStoragePolicies` JSON object represents an array 
of `BlockStoragePolicy`
 }
 ```
 
+### SnapshotDiffReport JSON Schema
+
+```json
+{
+  "name": "SnapshotDiffReport",
+  "type": "object",
+  "properties":
+  {
+"SnapshotDiffReport":
+{
+  "type": "object",
+  "properties"  :
+  {
+"diffList":
+{
+  "description": "An array of DiffReportEntry",
+  "type": "array",
+  "items"   : diffReportEntries,
+  "required": true
+},
+"fromSnapshot":
+{
+  "description": "Source snapshot",
+  "type": "string",
+  "required": true
+},
+"snapshotRoot":
+{
+  "description" : "String representation of snapshot root path",
+  "type": "string",
+  "required": true
+},
+"toSnapshot":
+{
+  "description" : "Destination snapshot",
+  "type": "string",
+  "required": true
+}
+  }
+}
+  }
+}
+```
+
+#### DiffReport Entries
+
+JavaScript syntax is used to define `diffReportEntries` so that it can be
referred to in the `SnapshotDiffReport` JSON schema.
+
+```javascript
+var diffReportEntries =
+{
+  "type": "object",
+  "properties":
+  {
+"sourcePath":
+{
+  "description" : "Source path name relative to snapshot root",
+  "type": "string",
+  "required": true
+},
+"targetPath":
+{
+  "description" : "Target path relative to snapshot root used for renames",
+  "type": "string",
+  "required": true
+},
+"type":
+{
+  "description" : "Type of diff report entry",
+  "enum": ["CREATE", "MODIFY", "DELETE", "RENAME"],
+  "required": true
+}
+  }
+}
+```
+
 HTTP Query Parameter Dictionary
 ---
 


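The REST operation documented above surfaces the same snapshot-diff data that HDFS clients can fetch over RPC. A minimal, illustrative sketch using DistributedFileSystem (assumes a running cluster, a snapshottable directory /foo, and existing snapshots s3 and s4 as in the sample response; this is not part of the WebHDFS REST API itself):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotDiffExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: fs.defaultFS points at an HDFS cluster and /foo is snapshottable.
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // RPC equivalent of op=GETSNAPSHOTDIFF&oldsnapshotname=s3&snapshotname=s4.
      SnapshotDiffReport report =
          dfs.getSnapshotDiffReport(new Path("/foo"), "s3", "s4");
      for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
        System.out.println(entry);
      }
    }
  }
}
```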
-
To unsubscribe, e-mail: 

[43/50] hadoop git commit: HDFS-13239. Fix non-empty dir warning message when setting default EC policy. Contributed by Bharat Viswanadham.

2018-03-13 Thread arp
HDFS-13239. Fix non-empty dir warning message when setting default EC policy. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6931c30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6931c30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6931c30

Branch: refs/heads/HDFS-12996
Commit: d6931c30c5a643ca192109b05a9c44da42a6318c
Parents: 84c1095
Author: Xiao Chen 
Authored: Tue Mar 13 10:06:55 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:07:26 2018 -0700

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 12 -
 .../test/resources/testErasureCodingConf.xml| 28 +---
 2 files changed, 30 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6931c30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index e30b083..9b9fe14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,17 +358,15 @@ public class ECAdmin extends Configured implements Tool {
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
 if (ecPolicyName == null){
-  System.out.println("Set default erasure coding policy" +
-  " on " + path);
-} else {
-  System.out.println("Set erasure coding policy " + ecPolicyName +
-  " on " + path);
+  ecPolicyName = "default";
 }
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
-  "non-empty directory will not automatically convert existing" +
-  " files to " + ecPolicyName);
+  "non-empty directory will not automatically convert existing " +
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6931c30/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index fc0c060..2f7a6a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -214,7 +214,7 @@
   
 
   SubstringComparator
-  Set erasure coding policy RS-6-3-1024k on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -232,7 +232,7 @@
   
 
   SubstringComparator
-  Set erasure coding policy RS-6-3-1024k on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -311,7 +311,7 @@
   
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
 
   
 
@@ -696,6 +696,28 @@
 
 
 
+  setPolicy : set erasure coding policy without given a 
specific policy name on a non empty directory
+  
+-fs NAMENODE -mkdir /ecdir
+-fs NAMENODE -mkdir /ecdir/ecsubdir
+-fs NAMENODE -setPolicy -path 
/ecdir
+  
+  
+-fs NAMENODE -rm -R /ecdir
+  
+  
+
+  SubstringComparator
+  Set default erasure coding policy on 
/ecdir
+
+
+  SubstringComparator
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
+
+  
+
+
+
   getPolicy: get the default policy after setPolicy without 
given a specific policy name
   
 -fs NAMENODE -mkdir /ecdir



[38/50] hadoop git commit: HDFS-13226. RBF: Throw an exception if mount table entry validation fails. Contributed by maobaolong.

2018-03-13 Thread arp
HDFS-13226. RBF: Throw an exception if mount table entry validation fails. 
Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19292bc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19292bc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19292bc2

Branch: refs/heads/HDFS-12996
Commit: 19292bc264cada5117ec76063d36cc88159afdf4
Parents: 7fab787
Author: Yiqun Lin 
Authored: Tue Mar 13 11:03:31 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 13 11:03:31 2018 +0800

--
 .../federation/store/records/BaseRecord.java| 16 ++--
 .../store/records/MembershipState.java  | 29 -
 .../federation/store/records/MountTable.java| 42 +++
 .../federation/store/records/RouterState.java   |  9 ++--
 .../federation/router/TestRouterAdminCLI.java   | 38 +++--
 .../store/records/TestMountTable.java   | 43 
 6 files changed, 137 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index 79f99c8..d5e60ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.util.Time;
  * 
  */
 public abstract class BaseRecord implements Comparable {
+  public static final String ERROR_MSG_CREATION_TIME_NEGATIVE =
+  "The creation time for the record cannot be negative.";
+  public static final String ERROR_MSG_MODIFICATION_TIME_NEGATIVE =
+  "The modification time for the record cannot be negative.";
 
   /**
* Set the modification time for the record.
@@ -193,11 +197,15 @@ public abstract class BaseRecord implements 
Comparable {
 
   /**
* Validates the record. Called when the record is created, populated from 
the
-   * state store, and before committing to the state store.
-   * @return If the record is valid.
+   * state store, and before committing to the state store. If validation
+   * fails, an exception is thrown.
*/
-  public boolean validate() {
-return getDateCreated() > 0 && getDateModified() > 0;
+  public void validate() {
+if (getDateCreated() <= 0) {
+  throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE);
+} else if (getDateModified() <= 0) {
+  throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);
+}
   }
 
   @Override
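
Because validate() now reports problems by throwing IllegalArgumentException instead of returning false, call sites switch from an if-check to a try/catch. A self-contained sketch of that calling pattern (the Record class below is a stand-in for a BaseRecord subclass, not code from this patch):

```java
public class ValidateCallSiteExample {
  /** Hypothetical record type mirroring the new BaseRecord contract. */
  static class Record {
    long dateCreated;
    long dateModified;

    /** Throws instead of returning a boolean, as in the patch above. */
    void validate() {
      if (dateCreated <= 0) {
        throw new IllegalArgumentException(
            "The creation time for the record cannot be negative.");
      } else if (dateModified <= 0) {
        throw new IllegalArgumentException(
            "The modification time for the record cannot be negative.");
      }
    }
  }

  public static void main(String[] args) {
    Record record = new Record(); // both timestamps default to 0 -> invalid
    try {
      record.validate();          // old style was: if (!record.validate()) { ... }
      System.out.println("record is valid, safe to commit");
    } catch (IllegalArgumentException e) {
      System.out.println("rejecting record: " + e.getMessage());
    }
  }
}
```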

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
index ac0b22e..e33dedf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
@@ -37,6 +37,14 @@ import 
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerialize
  */
 public abstract class MembershipState extends BaseRecord
 implements FederationNamenodeContext {
+  public static final String ERROR_MSG_NO_NS_SPECIFIED =
+  "Invalid registration, no nameservice specified ";
+  public static final String ERROR_MSG_NO_WEB_ADDR_SPECIFIED =
+  "Invalid registration, no web address specified ";
+  public static final String ERROR_MSG_NO_RPC_ADDR_SPECIFIED =
+  "Invalid registration, no rpc address specified ";
+  public static final String ERROR_MSG_NO_BP_SPECIFIED =
+  "Invalid registration, no block pool specified ";
 
   /** Expiration time in ms for this entry. */
   private static long expirationMs;
@@ -226,26 +234,25 @@ public abstract class MembershipState extends BaseRecord
* is missing required information.
*/
   @Override
-  public boolean validate() {
-boolean ret = 

[42/50] hadoop git commit: HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. Contributed by Takanobu Asanuma.

2018-03-13 Thread arp
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84c10955
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84c10955
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84c10955

Branch: refs/heads/HDFS-12996
Commit: 84c10955863eca1e300aeeac1d9cd7a1186144b6
Parents: b2b9ce5
Author: Xiao Chen 
Authored: Tue Mar 13 09:57:20 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 09:58:03 2018 -0700

--
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 --
 .../hdfs/TestFileStatusWithRandomECPolicy.java  | 49 
 2 files changed, 59 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index 077cf3a..a5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,7 +34,10 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-public class TestFileStatusWithECPolicy {
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DFSClient client;
@@ -50,8 +53,7 @@ public class TestFileStatusWithECPolicy {
 cluster.waitActive();
 fs = cluster.getFileSystem();
 client = fs.getClient();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
   }
 
   @After
@@ -62,6 +64,10 @@ public class TestFileStatusWithECPolicy {
 }
   }
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Test
   public void testFileStatusWithECPolicy() throws Exception {
 // test directory doesn't have an EC policy
@@ -76,8 +82,7 @@ public class TestFileStatusWithECPolicy {
 ContractTestUtils.assertNotErasureCoded(fs, file);
 fs.delete(file, true);
 
-final ErasureCodingPolicy ecPolicy1 =
-StripedFileTestUtil.getDefaultECPolicy();
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
 // set EC policy on dir
 fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
 ContractTestUtils.assertErasureCoded(fs, dir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
new file mode 100644
index 000..18902a7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestFileStatusWithDefaultECPolicy to use a random
+ * (non-default) EC policy.
+ */
+public class TestFileStatusWithRandomECPolicy extends
+TestFileStatusWithDefaultECPolicy {
+  private static final Logger LOG = 

[41/50] hadoop git commit: HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with HdfsFileStatus as argument. Contributed by Lokesh Jain

2018-03-13 Thread arp
HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with 
HdfsFileStatus as argument. Contributed by Lokesh Jain


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2b9ce58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2b9ce58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2b9ce58

Branch: refs/heads/HDFS-12996
Commit: b2b9ce585984a1791a8af3e2287c75c75b95586f
Parents: 0355ec2
Author: Chris Douglas 
Authored: Tue Mar 13 09:43:22 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 09:43:22 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 +---
 .../protocol/SnapshottableDirectoryStatus.java  |  8 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 10 +---
 3 files changed, 10 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index cb05c75..264e3f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,32 +50,7 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED;
-
-/**
- * Generates an enum set of Flags from a set of attr flags.
- * @param attr Set of attr flags
- * @return EnumSet of Flags
- */
-public static EnumSet convert(Set attr) {
-  if (attr.isEmpty()) {
-return EnumSet.noneOf(Flags.class);
-  }
-  EnumSet flags = EnumSet.noneOf(Flags.class);
-  if (attr.contains(AttrFlags.HAS_ACL)) {
-flags.add(Flags.HAS_ACL);
-  }
-  if (attr.contains(AttrFlags.HAS_EC)) {
-flags.add(Flags.HAS_EC);
-  }
-  if (attr.contains(AttrFlags.HAS_CRYPT)) {
-flags.add(Flags.HAS_CRYPT);
-  }
-  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
-flags.add(Flags.SNAPSHOT_ENABLED);
-  }
-  return flags;
-}
+SNAPSHOT_ENABLED
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 6cdb2ee..0d35238 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -79,6 +79,14 @@ public class SnapshottableDirectoryStatus {
 this.parentFullPath = parentFullPath;
   }
 
+  public SnapshottableDirectoryStatus(HdfsFileStatus dirStatus,
+  int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+this.dirStatus = dirStatus;
+this.snapshotNumber = snapshotNumber;
+this.snapshotQuota = snapshotQuota;
+this.parentFullPath = parentFullPath;
+  }
+
   /**
* @return Number of snapshots that have been taken for the directory
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index aa79dc4..13c5226 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -65,7 +65,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -772,15 +771,8 @@ class JsonUtilClient {
 byte[] parentFullPath = toByteArray((String) 

[44/50] hadoop git commit: HDFS-12587. Use Parameterized tests in TestBlockInfoStriped and TestLowRedundancyBlockQueues to test all EC policies. Contributed by Takanobu Asanuma.

2018-03-13 Thread arp
HDFS-12587. Use Parameterized tests in TestBlockInfoStriped and 
TestLowRedundancyBlockQueues to test all EC policies. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b8dbc2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b8dbc2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b8dbc2c

Branch: refs/heads/HDFS-12996
Commit: 3b8dbc2cb766ba9fc1d655c891d32f5b4e4aa9c8
Parents: d6931c3
Author: Xiao Chen 
Authored: Tue Mar 13 10:12:52 2018 -0700
Committer: Xiao Chen 
Committed: Tue Mar 13 10:14:05 2018 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 15 +
 .../blockmanagement/TestBlockInfoStriped.java   | 33 ++--
 .../TestLowRedundancyBlockQueues.java   | 19 +--
 3 files changed, 55 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b8dbc2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 13ca390..35edab9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -46,6 +46,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -591,4 +592,18 @@ public class StripedFileTestUtil {
 .getPolicies();
 return policies.get(1 + rand.nextInt(policies.size() - 1));
   }
+
+  /**
+   * Get all Erasure Coding Policies for Parameterized tests.
+   * @return Collection
+   */
+  public static Collection getECPolicies() {
+ArrayList params = new ArrayList<>();
+List policies =
+SystemErasureCodingPolicies.getPolicies();
+for (ErasureCodingPolicy policy: policies) {
+  params.add(new Object[]{policy});
+}
+return params;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b8dbc2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 1040d21..becf868 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -25,29 +25,42 @@ import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 /**
- * Test {@link BlockInfoStriped}
+ * Test {@link BlockInfoStriped}.
  */
+@RunWith(Parameterized.class)
 public class TestBlockInfoStriped {
   private static final long BASE_ID = -1600;
   private final Block baseBlock = new Block(BASE_ID);
-  private final ErasureCodingPolicy testECPolicy
-  = StripedFileTestUtil.getDefaultECPolicy();
-  private final int totalBlocks = testECPolicy.getNumDataUnits() +
-  testECPolicy.getNumParityUnits();
-  private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
-  testECPolicy);
+  private final ErasureCodingPolicy testECPolicy;
+  private final int totalBlocks;
+  private final BlockInfoStriped info;
+
+  public TestBlockInfoStriped(ErasureCodingPolicy policy) {
+testECPolicy = policy;
+totalBlocks = testECPolicy.getNumDataUnits()
++ testECPolicy.getNumParityUnits();
+info = new BlockInfoStriped(baseBlock, testECPolicy);
+  }
+
+  @Parameterized.Parameters(name = "{index}: {0}")
+  public static Collection policies() {
+return StripedFileTestUtil.getECPolicies();
+  }
 
   private 

[36/50] hadoop git commit: HDFS-12156. TestFSImage fails without -Pnative

2018-03-13 Thread arp
HDFS-12156. TestFSImage fails without -Pnative


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/319defaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/319defaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/319defaf

Branch: refs/heads/HDFS-12996
Commit: 319defafc105c0d0b69b83828b578d9c453036f5
Parents: 4afd50b
Author: Akira Ajisaka 
Authored: Tue Mar 13 11:26:48 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 13 11:26:56 2018 +0900

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFSImage.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/319defaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 38a6dab..ba08f73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -74,6 +75,7 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assume;
 import org.junit.Test;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -99,6 +101,13 @@ public class TestFSImage {
 setCompressCodec(conf, "org.apache.hadoop.io.compress.DefaultCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.GzipCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.BZip2Codec");
+  }
+
+  @Test
+  public void testNativeCompression() throws IOException {
+Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
 setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec");
   }
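
The new test relies on JUnit's Assume so that environments built without -Pnative report the test as skipped rather than failed. A minimal illustration of that guard pattern (sketch only):

```java
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Assume;
import org.junit.Test;

public class NativeGuardExample {
  @Test
  public void testNeedsNativeCode() {
    // If libhadoop is not loaded, the runner marks this test "skipped"
    // (AssumptionViolatedException) instead of "failed".
    Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
    // ... exercise the Lz4 codec or other native-only functionality here ...
  }
}
```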
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] hadoop git commit: YARN-8024. LOG in class MaxRunningAppsEnforcer is initialized with a faulty class. Contributed by Sen Zhao.

2018-03-13 Thread arp
YARN-8024. LOG in class MaxRunningAppsEnforcer is initialized with a faulty 
class. Contributed by Sen Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff31d8ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff31d8ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff31d8ae

Branch: refs/heads/HDFS-12996
Commit: ff31d8aefa0490ccf1d44fe8a738fdc002aa712c
Parents: 39a5fba
Author: Yufei Gu 
Authored: Mon Mar 12 16:35:18 2018 -0700
Committer: Yufei Gu 
Committed: Mon Mar 12 16:35:26 2018 -0700

--
 .../resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff31d8ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 02e2d97..3f1ad0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -37,7 +37,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicat
  * constraints
  */
 public class MaxRunningAppsEnforcer {
-  private static final Log LOG = LogFactory.getLog(FairScheduler.class);
+  private static final Log LOG = LogFactory.getLog(
+  MaxRunningAppsEnforcer.class);
   
   private final FairScheduler scheduler;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[49/50] hadoop git commit: HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. Contributed by Erik Krogen

2018-03-13 Thread arp
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. 
Contributed by Erik Krogen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6994da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6994da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6994da

Branch: refs/heads/HDFS-12996
Commit: 9d6994da1964c1125a33b3a65e7a7747e2d0bc59
Parents: a82d4a2
Author: Chris Douglas 
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 13:55:18 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 12 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 23 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
   "hadoop.http.socket.backlog.size";
   public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_ACCEPTOR_COUNT_KEY =
+  "hadoop.http.acceptor.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+  public static final String HTTP_SELECTOR_COUNT_KEY =
+  "hadoop.http.selector.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
   public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
 
 private ServerConnector createHttpChannelConnector(
 Server server, HttpConfiguration httpConfig) {
-  ServerConnector conn = new ServerConnector(server);
+  ServerConnector conn = new ServerConnector(server,
+  conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+  conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
   ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
   conn.addConnectionFactory(connFactory);
   configureChannelConnector(conn);
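
With the two new keys, code that embeds HttpServer2 (an internal Hadoop class) can cap the Jetty acceptor and selector counts explicitly instead of inheriting the CPU-based defaults. A minimal sketch; the values are illustrative only, and the builder chain is abbreviated to commonly used options with no SSL or ACLs assumed:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class AcceptorSelectorConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Keep acceptors + selectors well below hadoop.http.max.threads,
    // otherwise too few worker threads remain to serve requests.
    conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 16);
    conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, 1);  // hadoop.http.acceptor.count
    conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, 2);  // hadoop.http.selector.count

    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .setConf(conf)
        .addEndpoint(URI.create("http://localhost:0"))
        .setFindPort(true)
        .build();
    server.start();
    System.out.println("listening on " + server.getConnectorAddress(0));
    server.stop();
  }
}
```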

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
 Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
 server = createTestServer(conf);
 server.addServlet("echo", "/echo", EchoServlet.class);
 server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 ready.await();
 start.countDown();
   }
+
+  /**
+   * Test that the number of acceptors and selectors can be configured by
+   * trying to configure more of them than would be allowed based on the
+   * maximum thread count.
+   */
+  @Test
+  public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, MAX_THREADS - 2);
+HttpServer2 badserver = createTestServer(conf);
+try {
+  

[50/50] hadoop git commit: Merge remote-tracking branch 'apache-commit/trunk' into HDFS-12996

2018-03-13 Thread arp
Merge remote-tracking branch 'apache-commit/trunk' into HDFS-12996


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d4aa014
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d4aa014
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d4aa014

Branch: refs/heads/HDFS-12996
Commit: 8d4aa014d7130206f59bbd1e439465c8ea86572e
Parents: 85a0ed7 9d6994d
Author: Arpit Agarwal 
Authored: Tue Mar 13 15:27:31 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Mar 13 15:27:31 2018 -0700

--
 BUILDING.txt|   4 +-
 NOTICE.txt  |  14 +
 dev-support/bin/verify-license-files| 145 
 dev-support/docker/Dockerfile   |   2 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |   2 +-
 .../main/resources/checkstyle/checkstyle.xml|   5 +-
 .../hadoop-cloud-storage/pom.xml|   7 +-
 hadoop-common-project/hadoop-auth/pom.xml   |  12 +
 hadoop-common-project/hadoop-common/pom.xml |  26 +-
 .../src/main/conf/log4j.properties  |   3 +
 .../java/org/apache/hadoop/fs/FileStatus.java   |   2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |   2 +-
 .../org/apache/hadoop/http/HttpServer2.java |  12 +-
 .../org/apache/hadoop/io/retry/RetryPolicy.java |   7 +-
 .../java/org/apache/hadoop/log/Log4Json.java|   2 +-
 .../java/org/apache/hadoop/net/NetUtils.java| 159 ++--
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 124 
 .../security/RuleBasedLdapGroupsMapping.java|  91 +++
 .../web/DelegationTokenAuthenticator.java   |  17 +-
 .../src/main/resources/core-default.xml |  24 +
 .../src/site/markdown/SecureMode.md |   4 +-
 .../src/site/markdown/SingleCluster.md.vm   |   8 +
 .../conf/TestCommonConfigurationFields.java |   4 +-
 .../org/apache/hadoop/http/TestHttpServer.java  |  23 +-
 .../hadoop/http/TestHttpServerWithSpengo.java   | 494 -
 .../org/apache/hadoop/log/TestLogLevel.java |   4 +-
 .../org/apache/hadoop/net/TestNetUtils.java |  40 +
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 --
 .../TestRuleBasedLdapGroupsMapping.java |  99 +++
 .../apache/hadoop/test/GenericTestUtils.java|  68 +-
 hadoop-common-project/hadoop-kms/pom.xml|   2 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java |   2 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   |   6 +-
 .../crypto/key/kms/server/TestKMSWithZK.java|  39 -
 .../org/apache/hadoop/minikdc/TestMiniKdc.java  |   3 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  15 +
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   2 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  63 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  88 ++-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  46 +-
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   2 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   3 +-
 .../hadoop/hdfs/protocol/LocatedBlocks.java |  12 +-
 .../hdfs/protocol/SnapshotDiffReport.java   |   4 +
 .../protocol/SnapshottableDirectoryStatus.java  |   8 +
 .../ha/RequestHedgingProxyProvider.java |   6 +
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  83 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  57 +-
 .../hadoop/hdfs/web/resources/GetOpParam.java   |   4 +-
 .../ha/TestRequestHedgingProxyProvider.java |  45 ++
 .../hadoop/fs/http/client/HttpFSFileSystem.java |   1 +
 .../hadoop/fs/http/server/FSOperations.java |  20 +-
 .../http/server/HttpFSParametersProvider.java   |  30 +-
 .../hadoop/fs/http/server/HttpFSServer.java |  18 +-
 .../hadoop/fs/http/server/TestHttpFSServer.java | 174 +
 .../http/server/TestHttpFSServerWebServer.java  |  10 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../java/org/apache/hadoop/hdfs/HAUtil.java |  16 +-
 .../NamenodeProtocolServerSideTranslatorPB.java |   3 +-
 .../NamenodeProtocolTranslatorPB.java   |   5 +-
 .../qjournal/client/QuorumJournalManager.java   |   4 +-
 .../token/block/BlockTokenSecretManager.java|  71 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   2 +-
 .../hdfs/server/balancer/NameNodeConnector.java |   5 +-
 .../server/blockmanagement/BlockManager.java|  17 +-
 .../server/datanode/BlockRecoveryWorker.java|   6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  39 +-
 .../hdfs/server/datanode/DiskBalancer.java  |  31 +-
 .../server/datanode/SecureDataNodeStarter.java  |  47 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java|   3 +-
 .../diskbalancer/DiskBalancerConstants.java |   5 -
 

[20/50] hadoop git commit: HDFS-13252. Code refactoring: Remove Diff.ListType.

2018-03-13 Thread arp
HDFS-13252. Code refactoring: Remove Diff.ListType.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba0da278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba0da278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba0da278

Branch: refs/heads/HDFS-12996
Commit: ba0da2785d251745969f88a50d33ce61876d91aa
Parents: 4eeff62
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Mar 9 15:25:41 2018 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Mar 9 15:50:26 2018 -0800

--
 .../hdfs/server/namenode/FSDirRenameOp.java |   3 +-
 .../hdfs/server/namenode/INodeDirectory.java|  10 +-
 .../hdfs/server/namenode/INodeReference.java|   4 -
 .../snapshot/DirectorySnapshottableFeature.java |   5 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 131 +++
 .../snapshot/FSImageFormatPBSnapshot.java   |   6 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  11 +-
 .../snapshot/SnapshotDiffListingInfo.java   |  15 +--
 .../snapshot/SnapshotFSImageFormat.java |   4 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  | 131 +--
 .../namenode/snapshot/SnapshotTestHelper.java   |  79 ++-
 .../snapshot/TestRenameWithSnapshots.java   | 129 +++---
 .../snapshot/TestSetQuotaWithSnapshot.java  |   6 +-
 13 files changed, 260 insertions(+), 274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index efc8da2..6162ceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -588,8 +588,7 @@ class FSDirRenameOp {
 private INode srcChild;
 private INode oldDstChild;
 
-RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
-throws QuotaExceededException {
+RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP) {
   this.fsd = fsd;
   this.srcIIP = srcIIP;
   this.dstIIP = dstIIP;
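The only change to RenameOperation above is dropping a checked exception that the constructor never actually threw. A minimal stand-alone illustration of why that is a safe cleanup (plain Java with hypothetical names, not the HDFS classes):

class RenameOperationSketch {
  private final String src;
  private final String dst;

  // After the cleanup: no "throws QuotaExceededException" on a constructor
  // whose body only copies references, so callers need no try/catch just to
  // construct the object.
  RenameOperationSketch(String src, String dst) {
    this.src = src;
    this.dst = dst;
  }

  public static void main(String[] args) {
    RenameOperationSketch op = new RenameOperationSketch("/src/dir", "/dst/dir");
    System.out.println(op.src + " -> " + op.dst);
  }
}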

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6594a56..72ad9e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFea
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -353,7 +352,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
 // replace the instance in the created list of the diff list
 DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
 if (sf != null) {
-  sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+  sf.getDiffs().replaceCreatedChild(oldChild, newChild);
 }
 
 // update the inodeMap
@@ -746,8 +745,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
   final INode newChild) throws QuotaExceededException {
 DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
 assert sf != null : "Directory does not have snapshot feature";
-sf.getDiffs().removeChild(ListType.DELETED, oldChild);
-sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+sf.getDiffs().removeDeletedChild(oldChild);
+sf.getDiffs().replaceCreatedChild(oldChild, newChild);
 addChild(newChild, true, Snapshot.CURRENT_STATE_ID);
   }
   
@@ -761,8 +760,7 
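The archive truncates the remaining hunks here. The pattern visible in the INodeDirectory hunks above is the point of HDFS-13252: an enum selector argument (Diff.ListType.CREATED / ListType.DELETED) is replaced by intention-revealing methods such as replaceCreatedChild and removeDeletedChild. A minimal stand-alone sketch of that refactoring shape (plain Java; the class and method bodies are illustrative assumptions, not the HDFS Diff implementation):

import java.util.ArrayList;
import java.util.List;

class ChildDiffSketch<T> {
  private final List<T> created = new ArrayList<>();
  private final List<T> deleted = new ArrayList<>();

  // Before the refactoring, callers picked a list with an enum, e.g.
  // replaceChild(ListType.CREATED, oldChild, newChild). Afterwards each list
  // gets its own method, so call sites read naturally and the enum can go.
  void replaceCreatedChild(T oldChild, T newChild) {
    int i = created.indexOf(oldChild);
    if (i >= 0) {
      created.set(i, newChild);
    }
  }

  void removeDeletedChild(T child) {
    deleted.remove(child);
  }

  void addCreated(T child) {
    created.add(child);
  }

  public static void main(String[] args) {
    ChildDiffSketch<String> diff = new ChildDiffSketch<>();
    diff.addCreated("fileA");
    diff.replaceCreatedChild("fileA", "fileB");
    System.out.println(diff.created); // [fileB]
  }
}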
