hadoop git commit: MAPREDUCE-6252. JobHistoryServer should not fail when encountering a missing directory. Contributed by Craig Welch.

2015-04-27 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 618ba707f -> 5e67c4d38


MAPREDUCE-6252. JobHistoryServer should not fail when encountering a
missing directory. Contributed by Craig Welch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e67c4d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e67c4d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e67c4d3

Branch: refs/heads/trunk
Commit: 5e67c4d384193b38a85655c8f93193596821faa5
Parents: 618ba70
Author: Devaraj K deva...@apache.org
Authored: Mon Apr 27 15:01:42 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Apr 27 15:01:42 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/v2/hs/HistoryFileManager.java | 19 ++---
 .../mapreduce/v2/hs/TestHistoryFileManager.java | 22 
 3 files changed, 37 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e67c4d3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4166e6a..f9488fb 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -349,6 +349,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6333. TestEvents,TestAMWebServicesTasks,TestAppController are
 broken due to MAPREDUCE-6297. (Siqi Li via gera)
 
+MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
+missing directory. (Craig Welch via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e67c4d3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 65f8a4f..69f814d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -740,17 +740,22 @@ public class HistoryFileManager extends AbstractService {
 }
   }
 
-  private static List<FileStatus> scanDirectory(Path path, FileContext fc,
+  @VisibleForTesting
+  protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
       PathFilter pathFilter) throws IOException {
     path = fc.makeQualified(path);
     List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
-    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
-    while (fileStatusIter.hasNext()) {
-      FileStatus fileStatus = fileStatusIter.next();
-      Path filePath = fileStatus.getPath();
-      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
-        jhStatusList.add(fileStatus);
+    try {
+      RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
+      while (fileStatusIter.hasNext()) {
+        FileStatus fileStatus = fileStatusIter.next();
+        Path filePath = fileStatus.getPath();
+        if (fileStatus.isFile() && pathFilter.accept(filePath)) {
+          jhStatusList.add(fileStatus);
+        }
       }
+    } catch (FileNotFoundException fe) {
+      LOG.error("Error while scanning directory " + path, fe);
     }
     return jhStatusList;
   }
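The pattern above is easy to exercise on its own. Below is a minimal, hypothetical sketch of the same FileNotFoundException-tolerant scan against the local filesystem; the ScanDemo class and its path are invented for illustration and are not part of this patch:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ScanDemo {
  // Scan a directory, returning an empty list instead of propagating the
  // failure when the directory was deleted between discovery and listing.
  static List<FileStatus> scan(Path path, FileContext fc) throws IOException {
    List<FileStatus> result = new ArrayList<FileStatus>();
    try {
      RemoteIterator<FileStatus> it = fc.listStatus(fc.makeQualified(path));
      while (it.hasNext()) {
        FileStatus status = it.next();
        if (status.isFile()) {
          result.add(status);
        }
      }
    } catch (FileNotFoundException fnfe) {
      // Directory vanished mid-scan (e.g. removed by a cleaner); treat as empty.
      System.err.println("Directory disappeared while scanning: " + path);
    }
    return result;
  }

  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
    System.out.println(scan(new Path("/tmp/no-such-dir"), fc).size()); // prints 0
  }
}

A history directory can be removed by the cleaner between the moment it is discovered and the moment it is listed, so treating a vanished directory as empty keeps the JobHistoryServer running.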

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e67c4d3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
index e2e943a..1c5cc5c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
@@ -21,13 +21,17 @@ package 

hadoop git commit: MAPREDUCE-6252. JobHistoryServer should not fail when encountering a missing directory. Contributed by Craig Welch.

2015-04-27 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b73cfd7eb -> 84ae26573


MAPREDUCE-6252. JobHistoryServer should not fail when encountering a
missing directory. Contributed by Craig Welch.

(cherry picked from commit 5e67c4d384193b38a85655c8f93193596821faa5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84ae2657
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84ae2657
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84ae2657

Branch: refs/heads/branch-2
Commit: 84ae26573c4fccbaab042b7f639bf1d5a1960281
Parents: b73cfd7
Author: Devaraj K deva...@apache.org
Authored: Mon Apr 27 15:01:42 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Apr 27 15:02:44 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/v2/hs/HistoryFileManager.java | 19 ++---
 .../mapreduce/v2/hs/TestHistoryFileManager.java | 22 
 3 files changed, 37 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ae2657/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a1d3523..bdbe3c5 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -101,6 +101,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6333. TestEvents,TestAMWebServicesTasks,TestAppController are
 broken due to MAPREDUCE-6297. (Siqi Li via gera)
 
+MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
+missing directory. (Craig Welch via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ae2657/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 6b9f146..6420303 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -740,17 +740,22 @@ public class HistoryFileManager extends AbstractService {
 }
   }
 
-  private static List<FileStatus> scanDirectory(Path path, FileContext fc,
+  @VisibleForTesting
+  protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
       PathFilter pathFilter) throws IOException {
     path = fc.makeQualified(path);
     List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
-    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
-    while (fileStatusIter.hasNext()) {
-      FileStatus fileStatus = fileStatusIter.next();
-      Path filePath = fileStatus.getPath();
-      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
-        jhStatusList.add(fileStatus);
+    try {
+      RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
+      while (fileStatusIter.hasNext()) {
+        FileStatus fileStatus = fileStatusIter.next();
+        Path filePath = fileStatus.getPath();
+        if (fileStatus.isFile() && pathFilter.accept(filePath)) {
+          jhStatusList.add(fileStatus);
+        }
       }
+    } catch (FileNotFoundException fe) {
+      LOG.error("Error while scanning directory " + path, fe);
     }
     return jhStatusList;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ae2657/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
index e2e943a..1c5cc5c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
+++ 

[04/50] [abbrv] hadoop git commit: YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the correct user. Contributed by Zhijie Shen

2015-04-27 Thread zjshen
YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the correct 
user. Contributed by Zhijie Shen

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78ac5d3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78ac5d3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78ac5d3f

Branch: refs/heads/YARN-2928
Commit: 78ac5d3f9e5a7b3e4fc8fa898f6b02cea45c00b9
Parents: d274d3c
Author: Jian He jia...@apache.org
Authored: Thu Apr 23 11:07:26 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:32 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../distributedshell/ApplicationMaster.java | 165 +--
 .../distributedshell/TestDSAppMaster.java   |  41 +
 .../distributedshell/TestDSFailedAppMaster.java |   2 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  10 ++
 .../client/api/impl/TimelineClientImpl.java |   5 +
 6 files changed, 135 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ac5d3f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 34ca05d..5df2cc7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -341,6 +341,9 @@ Release 2.7.1 - UNRELEASED
 YARN-2605. [RM HA] Rest api endpoints doing redirect incorrectly.
 (Xuan Gong via stevel)
 
+YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the
+correct user. (Zhijie Shen via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ac5d3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index c37fdd0..2470235 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -192,13 +192,14 @@ public class ApplicationMaster {
   private AMRMClientAsync amRMClient;
 
   // In both secure and non-secure modes, this points to the job-submitter.
-  private UserGroupInformation appSubmitterUgi;
+  @VisibleForTesting
+  UserGroupInformation appSubmitterUgi;
 
   // Handle to communicate with the Node Manager
   private NMClientAsync nmClientAsync;
   // Listen to process the response from the Node Manager
   private NMCallbackHandler containerListener;
-  
+
   // Application Attempt Id ( combination of attemptId and fail count )
   @VisibleForTesting
   protected ApplicationAttemptId appAttemptID;
@@ -283,7 +284,8 @@ public class ApplicationMaster {
  private List<Thread> launchThreads = new ArrayList<Thread>();
 
   // Timeline Client
-  private TimelineClient timelineClient;
+  @VisibleForTesting
+  TimelineClient timelineClient;
 
  private final String linux_bash_command = "bash";
  private final String windows_command = "cmd /c";
@@ -531,7 +533,7 @@ public class ApplicationMaster {
        .getOptionValue("priority", "0"));
 
 if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
-  YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
+YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
      if (cliParser.hasOption("timeline_service_version")) {
        String timelineServiceVersion =
            cliParser.getOptionValue("timeline_service_version", "v1");
@@ -544,24 +546,12 @@ public class ApplicationMaster {
            "timeline_service_version is not set properly, should be 'v1' or 'v2'");
 }
   }
-  // Creating the Timeline Client
-  if 

[43/50] [abbrv] hadoop git commit: YARN-3464. Race condition in LocalizerRunner kills localizer before localizing all resources. (Zhihai Xu via kasha)

2015-04-27 Thread zjshen
YARN-3464. Race condition in LocalizerRunner kills localizer before localizing 
all resources. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dc1af59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dc1af59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dc1af59

Branch: refs/heads/YARN-2928
Commit: 7dc1af59a1f9d1921eca6337fc60d15931cf7650
Parents: c950890
Author: Karthik Kambatla ka...@apache.org
Authored: Sun Apr 26 09:13:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../container/ContainerImpl.java|  8 ++-
 .../localizer/ResourceLocalizationService.java  | 53 +++-
 .../localizer/event/LocalizationEventType.java  |  1 +
 .../TestResourceLocalizationService.java| 12 -
 5 files changed, 62 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc1af59/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f87d2d5..50464f8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
 invoked (Brahma Reddy Battula via jlowe)
 
+YARN-3464. Race condition in LocalizerRunner kills localizer before 
+localizing all resources. (Zhihai Xu via kasha)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc1af59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index c9874a6..68669aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -59,7 +59,9 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
@@ -716,7 +718,12 @@ public class ContainerImpl implements Container {
 return ContainerState.LOCALIZING;
   }
 
+  container.dispatcher.getEventHandler().handle(
+  new ContainerLocalizationEvent(LocalizationEventType.
+  CONTAINER_RESOURCES_LOCALIZED, container));
+
   container.sendLaunchEvent();
+  container.metrics.endInitingContainer();
 
   // If this is a recovered container that has already launched, skip
   // uploading resources to the shared cache. We do this to avoid uploading
@@ -734,7 +741,6 @@ public class ContainerImpl implements Container {
 SharedCacheUploadEventType.UPLOAD));
   }
 
-  container.metrics.endInitingContainer();
   return ContainerState.LOCALIZED;
 }
  }
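The ordering matters here: the container now tells the localization service that all of its resources are localized before the launch event fires, so the LocalizerRunner is retired cleanly instead of being killed mid-fetch. A hedged stand-in sketch of that ordering (the nested interfaces are simplified illustrations, not the real NodeManager types):

public class LocalizedTransitionSketch {
  interface LocalizationService { void containerResourcesLocalized(String id); }
  interface Launcher { void launch(String id); }

  private final LocalizationService localization;
  private final Launcher launcher;

  LocalizedTransitionSketch(LocalizationService localization, Launcher launcher) {
    this.localization = localization;
    this.launcher = launcher;
  }

  void onAllResourcesLocalized(String containerId) {
    // Notify the localization service first so it can retire the container's
    // LocalizerRunner while no fetches are pending...
    localization.containerResourcesLocalized(containerId);
    // ...and only then fire the launch event.
    launcher.launch(containerId);
  }

  public static void main(String[] args) {
    new LocalizedTransitionSketch(
        id -> System.out.println("resources localized: " + id),
        id -> System.out.println("launch: " + id))
        .onAllResourcesLocalized("container_e01_000001");
  }
}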


[34/50] [abbrv] hadoop git commit: YARN-3406. Display count of running containers in the RM's Web UI. Contributed by Ryu Kobayashi.

2015-04-27 Thread zjshen
YARN-3406. Display count of running containers in the RM's Web UI. Contributed 
by Ryu Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1164c7bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1164c7bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1164c7bd

Branch: refs/heads/YARN-2928
Commit: 1164c7bd8ed71dc8bc6e3f28b3aa2a38105dfae0
Parents: 8257b1d
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sat Apr 25 07:17:11 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../hadoop/yarn/server/webapp/WebPageUtils.java | 25 +---
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  9 +++
 .../webapp/FairSchedulerAppsBlock.java  |  2 ++
 .../webapp/FairSchedulerPage.java   |  2 +-
 .../resourcemanager/webapp/RMAppsBlock.java |  6 -
 6 files changed, 37 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1164c7bd/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 84d7383..605079e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -232,6 +232,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3511. Add errors and warnings page to ATS. (Varun Vasudev via xgong)
 
+YARN-3406. Display count of running containers in the RM's Web UI.
+(Ryu Kobayashi via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1164c7bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index 5acabf5..6ca5011 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -24,10 +24,11 @@ import static 
org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 public class WebPageUtils {
 
   public static String appsTableInit() {
-return appsTableInit(false);
+return appsTableInit(false, true);
   }
 
-  public static String appsTableInit(boolean isFairSchedulerPage) {
+  public static String appsTableInit(
+      boolean isFairSchedulerPage, boolean isResourceManager) {
    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
 // FairSchedulerPage's table is a bit different
 return tableInit()
@@ -35,22 +36,30 @@ public class WebPageUtils {
      .append(", bDeferRender: true")
      .append(", bProcessing: true")
      .append("\n, aoColumnDefs: ")
-      .append(getAppsTableColumnDefs(isFairSchedulerPage))
+      .append(getAppsTableColumnDefs(isFairSchedulerPage, isResourceManager))
      // Sort by id upon page load
      .append(", aaSorting: [[0, 'desc']]}").toString();
  }

-  private static String getAppsTableColumnDefs(boolean isFairSchedulerPage) {
+  private static String getAppsTableColumnDefs(
+      boolean isFairSchedulerPage, boolean isResourceManager) {
    StringBuilder sb = new StringBuilder();
-    return sb
-      .append("[\n")
+    sb.append("[\n")
      .append("{'sType':'string', 'aTargets': [0]")
      .append(", 'mRender': parseHadoopID }")
      .append("\n, {'sType':'numeric', 'aTargets': " +
          (isFairSchedulerPage ? "[6, 7]" : "[5, 6]"))
      .append(", 'mRender': renderHadoopDate }")
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
+      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
+    if (isFairSchedulerPage) {
+      sb.append("[11]");
+    } else if (isResourceManager) {
+      sb.append("[10]");
+    } else {
+      sb.append("[9]");
+    }
+    sb.append(", 'mRender': parseHadoopProgress }]");
+    return sb.toString();
  }
 
   public static String attemptsTableInit() {
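For context, a hedged sketch of how callers pick up the new overload; the flag combinations below are inferred from the diff, and the demo class is invented:

import org.apache.hadoop.yarn.server.webapp.WebPageUtils;

public class AppsTableInitDemo {
  public static void main(String[] args) {
    // RM web UI: the progress column shifts to index 10 to make room for the
    // new running-containers column.
    System.out.println(WebPageUtils.appsTableInit(false, true));
    // FairScheduler page: one extra leading column, so index 11.
    System.out.println(WebPageUtils.appsTableInit(true, false));
    // Other servers (e.g. the application history UI) keep index 9.
    System.out.println(WebPageUtils.appsTableInit(false, false));
  }
}

The extra boolean only selects which aTargets index (9, 10 or 11) the progress renderer binds to, depending on whether the page also shows the running-containers column.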


[18/50] [abbrv] hadoop git commit: YARN-3516. killing ContainerLocalizer action doesn't take effect when private localizer receives FETCH_FAILURE status. Contributed by zhihai xu

2015-04-27 Thread zjshen
YARN-3516. killing ContainerLocalizer action doesn't take effect when
private localizer receives FETCH_FAILURE status. Contributed by zhihai
xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45ccd919
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45ccd919
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45ccd919

Branch: refs/heads/YARN-2928
Commit: 45ccd91903d88807149b2f9fa205ab624b28376d
Parents: d6fc3e4
Author: Xuan xg...@apache.org
Authored: Thu Apr 23 16:40:40 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../containermanager/localizer/ResourceLocalizationService.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ccd919/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5df2cc7..589952f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -321,6 +321,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3434. Interaction between reservations and userlimit can result in 
 significant ULF violation (tgraves)
 
+YARN-3516. killing ContainerLocalizer action doesn't take effect when
+private localizer receives FETCH_FAILURE status.(zhihai xu via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ccd919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4236392..611fe80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -989,7 +989,7 @@ public class ResourceLocalizationService extends 
CompositeService
   case FETCH_FAILURE:
 final String diagnostics = stat.getException().toString();
  LOG.warn(req + " failed: " + diagnostics);
-response.setLocalizerAction(LocalizerAction.DIE);
+action = LocalizerAction.DIE;
 getLocalResourcesTracker(req.getVisibility(), user, applicationId)
   .handle(new ResourceFailedLocalizationEvent(
   req, diagnostics));
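The bug's shape: inside the loop over fetch statuses, the old code wrote LocalizerAction.DIE straight into the heartbeat response, and a later write of the default action overwrote it, so the localizer never died. Recording the decision in a local variable that is applied to the response once, after the loop, fixes that. A hedged, self-contained sketch (the enums and method are simplified stand-ins, not the real NodeManager types):

import java.util.Arrays;
import java.util.List;

public class HeartbeatSketch {
  enum LocalizerAction { LIVE, DIE }
  enum Status { FETCH_SUCCESS, FETCH_FAILURE, FETCH_PENDING }

  static LocalizerAction processHeartbeat(List<Status> statuses) {
    // Track the decision locally instead of writing it straight into the
    // response: a later iteration can no longer clobber a DIE decision.
    LocalizerAction action = LocalizerAction.LIVE;
    for (Status stat : statuses) {
      if (stat == Status.FETCH_FAILURE) {
        action = LocalizerAction.DIE;
      }
      // ... other statuses are still processed here ...
    }
    return action; // applied to the response exactly once, at the end
  }

  public static void main(String[] args) {
    System.out.println(processHeartbeat(
        Arrays.asList(Status.FETCH_FAILURE, Status.FETCH_SUCCESS))); // DIE
  }
}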



[15/50] [abbrv] hadoop git commit: HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

2015-04-27 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
deleted file mode 100644
index 0056078..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ /dev/null
@@ -1,484 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
-import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
-import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-class JsonUtilClient {
-  static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
-
-  /** Convert a Json map to a RemoteException. */
-  static RemoteException toRemoteException(final Map<?, ?> json) {
-    final Map<?, ?> m
-        = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
-    final String message = (String)m.get("message");
-    final String javaClassName = (String)m.get("javaClassName");
-    return new RemoteException(javaClassName, message);
-  }
-
-  /** Convert a Json map to a Token. */
-  static Token<? extends TokenIdentifier> toToken(
-      final Map<?, ?> m) throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    final Token<DelegationTokenIdentifier> token
-        = new Token<>();
-    token.decodeFromUrlString((String)m.get("urlString"));
-    return token;
-  }
-
-  /** Convert a Json map to a Token of BlockTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  static Token<BlockTokenIdentifier> toBlockToken(
-      final Map<?, ?> m) throws IOException {
-    return (Token<BlockTokenIdentifier>)toToken(m);
-  }
-
-  /** Convert a string to a FsPermission object. */
-  static FsPermission toFsPermission(
-  final String s, Boolean aclBit, Boolean encBit) {
-FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-final boolean aBit = (aclBit != null) ? aclBit : false;
-final boolean eBit = (encBit != null) ? encBit : false;
-if (aBit || eBit) {
-  return new FsPermissionExtension(perm, aBit, eBit);
-} else {
-  return perm;
-}
-  }
-
-  /** Convert a Json map to a HdfsFileStatus object. */
-  static HdfsFileStatus toFileStatus(final Map<?, ?> json,
-      boolean includesType) {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = includesType ?
-        (Map<?,

[19/50] [abbrv] hadoop git commit: HDFS-8110. Remove unsupported 'hdfs namenode -rollingUpgrade downgrade' from document. Contributed by J.Andreina.

2015-04-27 Thread zjshen
HDFS-8110. Remove unsupported 'hdfs namenode -rollingUpgrade downgrade' from 
document. Contributed by J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50eb78d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50eb78d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50eb78d4

Branch: refs/heads/YARN-2928
Commit: 50eb78d456ef2e25458d17cd1c64a4c0be55a0b3
Parents: f01a146
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Apr 24 20:32:26 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../src/site/xdoc/HdfsRollingUpgrade.xml| 26 +++-
 2 files changed, 6 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50eb78d4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b442bad..56f8ec3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -315,6 +315,9 @@ Trunk (Unreleased)
 HDFS-4681. 
TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
 fails using IBM java (Ayappan via aw)
 
+HDFS-8110. Remove unsupported 'hdfs namenode -rollingUpgrade downgrade'
+from document. (J.Andreina via aajisaka)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50eb78d4/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index 1c3dc60..f0b0ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -190,14 +190,12 @@
    only if both the namenode layout version and the datanode layout version
    are not changed between these two releases.
  </p>
-
-  <subsection name="Downgrade without Downtime" id="DowngradeWithoutDowntime">
  <p>
    In a HA cluster,
    when a rolling upgrade from an old software release to a new software release is in progress,
    it is possible to downgrade, in a rolling fashion, the upgraded machines back to the old software release.
    Same as before, suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
-    Below are the steps for rolling downgrade:
+    Below are the steps for rolling downgrade without downtime:
  </p>
  <ol>
    <li>Downgrade <em>DNs</em><ol>
@@ -214,16 +212,12 @@
    </ol></li>
    <li>Downgrade Active and Standby <em>NNs</em><ol>
      <li>Shutdown and downgrade <em>NN2</em>.</li>
-      <li>Start <em>NN2</em> as standby normally. (Note that it is incorrect to use the
-        <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade downgrade</code></a>
-        option here.)
+      <li>Start <em>NN2</em> as standby normally.
      </li>
      <li>Failover from <em>NN1</em> to <em>NN2</em>
        so that <em>NN2</em> becomes active and <em>NN1</em> becomes standby.</li>
      <li>Shutdown and upgrade <em>NN1</em>.</li>
-      <li>Start <em>NN1</em> as standby normally. (Note that it is incorrect to use the
-        <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade downgrade</code></a>
-        option here.)
+      <li>Start <em>NN1</em> as standby normally.
      </li>
    </ol></li>
    <li>Finalize Rolling Downgrade<ul>
@@ -236,20 +230,6 @@
    since protocols may be changed in a backward compatible manner but not forward compatible,
    i.e. old datanodes can talk to the new namenodes but not vice versa.
  </p>
-  </subsection>
-  <subsection name="Downgrade with Downtime" id="DowngradeWithDowntime">
-  <p>
-    Administrator may choose to first shutdown the cluster and then downgrade it.
-    The following are the steps:
-  </p>
-  <ol>
-  <li>Shutdown all <em>NNs</em> and <em>DNs</em>.</li>
-  <li>Restore the pre-upgrade release in all machines.</li>
-  <li>Start <em>NNs</em> with the
-    <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade downgrade</code></a> option.</li>
-  <li>Start <em>DNs</em> normally.</li>
-  </ol>
-  </subsection>
  </section>
 
   section name=Rollback id=Rollback



[29/50] [abbrv] hadoop git commit: Fix commit version for YARN-3537

2015-04-27 Thread zjshen
Fix commit version for YARN-3537


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8257b1db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8257b1db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8257b1db

Branch: refs/heads/YARN-2928
Commit: 8257b1db2322e186dbdc414672db6c002567f09b
Parents: 6117a70
Author: Jason Lowe jl...@apache.org
Authored: Fri Apr 24 22:07:53 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8257b1db/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aafb74f..84d7383 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -331,6 +331,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3444. Fix typo capabililty. (Gabor Liptak via aajisaka)
 
+YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
+invoked (Brahma Reddy Battula via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -354,9 +357,6 @@ Release 2.7.1 - UNRELEASED
 YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the
 correct user. (Zhijie Shen via jianhe)
 
-YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
-invoked (Brahma Reddy Battula via jlowe)
-
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[35/50] [abbrv] hadoop git commit: MAPREDUCE-6333. TestEvents, TestAMWebServicesTasks, TestAppController are broken due to MAPREDUCE-6297. (Siqi Li via gera)

2015-04-27 Thread zjshen
MAPREDUCE-6333. TestEvents,TestAMWebServicesTasks,TestAppController are broken 
due to MAPREDUCE-6297. (Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c2b960b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c2b960b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c2b960b

Branch: refs/heads/YARN-2928
Commit: 7c2b960bc3491652b5b583868a4d759c19291c68
Parents: ec0a1bc
Author: Gera Shegalov g...@apache.org
Authored: Fri Apr 24 09:21:44 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:52 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/jobhistory/TestEvents.java | 29 ++--
 .../v2/app/webapp/TestAMWebServicesTasks.java   | 27 --
 .../v2/app/webapp/TestAppController.java|  9 +++---
 4 files changed, 41 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c2b960b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4cb7d1c..f895034 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -358,6 +358,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6330. Fix typo in Task Attempt API's URL in documentations.
 (Ryu Kobayashi via ozawa)
 
+MAPREDUCE-6333. TestEvents,TestAMWebServicesTasks,TestAppController are
+broken due to MAPREDUCE-6297. (Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c2b960b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
index 51847a9..741ca07 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
@@ -40,6 +40,7 @@ import org.junit.Test;
 
 public class TestEvents {
 
+  private static final String taskId = "task_1_2_r_3";
   /**
* test a getters of TaskAttemptFinishedEvent and TaskAttemptFinished
* 
@@ -132,7 +133,7 @@ public class TestEvents {
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
@@ -142,42 +143,42 @@ public class TestEvents {
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptStarted) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptFinished) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptStarted) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptFinished) e.getDatum()).taskid.toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
-assertEquals("task_1_2_r03_4",
+assertEquals(taskId,
 ((TaskAttemptUnsuccessfulCompletion) 

[32/50] [abbrv] hadoop git commit: YARN-2498. Respect labels in preemption policy of capacity scheduler for inter-queue preemption. Contributed by Wangda Tan

2015-04-27 Thread zjshen
YARN-2498. Respect labels in preemption policy of capacity scheduler for 
inter-queue preemption. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5269e053
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5269e053
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5269e053

Branch: refs/heads/YARN-2928
Commit: 5269e05339fc7830588c37839bc3c858a7544d6f
Parents: 5a95182
Author: Jian He jia...@apache.org
Authored: Fri Apr 24 17:03:13 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../ProportionalCapacityPreemptionPolicy.java   |  585 +
 .../rmcontainer/RMContainerImpl.java|   28 +-
 .../scheduler/capacity/CapacityScheduler.java   |2 +-
 .../scheduler/capacity/LeafQueue.java   |   70 +-
 .../scheduler/common/AssignmentInformation.java |   31 +-
 ...estProportionalCapacityPreemptionPolicy.java |   94 +-
 ...pacityPreemptionPolicyForNodePartitions.java | 1211 ++
 .../scheduler/capacity/TestChildQueueOrder.java |2 +-
 .../scheduler/capacity/TestLeafQueue.java   |4 +-
 .../TestNodeLabelContainerAllocation.java   |   16 +
 .../scheduler/capacity/TestParentQueue.java |2 +-
 12 files changed, 1750 insertions(+), 298 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5269e053/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 605079e..195cf44 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -166,6 +166,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3319. Implement a FairOrderingPolicy. (Craig Welch via wangda)
 
+YARN-2498. Respect labels in preemption policy of capacity scheduler for
+inter-queue preemption. (Wangda Tan via jianhe)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5269e053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 2ab4197..1f47b5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -26,11 +27,10 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 import java.util.PriorityQueue;
 import java.util.Set;
+import java.util.TreeSet;
 
-import org.apache.commons.collections.map.HashedMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -49,7 +48,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptE
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import 

[41/50] [abbrv] hadoop git commit: HADOOP-11357. Print information of the build environment in test-patch.sh (aw)

2015-04-27 Thread zjshen
HADOOP-11357. Print information of the build environment in test-patch.sh (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c7441c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c7441c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c7441c

Branch: refs/heads/YARN-2928
Commit: 87c7441cc4147cbea71dbb9cf734081d758174bf
Parents: c099222
Author: Allen Wittenauer a...@apache.org
Authored: Sun Apr 26 15:51:08 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:53 2015 -0700

--
 dev-support/test-patch.sh   | 17 +
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c7441c/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 2537e85..e331deb 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -234,6 +234,21 @@ function add_jira_table
   fi
 }
 
+## @description  Put the final environment information at the bottom
+## @description  of the footer table
+## @stability stable
+## @audience private
+## @replaceable  yes
+function close_jira_footer
+{
+  # shellcheck disable=SC2016
+  local -r javaversion=$(${JAVA_HOME}/bin/java -version 2>&1 | head -1 | ${AWK} '{print $NF}' | tr -d \")
+  local -r unamea=$(uname -a)
+
+  add_jira_footer "Java" "${javaversion}"
+  add_jira_footer "uname" "${unamea}"
+}
+
 ## @description  Put the final elapsed time at the bottom of the table.
 ## @audience private
 ## @stability    stable
@@ -2389,6 +2404,8 @@ postinstall
 
 runtests
 
+close_jira_footer
+
 close_jira_table
 
 output_to_console ${RESULT}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c7441c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5ba71a4..597496a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -519,6 +519,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10597. RPC Server signals backoff to clients when all request
 queues are full. (Ming Ma via Arpit Agarwal)
 
+HADOOP-11357. Print information of the build enviornment in test-patch.sh
+(aw)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp



[07/50] [abbrv] hadoop git commit: HADOOP-11627. Remove io.native.lib.available. Contributed by Brahma Reddy Battula.

2015-04-27 Thread zjshen
HADOOP-11627. Remove io.native.lib.available. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6fc3e43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6fc3e43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6fc3e43

Branch: refs/heads/YARN-2928
Commit: d6fc3e4370ab50dacffd318327ade846093d046e
Parents: 86dbb93
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Apr 24 08:08:55 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:47 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/conf/Configuration.java   |  2 -
 .../fs/CommonConfigurationKeysPublic.java   |  5 --
 .../hadoop/io/compress/bzip2/Bzip2Factory.java  | 13 +
 .../hadoop/io/compress/zlib/ZlibFactory.java| 25 +++--
 .../apache/hadoop/util/NativeCodeLoader.java| 26 -
 .../src/main/resources/core-default.xml |  9 
 .../src/site/markdown/DeprecatedProperties.md   |  1 +
 .../apache/hadoop/io/compress/TestCodec.java| 56 +++-
 .../zlib/TestZlibCompressorDecompressor.java|  2 -
 .../file/tfile/TestTFileSeqFileComparison.java  |  2 -
 .../mapred/TestConcatenatedCompressedInput.java | 47 
 12 files changed, 81 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fc3e43/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 777828e..22ef212 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -23,6 +23,9 @@ Trunk (Unreleased)
 
 HADOOP-11731. Rework the changelog and releasenotes (aw)
 
+HADOOP-11627. Remove io.native.lib.available.
+(Brahma Reddy Battula via aajisaka)
+
   NEW FEATURES
 
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fc3e43/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 7c25e6c..54e07c6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -447,8 +447,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
        CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY),
      new DeprecationDelta("dfs.df.interval",
        CommonConfigurationKeys.FS_DF_INTERVAL_KEY),
-      new DeprecationDelta("hadoop.native.lib",
-        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY),
      new DeprecationDelta("fs.default.name",
        CommonConfigurationKeys.FS_DEFAULT_NAME_KEY),
      new DeprecationDelta("dfs.umaskmode",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fc3e43/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 87c2aba..90c6934 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -38,11 +38,6 @@ public class CommonConfigurationKeysPublic {
   
   // The Keys
  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
-  public static final String  IO_NATIVE_LIB_AVAILABLE_KEY =
-    "io.native.lib.available";
-  /** Default value for IO_NATIVE_LIB_AVAILABLE_KEY */
-  public static final boolean IO_NATIVE_LIB_AVAILABLE_DEFAULT = true;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY =
     "net.topology.script.number.args";
   /** Default value for NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY */
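For background, deprecated keys are remapped through Configuration's public deprecation API; a small hedged sketch of what a DeprecationDelta such as the removed hadoop.native.lib mapping does (the demo key names are invented, not from this patch):

import org.apache.hadoop.conf.Configuration;

public class DeprecationDemo {
  public static void main(String[] args) {
    // Register a mapping from an invented old key to an invented new key;
    // this mirrors what the removed hadoop.native.lib delta used to do.
    Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
        new Configuration.DeprecationDelta("demo.old.key", "demo.new.key")
    });
    Configuration conf = new Configuration(false);
    conf.set("demo.old.key", "42");
    // A value set under the deprecated key is readable under the new one.
    System.out.println(conf.get("demo.new.key")); // prints 42
  }
}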

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fc3e43/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java

[27/50] [abbrv] hadoop git commit: HADOOP-11843. Make setting up the build environment easier. Contributed by Niels Basjes.

2015-04-27 Thread zjshen
HADOOP-11843. Make setting up the build environment easier. Contributed by 
Niels Basjes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3851c68a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3851c68a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3851c68a

Branch: refs/heads/YARN-2928
Commit: 3851c68a4bb9673964f5a94638534131171154c5
Parents: c0338c9
Author: cnauroth cnaur...@apache.org
Authored: Fri Apr 24 13:05:18 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:50 2015 -0700

--
 BUILDING.txt|  39 +-
 dev-support/docker/Dockerfile   |  67 +++
 dev-support/docker/hadoop_env_checks.sh | 118 +++
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 start-build-env.sh  |  50 
 5 files changed, 276 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3851c68a/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 3ca9fae..de0e0e8 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -16,6 +16,43 @@ Requirements:
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 
 
--
+The easiest way to get an environment with all the appropriate tools is by 
means
+of the provided Docker config.
+This requires a recent version of docker ( 1.4.1 and higher are known to work 
).
+
+On Linux:
+Install Docker and run this command:
+
+$ ./start-build-env.sh
+
+On Mac:
+First make sure Homebrew has been installed ( http://brew.sh/ )
+$ brew install docker boot2docker
+$ boot2docker init -m 4096
+$ boot2docker start
+$ $(boot2docker shellinit)
+$ ./start-build-env.sh
+
+The prompt which is then presented is located at a mounted version of the 
source tree
+and all required tools for testing and building have been installed and 
configured.
+
+Note that from within this docker environment you ONLY have access to the 
Hadoop source
+tree from where you started. So if you need to run
+dev-support/test-patch.sh /path/to/my.patch
+then the patch must be placed inside the hadoop source tree.
+
+Known issues:
+- On Mac with Boot2Docker the performance on the mounted directory is 
currently extremely slow.
+  This is a known problem related to boot2docker on the Mac.
+  See:
+https://github.com/boot2docker/boot2docker/issues/593
+  This issue has been resolved as a duplicate, and they point to a new feature 
for utilizing NFS mounts
+  as the proposed solution:
+https://github.com/boot2docker/boot2docker/issues/64
+  An alternative solution to this problem is to install Linux natively inside a
+  virtual machine and run your IDE and Docker etc. inside that VM.
+
+--
 Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
 
 * Oracle JDK 1.7 (preferred)
@@ -29,7 +66,7 @@ Installing required packages for clean install of Ubuntu 
14.04 LTS Desktop:
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev
 * ProtocolBuffer 2.5.0 (required)
-  $ sudo apt-get -y install libprotobuf-dev protobuf-compiler
+  $ sudo apt-get -y install protobuf-compiler
 
 Optional packages:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3851c68a/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
new file mode 100644
index 000..81296dc
--- /dev/null
+++ b/dev-support/docker/Dockerfile
@@ -0,0 +1,67 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Dockerfile for installing the necessary dependencies for building Hadoop.
+# See BUILDING.txt.
+
+# FROM 

[30/50] [abbrv] hadoop git commit: HDFS-8211. DataNode UUID is always null in the JMX counter. (Contributed by Anu Engineer)

2015-04-27 Thread zjshen
HDFS-8211. DataNode UUID is always null in the JMX counter. (Contributed by Anu 
Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a951823
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a951823
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a951823

Branch: refs/heads/YARN-2928
Commit: 5a9518235d804447b541a9558287fed85e07c903
Parents: 1164c7b
Author: Arpit Agarwal a...@apache.org
Authored: Fri Apr 24 16:47:48 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:51 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 +-
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 65 
 3 files changed, 70 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a951823/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 317211e..a7b5ed3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -560,6 +560,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte.
 (Zhe Zhang via wang)
 
+HDFS-8211. DataNode UUID is always null in the JMX counter. (Anu Engineer
+via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a951823/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 23ab43a..2401d9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1226,7 +1226,7 @@ public class DataNode extends ReconfigurableBase
*
* @throws IOException
*/
-  private synchronized void checkDatanodeUuid() throws IOException {
+  synchronized void checkDatanodeUuid() throws IOException {
 if (storage.getDatanodeUuid() == null) {
   storage.setDatanodeUuid(generateUuid());
   storage.writeAll();
@@ -3159,7 +3159,7 @@ public class DataNode extends ReconfigurableBase
   }
 
   public String getDatanodeUuid() {
-return id == null ? null : id.getDatanodeUuid();
+return storage == null ? null : storage.getDatanodeUuid();
   }
 
   boolean shouldRun() {
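
For readers outside the Hadoop tree, the two-part idiom behind this fix can be
sketched in plain Java: generate the UUID lazily under a lock, and have the JMX
getter read it from the storage object (which exists once storage is
initialized) rather than from a registration object that is apparently still
null when JMX first polls. The Storage class below is a hypothetical stand-in,
not Hadoop's DataStorage.

import java.util.UUID;

public class UuidIdiom {
  // Hypothetical stand-in for the DataNode's storage object.
  static class Storage {
    private String datanodeUuid;
    String getDatanodeUuid() { return datanodeUuid; }
    void setDatanodeUuid(String uuid) { datanodeUuid = uuid; }
  }

  private final Storage storage = new Storage();

  // Mirrors the shape of checkDatanodeUuid(): assign a UUID once, lazily.
  // (The real method also persists the storage state afterwards.)
  synchronized void checkUuid() {
    if (storage.getDatanodeUuid() == null) {
      storage.setDatanodeUuid(UUID.randomUUID().toString());
    }
  }

  // Mirrors the fixed getter: null-safe, and backed by storage.
  public String getDatanodeUuid() {
    return storage == null ? null : storage.getDatanodeUuid();
  }

  public static void main(String[] args) {
    UuidIdiom dn = new UuidIdiom();
    System.out.println(dn.getDatanodeUuid()); // null before initialization
    dn.checkUuid();
    System.out.println(dn.getDatanodeUuid()); // stable UUID afterwards
  }
}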

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a951823/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
new file mode 100644
index 000..34e53a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+public class TestDataNodeUUID {
+
+  /**
+   * This 

[31/50] [abbrv] hadoop git commit: YARN-2498. Respect labels in preemption policy of capacity scheduler for inter-queue preemption. Contributed by Wangda Tan

2015-04-27 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5269e053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
new file mode 100644
index 000..e13320c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -0,0 +1,1211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+
+import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MONITORING_INTERVAL;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.NATURAL_TERMINATION_FACTOR;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.TOTAL_PREEMPTION_PER_ROUND;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.WAIT_TIME_BEFORE_KILL;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
+import 

[47/50] [abbrv] hadoop git commit: YARN-3530. ATS throws exception on trying to filter results without otherinfo. Contributed by zhijie shen

2015-04-27 Thread zjshen
YARN-3530. ATS throws exception on trying to filter results without
otherinfo. Contributed by zhijie shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ee2409b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ee2409b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ee2409b

Branch: refs/heads/YARN-2928
Commit: 3ee2409bc2ec4cad3d2dfe5bdf8296eb4bf5ae4b
Parents: cea12ac
Author: Xuan xg...@apache.org
Authored: Mon Apr 27 10:36:42 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/timeline/LeveldbTimelineStore.java   | 34 ++-
 .../server/timeline/TimelineStoreTestUtils.java | 99 ++--
 3 files changed, 104 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ee2409b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 50464f8..576d2da 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -335,6 +335,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3464. Race condition in LocalizerRunner kills localizer before 
 localizing all resources. (Zhihai Xu via kasha)
 
+YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
+(zhijie shen via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ee2409b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index d521f70..8cfa0c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -357,6 +357,9 @@ public class LeveldbTimelineStore extends AbstractService
   iterator = new LeveldbIterator(db);
   iterator.seek(prefix);
 
+  if (fields == null) {
+fields = EnumSet.allOf(Field.class);
+  }
   return getEntity(entityId, entityType, revStartTime, fields, iterator,
   prefix, prefix.length);
 } catch(DBException e) {
@@ -373,10 +376,6 @@ public class LeveldbTimelineStore extends AbstractService
  private static TimelineEntity getEntity(String entityId, String entityType,
      Long startTime, EnumSet<Field> fields, LeveldbIterator iterator,
      byte[] prefix, int prefixlen) throws IOException {
-if (fields == null) {
-  fields = EnumSet.allOf(Field.class);
-}
-
 TimelineEntity entity = new TimelineEntity();
 boolean events = false;
 boolean lastEvent = false;
@@ -590,6 +589,25 @@ public class LeveldbTimelineStore extends AbstractService
      String entityType, Long limit, Long starttime, Long endtime,
      String fromId, Long fromTs, Collection<NameValuePair> secondaryFilters,
      EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
+// Even if other info and primary filter fields are not included, we
+// still need to load them to match secondary filters when they are
+// non-empty
+if (fields == null) {
+  fields = EnumSet.allOf(Field.class);
+}
+boolean addPrimaryFilters = false;
+boolean addOtherInfo = false;
+    if (secondaryFilters != null && secondaryFilters.size() > 0) {
+  if (!fields.contains(Field.PRIMARY_FILTERS)) {
+fields.add(Field.PRIMARY_FILTERS);
+addPrimaryFilters = true;
+  }
+  if (!fields.contains(Field.OTHER_INFO)) {
+fields.add(Field.OTHER_INFO);
+addOtherInfo = true;
+  }
+}
+
 LeveldbIterator iterator = null;
 try {
   KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
@@ -690,6 +708,14 @@ public class LeveldbTimelineStore extends AbstractService
 entity.setDomainId(DEFAULT_DOMAIN_ID);
   }
   if (checkAcl == null || checkAcl.check(entity)) {
+// Remove primary filter and other info if they are added for
+ 
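
The shape of the fix, as a self-contained sketch: a null field set means "load
everything", and when secondary filters are present the two fields they inspect
are forced on, with flags remembering that they were added for matching only
and must be stripped from the returned entities afterwards, as the patch does
after the ACL check. The enum below is a local stand-in for the timeline
store's Field.

import java.util.EnumSet;

public class FieldWidening {
  enum Field { EVENTS, PRIMARY_FILTERS, OTHER_INFO }

  static EnumSet<Field> normalize(EnumSet<Field> fields, boolean hasFilters) {
    if (fields == null) {
      fields = EnumSet.allOf(Field.class);  // null means "all fields"
    }
    if (hasFilters) {
      // add() returns true only if the field was newly added, i.e. it was
      // requested for matching only and should be removed from results later.
      boolean addedPrimary = fields.add(Field.PRIMARY_FILTERS);
      boolean addedOther = fields.add(Field.OTHER_INFO);
      System.out.println("added for matching only: primary=" + addedPrimary
          + ", other=" + addedOther);
    }
    return fields;
  }

  public static void main(String[] args) {
    System.out.println(normalize(null, false));                    // all fields
    System.out.println(normalize(EnumSet.of(Field.EVENTS), true)); // widened
  }
}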

[46/50] [abbrv] hadoop git commit: MAPREDUCE-6252. JobHistoryServer should not fail when encountering a missing directory. Contributed by Craig Welch.

2015-04-27 Thread zjshen
MAPREDUCE-6252. JobHistoryServer should not fail when encountering a
missing directory. Contributed by Craig Welch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f017f222
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f017f222
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f017f222

Branch: refs/heads/YARN-2928
Commit: f017f222821ea636eafb61431ef283a1d49cd7c9
Parents: 3044c8d
Author: Devaraj K deva...@apache.org
Authored: Mon Apr 27 15:01:42 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:54 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/v2/hs/HistoryFileManager.java | 19 ++---
 .../mapreduce/v2/hs/TestHistoryFileManager.java | 22 
 3 files changed, 37 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f017f222/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index dca42c4..4d217cd 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -364,6 +364,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6333. TestEvents,TestAMWebServicesTasks,TestAppController are
 broken due to MAPREDUCE-6297. (Siqi Li via gera)
 
+MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
+missing directory. (Craig Welch via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f017f222/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 65f8a4f..69f814d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -740,17 +740,22 @@ public class HistoryFileManager extends AbstractService {
 }
   }
 
-  private static List<FileStatus> scanDirectory(Path path, FileContext fc,
+  @VisibleForTesting
+  protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
       PathFilter pathFilter) throws IOException {
     path = fc.makeQualified(path);
     List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
-    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
-    while (fileStatusIter.hasNext()) {
-      FileStatus fileStatus = fileStatusIter.next();
-      Path filePath = fileStatus.getPath();
-      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
-        jhStatusList.add(fileStatus);
+    try {
+      RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
+      while (fileStatusIter.hasNext()) {
+        FileStatus fileStatus = fileStatusIter.next();
+        Path filePath = fileStatus.getPath();
+        if (fileStatus.isFile() && pathFilter.accept(filePath)) {
+          jhStatusList.add(fileStatus);
+        }
       }
+    } catch (FileNotFoundException fe) {
+      LOG.error("Error while scanning directory " + path, fe);
     }
     return jhStatusList;
   }
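
The behavior the patch buys, restated as a runnable sketch: a history directory
that is missing (or vanishes mid-scan) yields an empty listing instead of an
exception that would fail the JobHistoryServer's scan. This uses java.nio.file
rather than Hadoop's FileContext, purely for illustration.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class SafeScan {
  static List<Path> scanDirectory(Path dir) throws IOException {
    List<Path> result = new ArrayList<>();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
      for (Path p : stream) {
        if (Files.isRegularFile(p)) {  // files only, as in scanDirectory()
          result.add(p);
        }
      }
    } catch (NoSuchFileException e) {
      // Directory never existed or disappeared: log and treat as empty.
      System.err.println("Error while scanning directory " + dir + ": " + e);
    }
    return result;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(scanDirectory(Paths.get("/definitely/not/there")));
  }
}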

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f017f222/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
index e2e943a..1c5cc5c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
@@ -21,13 +21,17 @@ package org.apache.hadoop.mapreduce.v2.hs;
 
 import java.io.File;
 import 

[22/50] [abbrv] hadoop git commit: YARN-3511. Add errors and warnings page to ATS. Contributed by Varun Vasudev

2015-04-27 Thread zjshen
YARN-3511. Add errors and warnings page to ATS. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72f3618e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72f3618e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72f3618e

Branch: refs/heads/YARN-2928
Commit: 72f3618e40ba07548c12593dfaf93b89424b571f
Parents: 50eb78d
Author: Xuan xg...@apache.org
Authored: Fri Apr 24 09:41:59 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:49 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../webapp/AHSController.java   |  4 ++
 .../webapp/AHSErrorsAndWarningsPage.java| 57 
 .../webapp/AHSWebApp.java   |  1 +
 .../webapp/NavBlock.java| 30 +--
 .../server/webapp/ErrorsAndWarningsBlock.java   | 23 +++-
 .../server/resourcemanager/webapp/NavBlock.java |  2 +-
 7 files changed, 114 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f3618e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 589952f..ca87e2f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -230,6 +230,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3503. Expose disk utilization percentage and bad local and log dir 
 counts in NM metrics. (Varun Vasudev via jianhe)
 
+YARN-3511. Add errors and warnings page to ATS. (Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f3618e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
index 4e00bc8..4037f51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
@@ -52,4 +52,8 @@ public class AHSController extends Controller {
   public void logs() {
 render(AHSLogsPage.class);
   }
+
+  public void errorsAndWarnings() {
+render(AHSErrorsAndWarningsPage.class);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f3618e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
new file mode 100644
index 000..3798ee5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES 

[28/50] [abbrv] hadoop git commit: HADOOP-11876. Refactor code to make it more readable, minor maybePrintStats bug (Zoran Dimitrijevic via raviprak)

2015-04-27 Thread zjshen
HADOOP-11876. Refactor code to make it more readable, minor maybePrintStats bug 
(Zoran Dimitrijevic via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a13dcee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a13dcee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a13dcee

Branch: refs/heads/YARN-2928
Commit: 9a13dceec9f7aa239a0c84dc734b87116e5edef0
Parents: 3851c68
Author: Ravi Prakash ravip...@altiscale.com
Authored: Fri Apr 24 13:39:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:50 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../java/org/apache/hadoop/tools/SimpleCopyListing.java   | 10 +-
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a13dcee/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 80c8a54..826c77e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11864. JWTRedirectAuthenticationHandler breaks java8 javadocs.
 (Larry McCay via stevel)
 
+HADOOP-11876. Refactor code to make it more readable, minor
+maybePrintStats bug (Zoran Dimitrijevic via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a13dcee/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index b9ba099..4ea1dc9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -343,11 +343,12 @@ public class SimpleCopyListing extends CopyListing {
   }
 }
         result = new WorkReport<FileStatus[]>(
-            fileSystem.listStatus(parent.getPath()), 0, true);
+            fileSystem.listStatus(parent.getPath()), retry, true);
       } catch (FileNotFoundException fnf) {
         LOG.error("FileNotFoundException exception in listStatus: " +
             fnf.getMessage());
-        result = new WorkReport<FileStatus[]>(new FileStatus[0], 0, true, fnf);
+        result = new WorkReport<FileStatus[]>(new FileStatus[0], retry, true,
+            fnf);
       } catch (Exception e) {
         LOG.error("Exception in listStatus. Will send for retry.");
 FileStatus[] parentList = new FileStatus[1];
@@ -391,7 +392,6 @@ public class SimpleCopyListing extends CopyListing {
 
 for (FileStatus status : sourceDirs) {
       workers.put(new WorkRequest<FileStatus>(status, 0));
-  maybePrintStats();
 }
 
 while (workers.hasWork()) {
@@ -402,7 +402,7 @@ public class SimpleCopyListing extends CopyListing {
   if (LOG.isDebugEnabled()) {
         LOG.debug("Recording source-path: " + child.getPath() + " for copy.");
   }
-  if (retry == 0) {
+  if (workResult.getSuccess()) {
 CopyListingFileStatus childCopyListingStatus =
   DistCpUtils.toCopyListingFileStatus(sourceFS, child,
 preserveAcls  child.isDirectory(),
@@ -417,7 +417,6 @@ public class SimpleCopyListing extends CopyListing {
           LOG.debug("Traversing into source dir: " + child.getPath());
         }
         workers.put(new WorkRequest<FileStatus>(child, retry));
-  maybePrintStats();
 }
   } else {
         LOG.error("Giving up on " + child.getPath() +
@@ -472,5 +471,6 @@ public class SimpleCopyListing extends CopyListing {
   totalDirs++;
 }
 totalPaths++;
+maybePrintStats();
   }
 }
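
A trimmed, hypothetical analogue of the report type involved, to show why the
retry count has to be threaded through instead of hardcoded to 0, and why
callers should ask getSuccess() rather than test retry == 0:

public class WorkReportDemo {
  static class WorkReport<T> {
    private final T item;
    private final int retry;
    private final boolean success;

    WorkReport(T item, int retry, boolean success) {
      this.item = item;
      this.retry = retry;
      this.success = success;
    }

    T getItem() { return item; }
    int getRetry() { return retry; }
    boolean getSuccess() { return success; }
  }

  public static void main(String[] args) {
    // Before the fix, a listing that succeeded on attempt 2 was reported with
    // retry == 0, so a caller testing "retry == 0" conflated "first attempt"
    // with "succeeded". Carrying both fields keeps the questions separate.
    WorkReport<String> r = new WorkReport<>("dir-listing", 2, true);
    if (r.getSuccess()) {
      System.out.println(r.getItem() + " ok, retry count " + r.getRetry());
    }
  }
}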



[38/50] [abbrv] hadoop git commit: Moving YARN-3351, YARN-3382, YARN-3472, MAPREDUCE-6238 to the 2.7.1 CHANGES.txt sections given the recent merge into branch-2.7.

2015-04-27 Thread zjshen
Moving YARN-3351, YARN-3382, YARN-3472, MAPREDUCE-6238 to the 2.7.1 CHANGES.txt
sections given the recent merge into branch-2.7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec0a1bce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec0a1bce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec0a1bce

Branch: refs/heads/YARN-2928
Commit: ec0a1bce73f12032199251a8e8013d1e2cf9c253
Parents: 5269e05
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Fri Apr 24 17:18:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:52 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt |  6 +++---
 hadoop-yarn-project/CHANGES.txt  | 17 +
 2 files changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0a1bce/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 1d8b8ff..4cb7d1c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -349,9 +349,6 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
-MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
-which is a regression from MR1 (zxu via rkanter)
-
 MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
 event thread. (Sangjin Lee via gera)
 
@@ -375,6 +372,9 @@ Release 2.7.1 - UNRELEASED
 
 MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
 
+MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
+which is a regression from MR1 (zxu via rkanter)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0a1bce/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 195cf44..f87d2d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -257,8 +257,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3205 FileSystemRMStateStore should disable FileSystem Cache to avoid
 get a Filesystem with an old configuration. (Zhihai Xu via ozawa)
 
-YARN-3351. AppMaster tracking URL is broken in HA. (Anubhav Dhoot via 
kasha)
-
 YARN-3269. Yarn.nodemanager.remote-app-log-dir could not be configured to 
 fully qualified path. (Xuan Gong via junping_du)
 
@@ -302,12 +300,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
 (Zhihai Xu via kasha)
 
-YARN-3382. Some of UserMetricsInfo metrics are incorrectly set to root
-queue metrics. (Rohit Agarwal via jianhe)
-
-YARN-3472. Fixed possible leak in DelegationTokenRenewer#allTokens.
-(Rohith Sharmaks via jianhe)
-
 YARN-3266. RMContext#inactiveNodes should have NodeId as map key.
 (Chengbing Liu via jianhe)
 
@@ -351,6 +343,7 @@ Release 2.7.1 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
+
 YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
 calling getQueue (Jason Lowe via wangda)
 
@@ -363,6 +356,14 @@ Release 2.7.1 - UNRELEASED
 YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the
 correct user. (Zhijie Shen via jianhe)
 
+YARN-3351. AppMaster tracking URL is broken in HA. (Anubhav Dhoot via 
kasha)
+
+YARN-3382. Some of UserMetricsInfo metrics are incorrectly set to root
+queue metrics. (Rohit Agarwal via jianhe)
+
+YARN-3472. Fixed possible leak in DelegationTokenRenewer#allTokens.
+(Rohith Sharmaks via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[48/50] [abbrv] hadoop git commit: MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.

2015-04-27 Thread zjshen
MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cea12ac0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cea12ac0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cea12ac0

Branch: refs/heads/YARN-2928
Commit: cea12ac03508c4f034469f33d8b69a101c0fe916
Parents: f017f22
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Apr 28 01:47:37 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:54 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../src/site/markdown/MapReduceTutorial.md  | 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cea12ac0/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4d217cd..108cf75 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -367,6 +367,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
 missing directory. (Craig Welch via devaraj)
 
+MAPREDUCE-6341. Fix typo in mapreduce tutorial. (John Michael Luy
+via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cea12ac0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index ccc9590..cd087d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -210,7 +210,9 @@ Assuming that:
 
 Sample text-files as input:
 
-$ bin/hadoop fs -ls /user/joe/wordcount/input/ /user/joe/wordcount/input/file01 /user/joe/wordcount/input/file02
+$ bin/hadoop fs -ls /user/joe/wordcount/input/
+/user/joe/wordcount/input/file01
+/user/joe/wordcount/input/file02
 
 $ bin/hadoop fs -cat /user/joe/wordcount/input/file01
 Hello World Bye World
@@ -224,12 +226,12 @@ Run the application:
 
 Output:
 
-$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0`
+$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0
 Bye 1
 Goodbye 1
 Hadoop 2
 Hello 2
-World 2`
+World 2
 
 Applications can specify a comma separated list of paths which would be 
present in the current working directory of the task using the option `-files`. 
The `-libjars` option allows applications to add jars to the classpaths of the 
maps and reduces. The option `-archives` allows them to pass comma separated 
list of archives as arguments. These archives are unarchived and a link with 
name of the archive is created in the current working directory of tasks. More 
details about the command line options are available at [Commands 
Guide](../../hadoop-project-dist/hadoop-common/CommandsManual.html).
 
@@ -288,13 +290,13 @@ The output of the first map:
 
     < Bye, 1>
     < Hello, 1>
-    < World, 2>`
+    < World, 2>
 
 The output of the second map:
 
     < Goodbye, 1>
     < Hadoop, 2>
-    < Hello, 1>`
+    < Hello, 1>
 
 ```java
public void reduce(Text key, Iterable<IntWritable> values,



[33/50] [abbrv] hadoop git commit: YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore invoked. Contributed by Brahma Reddy Battula

2015-04-27 Thread zjshen
YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore 
invoked. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6117a701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6117a701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6117a701

Branch: refs/heads/YARN-2928
Commit: 6117a701a1e8f47936c7429ecacd58b1db60d801
Parents: 0331b4d
Author: Jason Lowe jl...@apache.org
Authored: Fri Apr 24 22:02:53 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../yarn/server/nodemanager/NodeManager.java| 26 +++-
 2 files changed, 17 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6117a701/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fa26329..aafb74f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -354,6 +354,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3522. Fixed DistributedShell to instantiate TimeLineClient as the
 correct user. (Zhijie Shen via jianhe)
 
+YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
+invoked (Brahma Reddy Battula via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6117a701/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 6f71004..609975b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -185,18 +185,20 @@ public class NodeManager extends CompositeService
   }
 
   private void stopRecoveryStore() throws IOException {
-    nmStore.stop();
-    if (null != context) {
-      if (context.getDecommissioned() && nmStore.canRecover()) {
-        LOG.info("Removing state store due to decommission");
-        Configuration conf = getConfig();
-        Path recoveryRoot =
-            new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR));
-        LOG.info("Removing state store at " + recoveryRoot
-            + " due to decommission");
-        FileSystem recoveryFs = FileSystem.getLocal(conf);
-        if (!recoveryFs.delete(recoveryRoot, true)) {
-          LOG.warn("Unable to delete " + recoveryRoot);
+    if (null != nmStore) {
+      nmStore.stop();
+      if (null != context) {
+        if (context.getDecommissioned() && nmStore.canRecover()) {
+          LOG.info("Removing state store due to decommission");
+          Configuration conf = getConfig();
+          Path recoveryRoot =
+              new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR));
+          LOG.info("Removing state store at " + recoveryRoot
+              + " due to decommission");
+          FileSystem recoveryFs = FileSystem.getLocal(conf);
+          if (!recoveryFs.delete(recoveryRoot, true)) {
+            LOG.warn("Unable to delete " + recoveryRoot);
+          }
         }
       }
     }
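
The guard pattern in miniature, with illustrative names rather than YARN's
actual classes: every dereference on the stop path is conditional, because
serviceInit() can fail before the store or the context was ever created.

public class SafeStop {
  static class Store {
    void stop() { System.out.println("store stopped"); }
    boolean canRecover() { return true; }
  }

  private Store nmStore;   // still null if init failed early
  private Object context;  // likewise

  void stopRecoveryStore() {
    if (nmStore != null) {  // the added guard: no NPE on early init failure
      nmStore.stop();
      if (context != null && nmStore.canRecover()) {
        System.out.println("removing state store");
      }
    }
  }

  public static void main(String[] args) {
    new SafeStop().stopRecoveryStore();  // a no-op, and crucially not an NPE
  }
}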



hadoop git commit: HADOOP-11870. [JDK8] AuthenticationFilter, CertificateUtil, SignerSecretProviders, KeyAuthorizationKeyProvider Javadoc issues (rkanter)

2015-04-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6bae5962c -> 9fec02c06


HADOOP-11870. [JDK8] AuthenticationFilter, CertificateUtil, 
SignerSecretProviders, KeyAuthorizationKeyProvider Javadoc issues (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fec02c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fec02c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fec02c0

Branch: refs/heads/trunk
Commit: 9fec02c069f9bb24b5ee99031917075b4c7a7682
Parents: 6bae596
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 27 13:25:11 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 27 13:25:11 2015 -0700

--
 .../authentication/server/AuthenticationFilter.java| 13 +
 .../security/authentication/util/CertificateUtil.java  |  3 ++-
 .../util/RolloverSignerSecretProvider.java |  2 +-
 .../authentication/util/SignerSecretProvider.java  |  2 +-
 .../authentication/util/ZKSignerSecretProvider.java|  2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../key/kms/server/KeyAuthorizationKeyProvider.java|  4 ++--
 7 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fec02c0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 9cfa3c2..0f86623 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -589,6 +589,13 @@ public class AuthenticationFilter implements Filter {
   /**
* Delegates call to the servlet filter chain. Sub-classes my override this
* method to perform pre and post tasks.
+   *
+   * @param filterChain the filter chain object.
+   * @param request the request object.
+   * @param response the response object.
+   *
+   * @throws IOException thrown if an IO error occurred.
+   * @throws ServletException thrown if a processing error occurred.
*/
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
@@ -598,9 +605,15 @@ public class AuthenticationFilter implements Filter {
   /**
* Creates the Hadoop authentication HTTP cookie.
*
+   * @param resp the response object.
* @param token authentication token for the cookie.
+   * @param domain the cookie domain.
   * @param path the cookie path.
   * @param expires UNIX timestamp that indicates the expire date of the
   *          cookie. It has no effect if its value < 0.
+   * @param isSecure is the cookie secure?
+   * @param token the token.
+   * @param expires the cookie expiration time.
*
* XXX the following code duplicate some logic in Jetty / Servlet API,
* because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fec02c0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
index 77b2530..1ca59ae 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
@@ -36,7 +36,8 @@ public class CertificateUtil {
*
* @param pem
*  - the pem encoding from config without the header and footer
-   * @return RSAPublicKey
+   * @return RSAPublicKey the RSA public key
+   * @throws ServletException thrown if a processing error occurred
*/
   public static RSAPublicKey parseRSAPublicKey(String pem) throws 
ServletException {
 String fullPem = PEM_HEADER + pem + PEM_FOOTER;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fec02c0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
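
Background for the change: JDK8 runs javadoc with doclint enabled, which fails
the build when a parameter, return value, or declared exception is left
undocumented. A small sketch of a method whose javadoc should satisfy
javadoc -Xdoclint:all (the method itself is an invented example):

import java.io.IOException;

public class DoclintClean {
  /**
   * Truncates a name to at most {@code limit} characters.
   *
   * @param name the value to truncate; must not be null.
   * @param limit the maximum number of characters to keep.
   * @return the possibly truncated name.
   * @throws IOException thrown if {@code limit} is negative.
   */
  public static String truncate(String name, int limit) throws IOException {
    if (limit < 0) {
      throw new IOException("negative limit: " + limit);
    }
    return name.length() <= limit ? name : name.substring(0, limit);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(truncate("JobHistoryServer", 10));
  }
}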

[3/3] hadoop git commit: YARN-3464. Race condition in LocalizerRunner kills localizer before localizing all resources. (Zhihai Xu via kasha)

2015-04-27 Thread kasha
YARN-3464. Race condition in LocalizerRunner kills localizer before localizing 
all resources. (Zhihai Xu via kasha)

(cherry picked from commit 47279c3228185548ed09c36579b420225e4894f5)
(cherry picked from commit 4045c41afe440b773d006e962bf8a5eae3fdc284)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ddcc7e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ddcc7e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ddcc7e5

Branch: refs/heads/branch-2.7
Commit: 4ddcc7e5b5b6d7d01d6dc3c79fa330d6a44e59ba
Parents: bec78f9
Author: Karthik Kambatla ka...@apache.org
Authored: Sun Apr 26 09:13:46 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Apr 27 13:37:06 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../container/ContainerImpl.java|  8 ++-
 .../localizer/ResourceLocalizationService.java  | 53 +++-
 .../localizer/event/LocalizationEventType.java  |  1 +
 .../TestResourceLocalizationService.java| 12 -
 5 files changed, 61 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddcc7e5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5760f34..4f55003 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -41,6 +41,8 @@ Release 2.7.1 - UNRELEASED
 YARN-3516. killing ContainerLocalizer action doesn't take effect when
    private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
 
+YARN-3464. Race condition in LocalizerRunner kills localizer before 
+localizing all resources. (Zhihai Xu via kasha)
 
 Release 2.7.0 - 2015-04-20
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddcc7e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 131d439..f55e0e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -59,7 +59,9 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
@@ -715,7 +717,12 @@ public class ContainerImpl implements Container {
 return ContainerState.LOCALIZING;
   }
 
+  container.dispatcher.getEventHandler().handle(
+  new ContainerLocalizationEvent(LocalizationEventType.
+  CONTAINER_RESOURCES_LOCALIZED, container));
+
   container.sendLaunchEvent();
+  container.metrics.endInitingContainer();
 
   // If this is a recovered container that has already launched, skip
   // uploading resources to the shared cache. We do this to avoid uploading
@@ -733,7 +740,6 @@ public class ContainerImpl implements Container {
 SharedCacheUploadEventType.UPLOAD));
   }
 
-  container.metrics.endInitingContainer();
   return ContainerState.LOCALIZED;
 }
 

[1/3] hadoop git commit: YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu via kasha)

2015-04-27 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 176d22b44 -> 4ddcc7e5b


YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu 
via kasha)

(cherry picked from commit 6495940eae09418a939882a8955845f9241a6485)
(cherry picked from commit 53e0bf5c172c396780b96fda8dd31ad799a25fed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5cc78e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5cc78e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5cc78e5

Branch: refs/heads/branch-2.7
Commit: b5cc78e57e5ae3a3194e72132ded1c5261ea5f60
Parents: 176d22b
Author: Karthik Kambatla ka...@apache.org
Authored: Thu Apr 9 00:07:49 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Apr 27 12:39:21 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../nodemanager/containermanager/container/ContainerImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5cc78e5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d9d8f5f..914b1bc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3472. Fixed possible leak in DelegationTokenRenewer#allTokens.
 (Rohith Sharmaks via jianhe)
 
+YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+(Zhihai Xu via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5cc78e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cf3d8e7..131d439 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -638,7 +639,7 @@ public class ContainerImpl implements Container {
   return ContainerState.LOCALIZATION_FAILED;
 }
     Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
-        new HashMap<LocalResourceVisibility,
+        new LinkedHashMap<LocalResourceVisibility,
             Collection<LocalResourceRequest>>();
 if (!container.publicRsrcs.isEmpty()) {
   req.put(LocalResourceVisibility.PUBLIC, container.publicRsrcs);
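
Why the one-line change matters, in a runnable sketch: LinkedHashMap iterates
in insertion order, while HashMap iterates in hash order, so the visibility
buckets built here could previously be replayed in an arbitrary order.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class OrderDemo {
  public static void main(String[] args) {
    Map<String, String> hashed = new HashMap<>();        // hash order
    Map<String, String> linked = new LinkedHashMap<>();  // insertion order
    for (String k : new String[] {"PUBLIC", "PRIVATE", "APPLICATION"}) {
      hashed.put(k, "rsrc-" + k);
      linked.put(k, "rsrc-" + k);
    }
    // The LinkedHashMap is guaranteed to print PUBLIC, PRIVATE, APPLICATION;
    // the HashMap may or may not, depending on the keys' hash codes.
    System.out.println("HashMap order:       " + hashed.keySet());
    System.out.println("LinkedHashMap order: " + linked.keySet());
  }
}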



[2/3] hadoop git commit: YARN-3516. killing ContainerLocalizer action doesn't take effect when private localizer receives FETCH_FAILURE status. Contributed by zhihai xu

2015-04-27 Thread kasha
YARN-3516. killing ContainerLocalizer action doesn't take effect when
private localizer receives FETCH_FAILURE status. Contributed by zhihai
xu

(cherry picked from commit 0b3f8957a87ada1a275c9904b211fdbdcefafb02)
(cherry picked from commit 8f6053ae517d30dedece4f0577341ec3c2482252)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bec78f9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bec78f9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bec78f9b

Branch: refs/heads/branch-2.7
Commit: bec78f9b85381bf05a2b3b1391ebeaaf4caa7ba9
Parents: b5cc78e
Author: Xuan xg...@apache.org
Authored: Thu Apr 23 16:40:40 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Apr 27 13:35:38 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 4 
 .../containermanager/localizer/ResourceLocalizationService.java  | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec78f9b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 914b1bc..5760f34 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -38,6 +38,10 @@ Release 2.7.1 - UNRELEASED
 YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
 (Zhihai Xu via kasha)
 
+YARN-3516. killing ContainerLocalizer action doesn't take effect when
+private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
+
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec78f9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index dd50ead..bb05946 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -981,7 +981,7 @@ public class ResourceLocalizationService extends 
CompositeService
   case FETCH_FAILURE:
 final String diagnostics = stat.getException().toString();
        LOG.warn(req + " failed: " + diagnostics);
-response.setLocalizerAction(LocalizerAction.DIE);
+action = LocalizerAction.DIE;
 getLocalResourcesTracker(req.getVisibility(), user, applicationId)
   .handle(new ResourceFailedLocalizationEvent(
   req, diagnostics));
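
The pattern of the fix, sketched with invented enums: the decision is recorded
in a local variable while every status is processed, and applied to the
response exactly once at the end, so later bookkeeping (or a final default of
LIVE) can no longer overwrite a DIE decision made mid-loop.

public class DeferredAction {
  enum LocalizerAction { LIVE, DIE }
  enum Status { FETCH_SUCCESS, FETCH_FAILURE }

  static LocalizerAction decide(Status[] statuses) {
    LocalizerAction action = LocalizerAction.LIVE;
    for (Status s : statuses) {
      if (s == Status.FETCH_FAILURE) {
        action = LocalizerAction.DIE;  // remembered, not applied mid-loop
      }
      // ... per-status bookkeeping continues for the remaining statuses ...
    }
    return action;  // single point where the response would be populated
  }

  public static void main(String[] args) {
    System.out.println(decide(new Status[] {
        Status.FETCH_FAILURE, Status.FETCH_SUCCESS }));  // DIE
  }
}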



hadoop git commit: MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the udpate of AM-RM tokens when they roll-over. Contributed by Jason Lowe.

2015-04-27 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 32cd2c8d4 -> 9fc32c5c4


MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM 
tokens when they roll-over. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fc32c5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fc32c5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fc32c5c

Branch: refs/heads/trunk
Commit: 9fc32c5c4d1d5f50c605bdb0e3b13f44c86660c8
Parents: 32cd2c8
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 27 14:58:16 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 27 14:58:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   4 +
 .../v2/app/local/LocalContainerAllocator.java   |  28 +++-
 .../app/local/TestLocalContainerAllocator.java  | 152 +--
 3 files changed, 173 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fc32c5c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f1d0493..eb6feb9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -3,6 +3,7 @@ Hadoop MapReduce Change Log
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
+
 MAPREDUCE-5785. Derive heap size or mapreduce.*.memory.mb automatically.
 (Gera Shegalov and Karthik Kambatla via gera)
 
@@ -372,6 +373,9 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM
+tokens when they roll-over. (Jason Lowe via vinodkv)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fc32c5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 74dfb39..aed1023 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.mapreduce.v2.app.local;
 
+import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
@@ -35,17 +37,22 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 
 /**
  * Allocates containers locally. Doesn't allocate a real container;
@@ -99,8 +106,9 @@ public class LocalContainerAllocator extends 
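
The diff body is cut off here, but the new imports above (IOException, Text, UserGroupInformation, AllocateResponse, Token, ClientRMProxy, AMRMTokenIdentifier) outline the shape of the fix: when the RM rolls the AM-RM master key, the uber AM must pick the new token out of the AllocateResponse and install it in its own UGI. A hedged sketch of that step, reconstructed from the imports; the method name and exact ordering are illustrative:

  private void updateAMRMToken(Token token) throws IOException {
    // Convert the YARN protocol record into a security-layer token...
    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
        new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(
            token.getIdentifier().array(), token.getPassword().array(),
            new Text(token.getKind()), new Text(token.getService()));
    // ...point it at the RM's token service, and add it to the current UGI
    // so later allocate() heartbeats authenticate with the new key.
    amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
    UserGroupInformation.getCurrentUser().addToken(amrmToken);
  }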

hadoop git commit: HADOOP-11870. [JDK8] AuthenticationFilter, CertificateUtil, SignerSecretProviders, KeyAuthorizationKeyProvider Javadoc issues (rkanter)

2015-04-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 de85ff58d - f00815ac8


HADOOP-11870. [JDK8] AuthenticationFilter, CertificateUtil, 
SignerSecretProviders, KeyAuthorizationKeyProvider Javadoc issues (rkanter)

(cherry picked from commit 9fec02c069f9bb24b5ee99031917075b4c7a7682)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f00815ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f00815ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f00815ac

Branch: refs/heads/branch-2
Commit: f00815ac8afc721e492972184f9e80cd358b07ec
Parents: de85ff5
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 27 13:25:11 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 27 13:26:00 2015 -0700

--
 .../authentication/server/AuthenticationFilter.java| 13 +
 .../security/authentication/util/CertificateUtil.java  |  3 ++-
 .../util/RolloverSignerSecretProvider.java |  2 +-
 .../authentication/util/SignerSecretProvider.java  |  2 +-
 .../authentication/util/ZKSignerSecretProvider.java|  2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../key/kms/server/KeyAuthorizationKeyProvider.java|  4 ++--
 7 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f00815ac/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 9cfa3c2..0f86623 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -589,6 +589,13 @@ public class AuthenticationFilter implements Filter {
   /**
* Delegates call to the servlet filter chain. Sub-classes may override this
* method to perform pre and post tasks.
+   *
+   * @param filterChain the filter chain object.
+   * @param request the request object.
+   * @param response the response object.
+   *
+   * @throws IOException thrown if an IO error occurred.
+   * @throws ServletException thrown if a processing error occurred.
*/
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
@@ -598,9 +605,15 @@ public class AuthenticationFilter implements Filter {
   /**
* Creates the Hadoop authentication HTTP cookie.
*
+   * @param resp the response object.
* @param token authentication token for the cookie.
+   * @param domain the cookie domain.
* @param path the cookie path.
* @param expires UNIX timestamp that indicates the expire date of the
*cookie. It has no effect if its value &lt; 0.
+   * @param isSecure is the cookie secure?
+   * @param token the token.
+   * @param expires the cookie expiration time.
*
* XXX the following code duplicate some logic in Jetty / Servlet API,
* because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f00815ac/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
index 77b2530..1ca59ae 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
@@ -36,7 +36,8 @@ public class CertificateUtil {
*
* @param pem
*  - the pem encoding from config without the header and footer
-   * @return RSAPublicKey
+   * @return RSAPublicKey the RSA public key
+   * @throws ServletException thrown if a processing error occurred
*/
   public static RSAPublicKey parseRSAPublicKey(String pem) throws 
ServletException {
 String fullPem = PEM_HEADER + pem + PEM_FOOTER;
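
For context on why these mechanical Javadoc additions land now: javadoc in JDK8 runs doclint by default and fails the build on undocumented parameters, undocumented declared exceptions, and unescaped '<' or '>' in comment text. A minimal, non-Hadoop illustration of a doclint-clean comment (hypothetical class):

  public final class DoclintClean {
    private DoclintClean() {
    }

    /**
     * Clamps a value into the inclusive range [min, max].
     *
     * @param value the value to clamp.
     * @param min the lower bound; must be &lt;= max (a bare '<' is rejected).
     * @param max the upper bound.
     * @return the clamped value.
     * @throws IllegalArgumentException thrown if min &gt; max.
     */
    public static int clamp(int value, int min, int max) {
      if (min > max) {
        throw new IllegalArgumentException("min > max");
      }
      return Math.min(max, Math.max(min, value));
    }
  }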


hadoop git commit: Update CHANGES.txt - Pulled in YARN-3465, YARN-3516, and YARN-3464 to branch-2.7 (for 2.7.1)

2015-04-27 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f00815ac8 - d2d8bc243


Update CHANGES.txt - Pulled in YARN-3465, YARN-3516, and YARN-3464 to 
branch-2.7 (for 2.7.1)

(cherry picked from commit 32cd2c8d429ddb87348299c00b7d851246a25b4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2d8bc24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2d8bc24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2d8bc24

Branch: refs/heads/branch-2
Commit: d2d8bc243b9f6ccc5da0f964e56d0218fe1cfa40
Parents: f00815a
Author: Karthik Kambatla ka...@apache.org
Authored: Mon Apr 27 13:42:45 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Apr 27 13:44:06 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2d8bc24/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e18bf8d..20de1ed 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -185,9 +185,6 @@ Release 2.8.0 - UNRELEASED
 YARN-2890. MiniYarnCluster should turn on timeline service if
 configured to do so. (Mit Desai via hitesh)
 
-YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
-(Zhihai Xu via kasha)
-
 YARN-3266. RMContext#inactiveNodes should have NodeId as map key.
 (Chengbing Liu via jianhe)
 
@@ -209,9 +206,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3434. Interaction between reservations and userlimit can result in 
 significant ULF violation (tgraves)
 
-YARN-3516. killing ContainerLocalizer action doesn't take effect when
-private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
-
 YARN-3387. Previous AM's container completed status couldn't pass to 
current
 AM if AM and RM restarted during the same time. (sandflee via jianhe)
 
@@ -220,9 +214,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
 invoked (Brahma Reddy Battula via jlowe)
 
-YARN-3464. Race condition in LocalizerRunner kills localizer before 
-localizing all resources. (Zhihai Xu via kasha)
-
 YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
 (zhijie shen via xgong)
 
@@ -261,6 +252,15 @@ Release 2.7.1 - UNRELEASED
 YARN-3472. Fixed possible leak in DelegationTokenRenewer#allTokens.
 (Rohith Sharmaks via jianhe)
 
+YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+(Zhihai Xu via kasha)
+
+YARN-3516. killing ContainerLocalizer action doesn't take effect when
+private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
+
+YARN-3464. Race condition in LocalizerRunner kills localizer before 
+localizing all resources. (Zhihai Xu via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



hadoop git commit: MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM tokens when they roll-over. Contributed by Jason Lowe.

2015-04-27 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d2d8bc243 - 32dc13d90


MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM 
tokens when they roll-over. Contributed by Jason Lowe.

(cherry picked from commit 9fc32c5c4d1d5f50c605bdb0e3b13f44c86660c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32dc13d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32dc13d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32dc13d9

Branch: refs/heads/branch-2
Commit: 32dc13d907a416049bdb7deff429725bd6dbcb49
Parents: d2d8bc2
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 27 14:58:16 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 27 14:59:57 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../v2/app/local/LocalContainerAllocator.java   |  28 +++-
 .../app/local/TestLocalContainerAllocator.java  | 152 +--
 3 files changed, 172 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32dc13d9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d0411b3..cbafabd 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -124,6 +124,9 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM
+tokens when they roll-over. (Jason Lowe via vinodkv)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32dc13d9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 74dfb39..aed1023 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.mapreduce.v2.app.local;
 
+import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
@@ -35,17 +37,22 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 
 /**
  * Allocates containers locally. Doesn't allocate a real container;
@@ -99,8 +106,9 @@ public class LocalContainerAllocator extends RMCommunicator
 AllocateRequest.newInstance(this.lastResponseID,
   super.getApplicationProgress(), new 

hadoop git commit: Update CHANGES.txt - Pulled in YARN-3465, YARN-3516, and YARN-3464 to branch-2.7 (for 2.7.1)

2015-04-27 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9fec02c06 - 32cd2c8d4


Update CHANGES.txt - Pulled in YARN-3465, YARN-3516, and YARN-3464 to 
branch-2.7 (for 2.7.1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32cd2c8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32cd2c8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32cd2c8d

Branch: refs/heads/trunk
Commit: 32cd2c8d429ddb87348299c00b7d851246a25b4e
Parents: 9fec02c
Author: Karthik Kambatla ka...@apache.org
Authored: Mon Apr 27 13:42:45 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Apr 27 13:43:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32cd2c8d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fdc3f4a..1ac7a13 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -233,9 +233,6 @@ Release 2.8.0 - UNRELEASED
 YARN-2890. MiniYarnCluster should turn on timeline service if
 configured to do so. (Mit Desai via hitesh)
 
-YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
-(Zhihai Xu via kasha)
-
 YARN-3266. RMContext#inactiveNodes should have NodeId as map key.
 (Chengbing Liu via jianhe)
 
@@ -257,9 +254,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3434. Interaction between reservations and userlimit can result in 
 significant ULF violation (tgraves)
 
-YARN-3516. killing ContainerLocalizer action doesn't take effect when
-private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
-
 YARN-3387. Previous AM's container completed status couldn't pass to 
current
 AM if AM and RM restarted during the same time. (sandflee via jianhe)
 
@@ -268,9 +262,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
 invoked (Brahma Reddy Battula via jlowe)
 
-YARN-3464. Race condition in LocalizerRunner kills localizer before 
-localizing all resources. (Zhihai Xu via kasha)
-
 YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
 (zhijie shen via xgong)
 
@@ -306,6 +297,15 @@ Release 2.7.1 - UNRELEASED
 YARN-3472. Fixed possible leak in DelegationTokenRenewer#allTokens.
 (Rohith Sharmaks via jianhe)
 
+YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+(Zhihai Xu via kasha)
+
+YARN-3516. killing ContainerLocalizer action doesn't take effect when
+private localizer receives FETCH_FAILURE status. (zhihai xu via xgong)
+
+YARN-3464. Race condition in LocalizerRunner kills localizer before 
+localizing all resources. (Zhihai Xu via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[24/50] [abbrv] hadoop git commit: YARN-3444. Fix typo capabililty. Contributed by Gabor Liptak.

2015-04-27 Thread zjshen
YARN-3444. Fix typo capabililty. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0331b4dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0331b4dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0331b4dd

Branch: refs/heads/YARN-2928
Commit: 0331b4ddf493f879a05eca9ca7a70f04e636fd3b
Parents: 9a13dce
Author: Akira Ajisaka aajis...@apache.org
Authored: Sat Apr 25 06:08:16 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:50 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 2 ++
 .../yarn/applications/distributedshell/ApplicationMaster.java| 4 ++--
 .../apache/hadoop/yarn/applications/distributedshell/Client.java | 4 ++--
 .../src/site/markdown/WritingYarnApplications.md | 4 ++--
 4 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0331b4dd/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7150068..fa26329 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -329,6 +329,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3387. Previous AM's container completed status couldn't pass to 
current
 AM if AM and RM restarted during the same time. (sandflee via jianhe)
 
+YARN-3444. Fix typo capabililty. (Gabor Liptak via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0331b4dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 2470235..add34af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -638,10 +638,10 @@ public class ApplicationMaster {
 // Dump out information about cluster capability as seen by the
 // resource manager
 int maxMem = response.getMaximumResourceCapability().getMemory();
-LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
+LOG.info("Max mem capability of resources in this cluster " + maxMem);
 
 int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
-LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);
+LOG.info("Max vcores capability of resources in this cluster " + maxVCores);
 
 // A resource ask cannot exceed the max.
 if (containerMemory > maxMem) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0331b4dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 033197f..ff2f594 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -528,7 +528,7 @@ public class Client {
 // Memory ask has to be a multiple of min and less than max. 
 // Dump out information about cluster capability as seen by the resource 
manager
 int maxMem = appResponse.getMaximumResourceCapability().getMemory();
-LOG.info(Max mem capabililty of 

[49/50] [abbrv] hadoop git commit: HDFS-8205. CommandFormat#parse() should not parse option as value of option. (Contributed by Peter Shi and Xiaoyu Yao)

2015-04-27 Thread zjshen
HDFS-8205. CommandFormat#parse() should not parse option as value of option. 
(Contributed by Peter Shi and Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72cb2de3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72cb2de3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72cb2de3

Branch: refs/heads/YARN-2928
Commit: 72cb2de31f6466176cd4f2ca12eb3ebcde6ca313
Parents: 3ee2409
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 27 12:23:34 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:55 2015 -0700

--
 .../apache/hadoop/fs/shell/CommandFormat.java   |  3 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   | 28 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  4 +--
 .../src/test/resources/testHDFSConf.xml | 38 
 5 files changed, 73 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72cb2de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index 371168d..0aa3d65 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -114,7 +114,8 @@ public class CommandFormat {
 options.put(opt, Boolean.TRUE);
   } else if (optionsWithValue.containsKey(opt)) {
 args.remove(pos);
-if (pos < args.size() && (args.size() > minPar)) {
+if (pos < args.size() && (args.size() > minPar)
+    && !args.get(pos).startsWith("-")) {
   arg = args.get(pos);
   args.remove(pos);
 } else {
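
The added condition fixes a subtle consumption bug: for an option declared to take a value, the parser used to grab whatever token followed it, so in a command line like count -q -t -v -h dummy (exercised by the new test below) the -t option swallowed -v as its storage-type argument. A self-contained sketch of the corrected rule, simplified from CommandFormat with illustrative names:

  import java.util.ArrayList;
  import java.util.List;

  public class OptionValueSketch {
    // Returns the value for a value-taking option, or null when the next
    // token is itself an option (starts with "-") or a required positional.
    static String takeValue(List<String> args, int pos, int minPositionals) {
      if (pos < args.size() && args.size() > minPositionals
          && !args.get(pos).startsWith("-")) {
        return args.remove(pos);
      }
      return null;
    }

    public static void main(String[] ignored) {
      List<String> args = new ArrayList<>(List.of("-v", "-h", "dummy"));
      // Parsing "-t" after the fix: "-v" is recognized as the next option
      // rather than being consumed as -t's value.
      System.out.println(takeValue(args, 0, 1)); // prints: null
    }
  }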

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72cb2de3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 22d9a21..44fc1e6 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -316,6 +316,34 @@ public class TestCount {
   }
 
   @Test
+  public void processPathWithQuotasByQTVH() throws Exception {
+Path path = new Path("mockfs:/test");
+
+when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+
+PrintStream out = mock(PrintStream.class);
+
+Count count = new Count();
+count.out = out;
+
+LinkedList<String> options = new LinkedList<String>();
+options.add("-q");
+options.add("-t");
+options.add("-v");
+options.add("-h");
+options.add("dummy");
+count.processOptions(options);
+String withStorageTypeHeader =
+// <----13----> <------17------->
+"   DISK_QUOTA    REM_DISK_QUOTA " +
+"    SSD_QUOTA     REM_SSD_QUOTA " +
+"ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
+"PATHNAME";
+verify(out).println(withStorageTypeHeader);
+verifyNoMoreInteractions(out);
+  }
+
+  @Test
   public void processPathWithQuotasByMultipleStorageTypesContent() throws 
Exception {
 Path path = new Path(mockfs:/test);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72cb2de3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7199c7..00b5db5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -618,6 +618,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
 post-HDFS-7915 DataNode (cmccabe)
 
+HDFS-8205. CommandFormat#parse() should not parse option as
+value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72cb2de3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 

[50/50] [abbrv] hadoop git commit: HDFS-8205. Fix CHANGES.txt

2015-04-27 Thread zjshen
HDFS-8205. Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/277d5fd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/277d5fd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/277d5fd2

Branch: refs/heads/YARN-2928
Commit: 277d5fd2c0b3c14b61be653aaef58a0416f4ac59
Parents: 72cb2de
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 27 12:30:50 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/277d5fd2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 00b5db5..d56ea0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -571,6 +571,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8206. Fix the typos in hadoop-hdfs-httpfs. (Brahma Reddy Battula via 
xyao)
 
+HDFS-8205. CommandFormat#parse() should not parse option as
+value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -618,9 +621,6 @@ Release 2.7.1 - UNRELEASED
 HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
 post-HDFS-7915 DataNode (cmccabe)
 
-HDFS-8205. CommandFormat#parse() should not parse option as
-value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
-
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[09/50] [abbrv] hadoop git commit: HADOOP-11852. Disable symlinks in trunk.

2015-04-27 Thread zjshen
HADOOP-11852. Disable symlinks in trunk.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa6fec33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa6fec33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa6fec33

Branch: refs/heads/YARN-2928
Commit: aa6fec334889c32afb9748d075c656ce33ab8b49
Parents: f62eaf6
Author: Andrew Wang w...@apache.org
Authored: Thu Apr 23 11:47:01 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:47 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  2 ++
 .../java/org/apache/hadoop/fs/FSLinkResolver.java|  4 
 .../main/java/org/apache/hadoop/fs/FileContext.java  |  4 
 .../main/java/org/apache/hadoop/fs/FileSystem.java   | 15 +++
 .../org/apache/hadoop/fs/FileSystemLinkResolver.java |  4 
 .../org/apache/hadoop/fs/RawLocalFileSystem.java |  4 
 .../java/org/apache/hadoop/fs/SymlinkBaseTest.java   |  4 
 .../apache/hadoop/fs/TestFileContextResolveAfs.java  |  4 +++-
 .../src/test/java/org/apache/hadoop/fs/TestStat.java |  4 +++-
 .../apache/hadoop/hdfs/DistributedFileSystem.java|  4 
 .../hadoop/hdfs/server/namenode/FSEditLogLoader.java |  4 
 .../hadoop/hdfs/server/namenode/FSImageFormat.java   |  5 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java|  4 
 .../java/org/apache/hadoop/hdfs/MiniDFSCluster.java  |  3 +++
 .../hadoop/hdfs/server/namenode/TestINodeFile.java   |  4 
 15 files changed, 67 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6fec33/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index db1425f..f232e04 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -201,6 +201,8 @@ Trunk (Unreleased)
 HADOOP-11850. Typos in hadoop-common java docs. (Surendra Singh Lilhore
 via jghoman)
 
+HADOOP-11852. Disable symlinks in trunk.
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6fec33/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
index 831d4ca..ffe4b34 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
@@ -95,6 +95,10 @@ public abstract class FSLinkResolver<T> {
   + " and symlink resolution is disabled ("
   + CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY 
+ ").", e);
 }
+if (!FileSystem.areSymlinksEnabled()) {
+  throw new IOException("Symlink resolution is disabled in"
+  + " this version of Hadoop.");
+}
 if (count++ > FsConstants.MAX_PATH_LINKS) {
   throw new IOException("Possible cyclic loop while " +
 "following symbolic link " + path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6fec33/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 0b5863b..ea3f896 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1431,11 +1431,15 @@ public class FileContext {
*   <code>target</code> or <code>link</code> is not supported
* @throws IOException If an I/O error occurred
*/
+  @SuppressWarnings("deprecation")
   public void createSymlink(final Path target, final Path link,
   final boolean createParent) throws AccessControlException,
   FileAlreadyExistsException, FileNotFoundException,
   ParentNotDirectoryException, UnsupportedFileSystemException, 
   IOException { 
+if (!FileSystem.areSymlinksEnabled()) {
+  throw new UnsupportedOperationException("Symlinks not supported");
+}
 final Path nonRelLink = fixRelativePart(link);
 new FSLinkResolver<Void>() {
   @Override
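
The pattern is identical at every entry point this patch touches: a static, default-off switch consulted before any symlink work begins. A reduced stand-alone sketch of the guard (the real flag lives on org.apache.hadoop.fs.FileSystem; the enableSymlinks name here is assumed from the test changes):

  public class SymlinkGuardSketch {
    // Default-off in trunk; tests flip it on explicitly.
    private static volatile boolean symlinksEnabled = false;

    public static boolean areSymlinksEnabled() {
      return symlinksEnabled;
    }

    public static void enableSymlinks() { // assumed test-only hook
      symlinksEnabled = true;
    }

    public void createSymlink(String target, String link) {
      if (!areSymlinksEnabled()) {
        throw new UnsupportedOperationException("Symlinks not supported");
      }
      // ...link creation would proceed here...
    }
  }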


[36/50] [abbrv] hadoop git commit: HDFS-7673. synthetic load generator docs give incorrect/incomplete commands (Brahma Reddy Battula via aw)

2015-04-27 Thread zjshen
HDFS-7673. synthetic load generator docs give incorrect/incomplete commands 
(Brahma Reddy Battula via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2de75df5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2de75df5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2de75df5

Branch: refs/heads/YARN-2928
Commit: 2de75df534e0d32335f8c86ead222e2aefa85295
Parents: 7c2b960
Author: Allen Wittenauer a...@apache.org
Authored: Sat Apr 25 15:05:43 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../hadoop-hdfs/src/site/markdown/SLGUserGuide.md  | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2de75df5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a7b5ed3..07c5151 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -318,6 +318,9 @@ Trunk (Unreleased)
 HDFS-8110. Remove unsupported 'hdfs namenode -rollingUpgrade downgrade'
 from document. (J.Andreina via aajisaka)
 
+HDFS-7673. synthetic load generator docs give incorrect/incomplete commands
+(Brahma Reddy Battula via aw)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2de75df5/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/SLGUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/SLGUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/SLGUserGuide.md
index 48b92c2..38b293e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/SLGUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/SLGUserGuide.md
@@ -32,7 +32,7 @@ Synopsis
 
 The synopsis of the command is:
 
-java LoadGenerator [options]
+yarn jar 
HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-<hadoop-version>.jar
 NNloadGenerator [options]
 
 Options include:
 
@@ -102,7 +102,7 @@ The generated namespace structure is described by two files 
in the output direct
 
 The synopsis of the command is:
 
-java StructureGenerator [options]
+yarn jar 
HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-<hadoop-version>.jar
 NNstructureGenerator [options]
 
 Options include:
 
@@ -140,7 +140,7 @@ This tool reads the directory structure and file structure 
from the input direct
 
 The synopsis of the command is:
 
-java DataGenerator [options]
+yarn jar 
HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-<hadoop-version>.jar
 NNdataGenerator [options]
 
 Options include:
 



[14/50] [abbrv] hadoop git commit: HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

2015-04-27 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
deleted file mode 100644
index 376d7d8..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-
-/** Buffer size parameter. */
-public class BufferSizeParam extends IntegerParam {
-  /** Parameter name. */
-  public static final String NAME = "buffersize";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public BufferSizeParam(final Integer value) {
-super(DOMAIN, value, 1, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public BufferSizeParam(final String str) {
-this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-return NAME;
-  }
-
-  /** @return the value or, if it is null, return the default from conf. */
-  public int getValue(final Configuration conf) {
-return getValue() != null? getValue()
-: conf.getInt(
-CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
-CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 7f31f33..4128a09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -112,7 +112,7 @@ public class TestDFSUtil {
 List<LocatedBlock> ls = Arrays.asList(l1, l2);
 LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
 
-BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
+BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
 
 assertTrue("expected 2 blocks but got " + bs.length,
bs.length == 2);
@@ -128,7 +128,7 @@ public class TestDFSUtil {
 corruptCount == 1);
 
 // test an empty location
-bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
+bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
 assertEquals(0, bs.length);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 6733731..0c963f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;

[05/50] [abbrv] hadoop git commit: YARN-3413. Changed Nodelabel attributes (like exclusivity) to be settable only via addToClusterNodeLabels but not changeable at runtime. (Wangda Tan via vinodkv)

2015-04-27 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62eaf62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
index 7bf92af..1ff0bef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
@@ -18,23 +18,27 @@
 
 package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
 
-import java.util.HashSet;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProtoOrBuilder;
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
 
 public class AddToClusterNodeLabelsRequestPBImpl extends
 AddToClusterNodeLabelsRequest {
-  Set<String> labels;
   AddToClusterNodeLabelsRequestProto proto = AddToClusterNodeLabelsRequestProto
   .getDefaultInstance();
   AddToClusterNodeLabelsRequestProto.Builder builder = null;
+  private List<NodeLabel> updatedNodeLabels;
   boolean viaProto = false;
 
   public AddToClusterNodeLabelsRequestPBImpl() {
-this.builder = AddToClusterNodeLabelsRequestProto.newBuilder();
+builder = AddToClusterNodeLabelsRequestProto.newBuilder();
   }
 
   public AddToClusterNodeLabelsRequestPBImpl(
@@ -43,18 +47,11 @@ public class AddToClusterNodeLabelsRequestPBImpl extends
 viaProto = true;
   }
 
-  private void maybeInitBuilder() {
-if (viaProto || builder == null) {
-  builder = AddToClusterNodeLabelsRequestProto.newBuilder(proto);
-}
-viaProto = false;
-  }
-
-  private void mergeLocalToBuilder() {
-if (this.labels != null && !this.labels.isEmpty()) {
-  builder.clearNodeLabels();
-  builder.addAllNodeLabels(this.labels);
-}
+  public AddToClusterNodeLabelsRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
   }
 
   private void mergeLocalToProto() {
@@ -65,35 +62,30 @@ public class AddToClusterNodeLabelsRequestPBImpl extends
 viaProto = true;
   }
 
-  public AddToClusterNodeLabelsRequestProto getProto() {
-mergeLocalToProto();
-proto = viaProto ? proto : builder.build();
-viaProto = true;
-return proto;
-  }
-
-  private void initLabels() {
-if (this.labels != null) {
-  return;
+  private void mergeLocalToBuilder() {
+if (this.updatedNodeLabels != null) {
+  addNodeLabelsToProto();
 }
-AddToClusterNodeLabelsRequestProtoOrBuilder p = viaProto ? proto : builder;
-this.labels = new HashSet<String>();
-this.labels.addAll(p.getNodeLabelsList());
   }
 
-  @Override
-  public void setNodeLabels(Set<String> labels) {
+  private void addNodeLabelsToProto() {
 maybeInitBuilder();
-if (labels == null || labels.isEmpty()) {
-  builder.clearNodeLabels();
+builder.clearNodeLabels();
+List<NodeLabelProto> protoList = new ArrayList<NodeLabelProto>();
+for (NodeLabel r : this.updatedNodeLabels) {
+  protoList.add(convertToProtoFormat(r));
 }
-this.labels = labels;
+builder.addAllNodeLabels(protoList);
   }
 
   @Override
-  public Set<String> getNodeLabels() {
-initLabels();
-return this.labels;
+  public boolean equals(Object other) {
+if (other == null)
+  return false;
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
   }
 
   @Override
@@ -101,14 +93,53 @@ public class AddToClusterNodeLabelsRequestPBImpl extends
 assert false : "hashCode not designed";
 return 0;
   }
-  
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = AddToClusterNodeLabelsRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
   @Override
-  public boolean equals(Object other) {
-if (other == null)
-  return false;
-  
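
The reshuffling above is the standard YARN PBImpl idiom rather than new logic: a record object holds either a parsed proto (viaProto == true) or a mutable builder plus unmerged local fields, and getProto() folds local state into the proto lazily. A stripped-down sketch of that lifecycle, with plain lists standing in for the protobuf types:

  import java.util.ArrayList;
  import java.util.List;

  public class PBImplPatternSketch {
    private List<String> proto = List.of(); // stands in for the parsed proto
    private List<String> builder;           // stands in for the proto Builder
    private List<String> labels;            // local state, merged lazily
    private boolean viaProto = true;

    private void maybeInitBuilder() {
      if (viaProto || builder == null) {
        builder = new ArrayList<>(proto);   // Builder seeded from the proto
      }
      viaProto = false;
    }

    public void setLabels(List<String> newLabels) {
      maybeInitBuilder();
      builder.clear();                      // mirrors builder.clearNodeLabels()
      labels = newLabels;
    }

    public List<String> getProto() {        // mergeLocalToProto()
      if (labels != null) {                 // mergeLocalToBuilder()
        maybeInitBuilder();
        builder.clear();
        builder.addAll(labels);
      }
      proto = viaProto ? proto : List.copyOf(builder);
      viaProto = true;
      return proto;
    }
  }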

[13/50] [abbrv] hadoop git commit: HADOOP-11802. DomainSocketWatcher thread terminates sometimes after there is an I/O error during requestShortCircuitShm (cmccabe)

2015-04-27 Thread zjshen
HADOOP-11802. DomainSocketWatcher thread terminates sometimes after there is an 
I/O error during requestShortCircuitShm (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbf031fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbf031fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbf031fa

Branch: refs/heads/YARN-2928
Commit: dbf031fa13432e26102eba7ae3253cfedca836c3
Parents: d1b933b
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 23 18:59:52 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:48 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++
 .../hadoop/net/unix/DomainSocketWatcher.java|  4 +-
 .../hadoop/net/unix/DomainSocketWatcher.c   | 10 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../hdfs/server/datanode/DataXceiver.java   | 18 -
 .../hdfs/shortcircuit/DfsClientShmManager.java  |  3 +-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  6 ++
 .../shortcircuit/TestShortCircuitCache.java | 83 ++--
 8 files changed, 113 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf031fa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 22ef212..c40de45 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -522,6 +522,8 @@ Release 2.8.0 - UNRELEASED
 split calculation (gera)
 
   BUG FIXES
+HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
+is an I/O error during requestShortCircuitShm (cmccabe)
 
 HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
 jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
@@ -574,6 +576,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11730. Regression: s3n read failure recovery broken.
 (Takenori Sato via stevel)
 
+HADOOP-11802. DomainSocketWatcher thread terminates sometimes after there
+is an I/O error during requestShortCircuitShm (cmccabe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf031fa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 03b52e0..5648ae1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -512,8 +512,8 @@ public final class DomainSocketWatcher implements Closeable 
{
 }
   } catch (InterruptedException e) {
 LOG.info(toString() + " terminating on InterruptedException");
-  } catch (IOException e) {
-LOG.error(toString() + " terminating on IOException", e);
+  } catch (Throwable e) {
+LOG.error(toString() + " terminating on exception", e);
   } finally {
 lock.lock();
 try {
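
Two independent hardenings make up this fix; the Java half above widens the catch in the watcher thread's run loop. Previously an unexpected RuntimeException or Error escaped the IOException-only catch, killing the thread with no log output and leaving every caller blocked on it. A stand-alone sketch of the pattern:

  public class WatcherLoopSketch {
    public static void main(String[] args) throws InterruptedException {
      Thread watcher = new Thread(() -> {
        try {
          pollLoop();
        } catch (Throwable t) {
          // Catching Throwable (not just IOException) makes the failure
          // visible and guarantees the cleanup below still runs.
          System.err.println("watcher terminating on exception: " + t);
        } finally {
          // close tracked sockets and wake blocked callers here
        }
      }, "watcher");
      watcher.start();
      watcher.join();
    }

    private static void pollLoop() {
      // Simulates a failure an IOException-only catch would have missed.
      throw new IllegalStateException("unexpected error in poll loop");
    }
  }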

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf031fa/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
index dbaa4fe..596601b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
@@ -111,7 +111,7 @@ JNIEnv *env, jobject obj, jint fd)
   pollfd = &sd->pollfd[sd->used_size];
   sd->used_size++;
   pollfd->fd = fd;
-  pollfd->events = POLLIN;
+  pollfd->events = POLLIN | POLLHUP;
   pollfd->revents = 0;
 }
 
@@ -162,7 +162,10 @@ JNIEnv *env, jobject obj)
   GetLongField(env, obj, fd_set_data_fid);
   used_size = sd->used_size;
   for (i = 0; i < used_size; i++) {
-if (sd->pollfd[i].revents & POLLIN) {
+// We check for both POLLIN and POLLHUP, because on some OSes, when a socket
+// is shutdown(), it sends POLLHUP rather than POLLIN.
+if ((sd->pollfd[i].revents & POLLIN) ||
+

[20/50] [abbrv] hadoop git commit: HDFS-8231. StackTrace displayed at client while QuotaByStorageType exceeds (Contributed by J.Andreina and Xiaoyu Yao)

2015-04-27 Thread zjshen
HDFS-8231. StackTrace displayed at client while QuotaByStorageType exceeds 
(Contributed by J.Andreina and Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f01a146a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f01a146a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f01a146a

Branch: refs/heads/YARN-2928
Commit: f01a146aabaf41aaa08810fafef8cf523369407f
Parents: 80cedc09
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 24 12:51:04 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java   | 7 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java| 2 ++
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 2 ++
 5 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01a146a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0e00025..b442bad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -548,6 +548,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8217. During block recovery for truncate Log new Block Id in case of
 copy-on-truncate is true. (vinayakumarb)
 
+HDFS-8231. StackTrace displayed at client while QuotaByStorageType exceeds
+(J.Andreina and Xiaoyu Yao via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01a146a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 63145b0..8fc9e77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1425,6 +1425,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  ParentNotDirectoryException.class,
  NSQuotaExceededException.class, 
  DSQuotaExceededException.class,
+ QuotaByStorageTypeExceededException.class,
  UnresolvedPathException.class,
  SnapshotAccessControlException.class);
 } finally {
@@ -1467,6 +1468,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  FileNotFoundException.class,
  SafeModeException.class,
  DSQuotaExceededException.class,
+ QuotaByStorageTypeExceededException.class,
  UnsupportedOperationException.class,
  UnresolvedPathException.class,
  SnapshotAccessControlException.class);
@@ -1542,6 +1544,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  FileNotFoundException.class,
  SafeModeException.class,
  DSQuotaExceededException.class,
+ QuotaByStorageTypeExceededException.class,
  UnresolvedPathException.class,
  SnapshotAccessControlException.class);
 } finally {
@@ -1598,6 +1601,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   throw re.unwrapRemoteException(AccessControlException.class,
  NSQuotaExceededException.class,
  DSQuotaExceededException.class,
+ QuotaByStorageTypeExceededException.class,
  UnresolvedPathException.class,
  SnapshotAccessControlException.class);
 } finally {
@@ -1635,6 +1639,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 } catch(RemoteException re) {
   throw 

[17/50] [abbrv] hadoop git commit: HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

2015-04-27 Thread zjshen
HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by 
Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1b933b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1b933b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1b933b2

Branch: refs/heads/YARN-2928
Commit: d1b933b266e42b47e4c5378d6d3193de3c46d7a2
Parents: 45ccd91
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 23 17:33:05 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:48 2015 -0700

--
 .../org/apache/hadoop/io/retry/RetryUtils.java  |4 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  109 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |1 +
 .../hdfs/protocol/HdfsConstantsClient.java  |4 +
 .../hadoop/hdfs/web/ByteRangeInputStream.java   |  232 +++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  485 ++
 .../hdfs/web/KerberosUgiAuthenticator.java  |   45 +
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   44 +
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  179 +++
 .../hadoop/hdfs/web/URLConnectionFactory.java   |  187 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 1461 +
 .../hdfs/web/resources/BufferSizeParam.java |   60 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 .../hadoop/hdfs/BlockStorageLocationUtil.java   |3 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |5 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|   89 +-
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |2 +-
 .../hdfs/protocol/HdfsLocatedFileStatus.java|4 +-
 .../server/namenode/NameNodeHttpServer.java |5 +-
 .../hadoop/hdfs/web/ByteRangeInputStream.java   |  232 ---
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  484 --
 .../hdfs/web/KerberosUgiAuthenticator.java  |   45 -
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   44 -
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  179 ---
 .../hadoop/hdfs/web/URLConnectionFactory.java   |  187 ---
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 1463 --
 .../hdfs/web/resources/BufferSizeParam.java |   60 -
 .../org/apache/hadoop/hdfs/TestDFSUtil.java |4 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |4 +-
 30 files changed, 2830 insertions(+), 2795 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
index e6f4519..b2e115f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
@@ -60,7 +60,7 @@ public class RetryUtils {
   boolean defaultRetryPolicyEnabled,
   String retryPolicySpecKey,
   String defaultRetryPolicySpec,
-  final Class<? extends Exception> remoteExceptionToRetry
+  final String remoteExceptionToRetry
   ) {
 
 final RetryPolicy multipleLinearRandomRetry = 
@@ -94,7 +94,7 @@ public class RetryUtils {
   final RetryPolicy p;
   if (e instanceof RemoteException) {
 final RemoteException re = (RemoteException)e;
-p = remoteExceptionToRetry.getName().equals(re.getClassName())?
+p = remoteExceptionToRetry.equals(re.getClassName())?
 multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
   } else if (e instanceof IOException || e instanceof 
ServiceException) {
 p = multipleLinearRandomRetry;
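
The Class-to-String switch above is what lets the retry policy live in the
client module: it can recognize a server-side exception purely by class name,
with no compile-time dependency on that class. A minimal sketch of the
comparison (the exception name is chosen for illustration):

import org.apache.hadoop.ipc.RemoteException;

public class NameBasedRetrySketch {
  public static void main(String[] args) {
    final String remoteExceptionToRetry = "org.apache.hadoop.ipc.StandbyException";
    RemoteException re = new RemoteException(
        "org.apache.hadoop.ipc.StandbyException", "Operation category READ is not supported");
    // Mirrors the patched check: retry only when the remote class name matches.
    System.out.println(remoteExceptionToRetry.equals(re.getClassName())); // true
  }
}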

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 84fb12c..97d3408 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -19,10 +19,17 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import org.apache.commons.io.Charsets;
 import 

[16/50] [abbrv] hadoop git commit: HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

2015-04-27 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b933b2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
new file mode 100644
index 000..d28f571
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -0,0 +1,1461 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.io.BufferedOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import javax.ws.rs.core.MediaType;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.HAUtilClient;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.web.resources.*;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/** A FileSystem for HDFS over the web. */
+public class WebHdfsFileSystem extends FileSystem
+implements DelegationTokenRenewer.Renewable, 
TokenAspect.TokenManagementDelegator {
+  public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
+  /** WebHdfs version. */
+  public static final int VERSION = 1;
+  /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
+  public static final String PATH_PREFIX = "/" + 
WebHdfsConstants.WEBHDFS_SCHEME + "/v" + VERSION;
+
+  /** Default connection factory may be overridden in 
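
For orientation, a hypothetical usage sketch of the URL form implied by
PATH_PREFIX above: every WebHDFS request has the shape
http://<namenode>:<port>/webhdfs/v1/<path>?op=... and the FileSystem API hides
that behind the webhdfs:// scheme. Host and port are placeholders; a reachable
NameNode with WebHDFS enabled is assumed.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070/"), new Configuration());
    // Issues GET http://namenode.example.com:50070/webhdfs/v1/?op=GETFILESTATUS
    System.out.println(fs.getFileStatus(new Path("/")).isDirectory()); // true
  }
}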

[02/50] [abbrv] hadoop git commit: HDFS-4448. Allow HA NN to start in secure mode with wildcard address configured (atm via asuresh)

2015-04-27 Thread zjshen
HDFS-4448. Allow HA NN to start in secure mode with wildcard address configured 
(atm via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4dc6af1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4dc6af1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4dc6af1

Branch: refs/heads/YARN-2928
Commit: a4dc6af18676dbf80d669ae1c58499e8307f92c2
Parents: eb4aee5
Author: Arun Suresh asur...@apache.org
Authored: Thu Apr 23 01:42:24 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 13:35:22 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java   | 9 +
 2 files changed, 4 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4dc6af1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 521315b..b9f8787 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -461,6 +461,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8218. Move classes that used by ClientProtocol into hdfs-client.
 (wheat9)
 
+HDFS-4448. Allow HA NN to start in secure mode with wildcard address
+configured (atm via asuresh)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4dc6af1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 60a496f..078b0bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1017,17 +1017,10 @@ public class DFSUtil {
*/
   @VisibleForTesting
   static String substituteForWildcardAddress(String configuredAddress,
-String defaultHost) throws IOException {
+String defaultHost) {
 InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
-InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
-+ ":0");
 final InetAddress addr = sockAddr.getAddress();
 if (addr != null && addr.isAnyLocalAddress()) {
-  if (UserGroupInformation.isSecurityEnabled() &&
-  defaultSockAddr.getAddress().isAnyLocalAddress()) {
-throw new IOException("Cannot use a wildcard address with security. " +
-"Must explicitly set bind address for Kerberos");
-  }
   return defaultHost + ":" + sockAddr.getPort();
 } else {
   return configuredAddress;
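
A standalone sketch of the substitution logic kept above, with simplified
host:port parsing standing in for NetUtils.createSocketAddr (names are
illustrative): only a wildcard bind address is replaced, and the configured
port is preserved.

import java.net.InetAddress;
import java.net.InetSocketAddress;

public class WildcardSubstituteSketch {
  static String substitute(String configuredAddress, String defaultHost) {
    int colon = configuredAddress.lastIndexOf(':');
    String host = configuredAddress.substring(0, colon);
    int port = Integer.parseInt(configuredAddress.substring(colon + 1));
    InetSocketAddress sockAddr = new InetSocketAddress(host, port);
    InetAddress addr = sockAddr.getAddress();
    // Same test as the patched method: 0.0.0.0 (or ::) means "any local".
    if (addr != null && addr.isAnyLocalAddress()) {
      return defaultHost + ":" + sockAddr.getPort();
    }
    return configuredAddress;
  }

  public static void main(String[] args) {
    System.out.println(substitute("0.0.0.0:8020", "nn1.example.com")); // nn1.example.com:8020
    System.out.println(substitute("127.0.0.1:8020", "nn1.example.com")); // unchanged
  }
}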



[03/50] [abbrv] hadoop git commit: YARN-2605. [RM HA] Rest api endpoints doing redirect incorrectly. (Xuan Gong via stevel)

2015-04-27 Thread zjshen
YARN-2605. [RM HA] Rest api endpoints doing redirect incorrectly. (Xuan Gong 
via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb4aee51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb4aee51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb4aee51

Branch: refs/heads/YARN-2928
Commit: eb4aee51cbce606baf50920a70c7ed4386b985f6
Parents: 73038ef
Author: Steve Loughran ste...@apache.org
Authored: Thu Apr 23 09:22:09 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 13:35:22 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/yarn/client/TestRMFailover.java | 5 +
 .../yarn/server/resourcemanager/webapp/RMWebAppFilter.java | 6 +++---
 3 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb4aee51/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6e08a0e..de794e5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -333,6 +333,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
 without making a copy. (Jason Lowe via jianhe)
 
+YARN-2605. [RM HA] Rest api endpoints doing redirect incorrectly.
+(Xuan Gong via stevel)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb4aee51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 0634cc3..4938255 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -48,6 +48,7 @@ import 
org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestRMFailover extends ClientBaseWithFixes {
@@ -274,6 +275,10 @@ public class TestRMFailover extends ClientBaseWithFixes {
 assertEquals(404, response.getResponseCode());
   }
 
+  // Ignore this testcase: it always gets a "too many redirect loops"
+  // exception, probably because of a limitation of MiniYARNCluster.
+  // The behavior was verified on a single-node cluster.
+  @Ignore
   @Test
   public void testRMWebAppRedirect() throws YarnException,
   InterruptedException, IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb4aee51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
index 49fd1f5..b1027a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
@@ -72,11 +72,11 @@ public class RMWebAppFilter extends GuiceContainer {
 
   if (redirectPath != null && !redirectPath.isEmpty()) {
 String redirectMsg =
-"This is standby RM. Redirecting to the current active RM: "
-+ redirectPath;
-response.addHeader("Refresh", "3; url=" + redirectPath);
+"This is standby RM. The redirect url is: " + redirectPath;
 PrintWriter out = response.getWriter();
 out.println(redirectMsg);
+response.setHeader("Location", redirectPath);
+response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
 return;
   }
 }
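
The redirect pattern the patch switches to, as a self-contained servlet sketch
(the servlet class and active-RM URL are hypothetical): a real 307 status plus
a Location header is followed by REST clients such as curl, whereas the old
Refresh header was only honored by browsers.

import java.io.IOException;
import java.io.PrintWriter;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class StandbyRedirectSketch extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    // Hypothetical active-RM address; the real filter computes redirectPath.
    String redirectPath = "http://active-rm.example.com:8088" + req.getRequestURI();
    PrintWriter out = resp.getWriter();
    out.println("This is standby RM. The redirect url is: " + redirectPath);
    resp.setHeader("Location", redirectPath);
    resp.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT); // 307
  }
}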



[06/50] [abbrv] hadoop git commit: YARN-3413. Changed Nodelabel attributes (like exclusivity) to be settable only via addToClusterNodeLabels but not changeable at runtime. (Wangda Tan via vinodkv)

2015-04-27 Thread zjshen
YARN-3413. Changed Nodelabel attributes (like exclusivity) to be settable only 
via addToClusterNodeLabels but not changeable at runtime. (Wangda Tan via 
vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f62eaf62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f62eaf62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f62eaf62

Branch: refs/heads/YARN-2928
Commit: f62eaf625d3b2b7e44a2fa37c3fffc98dda10b2a
Parents: 78ac5d3
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 23 11:19:55 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:46 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   3 +-
 .../GetClusterNodeLabelsResponse.java   |   9 +-
 .../hadoop/yarn/api/records/NodeLabel.java  |  79 +++---
 .../ResourceManagerAdministrationProtocol.java  |   8 -
 .../AddToClusterNodeLabelsRequest.java  |  24 +--
 .../UpdateNodeLabelsRequest.java|  49 ---
 .../UpdateNodeLabelsResponse.java   |  37 -
 ...esourcemanager_administration_protocol.proto |   1 -
 ..._server_resourcemanager_service_protos.proto |   7 +-
 .../src/main/proto/yarn_protos.proto|   2 +-
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../TestDistributedShellWithNodeLabels.java |   2 +-
 .../hadoop/yarn/client/api/YarnClient.java  |   3 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   3 +-
 .../hadoop/yarn/client/cli/ClusterCLI.java  |  18 +--
 .../hadoop/yarn/client/cli/RMAdminCLI.java  |  73 +-
 .../hadoop/yarn/client/cli/TestClusterCLI.java  |  21 ++-
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |  60 ++--
 .../pb/GetClusterNodeLabelsResponsePBImpl.java  | 119 +--
 .../api/records/impl/pb/NodeLabelPBImpl.java|  18 +--
 .../nodelabels/CommonNodeLabelsManager.java |  95 ++--
 .../nodelabels/FileSystemNodeLabelsStore.java   |  40 ++---
 .../hadoop/yarn/nodelabels/NodeLabelsStore.java |  10 +-
 .../hadoop/yarn/nodelabels/RMNodeLabel.java |  19 ++-
 .../event/NodeLabelsStoreEventType.java |   3 +-
 .../event/StoreNewClusterNodeLabels.java|  10 +-
 .../event/StoreUpdateNodeLabelsEvent.java   |  36 -
 ...nagerAdministrationProtocolPBClientImpl.java |  21 +--
 ...agerAdministrationProtocolPBServiceImpl.java |  23 ---
 .../pb/AddToClusterNodeLabelsRequestPBImpl.java | 119 +--
 .../impl/pb/UpdateNodeLabelsRequestPBImpl.java  | 145 ---
 .../impl/pb/UpdateNodeLabelsResponsePBImpl.java |  67 -
 .../hadoop/yarn/api/TestPBImplRecords.java  |  18 +--
 .../DummyCommonNodeLabelsManager.java   |  11 +-
 .../yarn/nodelabels/NodeLabelTestBase.java  |   1 +
 .../nodelabels/TestCommonNodeLabelsManager.java | 104 ++---
 .../TestFileSystemNodeLabelsStore.java  |  52 +++
 .../server/resourcemanager/AdminService.java|  24 ---
 .../scheduler/capacity/CSQueueUtils.java|   2 +-
 .../scheduler/capacity/ParentQueue.java |   2 +-
 .../resourcemanager/webapp/RMWebServices.java   |   4 +-
 .../resourcemanager/TestClientRMService.java|  12 +-
 .../server/resourcemanager/TestRMRestart.java   |   6 +-
 .../TestResourceTrackerService.java |  12 +-
 .../nodelabels/NullRMNodeLabelsManager.java |   9 +-
 .../nodelabels/TestRMNodeLabelsManager.java |  20 +--
 .../capacity/TestCapacityScheduler.java |   2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   |   2 +-
 .../capacity/TestContainerAllocation.java   |   7 -
 .../TestNodeLabelContainerAllocation.java   |  53 +++
 .../scheduler/capacity/TestQueueParsing.java|   8 +-
 ...TestWorkPreservingRMRestartForNodeLabel.java |   4 +-
 52 files changed, 600 insertions(+), 879 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62eaf62/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 279c4f1..2b7cd5f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -58,6 +58,7 @@ import 

[10/50] [abbrv] hadoop git commit: HADOOP-11730. Regression: s3n read failure recovery broken. (Takenori Sato via stevel)

2015-04-27 Thread zjshen
HADOOP-11730. Regression: s3n read failure recovery broken.  (Takenori Sato via 
stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ff751d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ff751d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ff751d3

Branch: refs/heads/YARN-2928
Commit: 2ff751da232fa6a686e98583b8875e2b617c
Parents: 9849a2a
Author: Steve Loughran ste...@apache.org
Authored: Thu Apr 23 21:39:30 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:47 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../hadoop/fs/s3native/NativeS3FileSystem.java  | 32 +++-
 .../NativeS3FileSystemContractBaseTest.java | 24 +++
 3 files changed, 39 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff751d3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f232e04..777828e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -568,6 +568,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11868. Invalid user logins trigger large backtraces in server log
 (Chang Li via jlowe)
 
+HADOOP-11730. Regression: s3n read failure recovery broken.
+(Takenori Sato via stevel)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff751d3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index a2f9805..0ad8e5f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
@@ -124,7 +125,7 @@ public class NativeS3FileSystem extends FileSystem {
 key);
 LOG.debug("{}", e, e);
 try {
-  seek(pos);
+  reopen(pos);
   result = in.read();
 } catch (EOFException eof) {
   LOG.debug(EOF on input stream read: {}, eof, eof);
@@ -153,7 +154,7 @@ public class NativeS3FileSystem extends FileSystem {
   } catch (IOException e) {
 LOG.info("Received IOException while reading '{}'," +
" attempting to reopen.", key);
-seek(pos);
+reopen(pos);
 result = in.read(b, off, len);
   }
   if (result  0) {
@@ -173,16 +174,21 @@ public class NativeS3FileSystem extends FileSystem {
 /**
  * Close the inner stream if not null. Even if an exception
  * is raised during the close, the field is set to null
- * @throws IOException if raised by the close() operation.
  */
-private void closeInnerStream() throws IOException {
-  if (in != null) {
-try {
-  in.close();
-} finally {
-  in = null;
-}
-  }
+private void closeInnerStream() {
+  IOUtils.closeStream(in);
+  in = null;
+}
+
+/**
+ * Reopen a new input stream with the specified position
+ * @param pos the position to reopen a new stream
+ * @throws IOException
+ */
+private synchronized void reopen(long pos) throws IOException {
+LOG.debug("Reopening key '{}' for reading at position '{}'", key, pos);
+InputStream newStream = store.retrieve(key, pos);
+updateInnerStream(newStream, pos);
 }
 
 /**
@@ -207,9 +213,7 @@ public class NativeS3FileSystem extends FileSystem {
   }
   if (pos != newpos) {
 // the seek is attempting to move the current position
-LOG.debug("Opening key '{}' for reading at position '{}'", key, newpos);
-InputStream newStream = store.retrieve(key, newpos);
-updateInnerStream(newStream, newpos);
+reopen(newpos);
   }
 }
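
A self-contained sketch of the recovery pattern this patch restores: on a read
failure, unconditionally reopen the stream at the remembered offset and retry.
Calling seek(pos) instead is a no-op when the position has not changed, which
is exactly how the recovery broke. A ByteArrayInputStream stands in for the
remote S3 object; all names are illustrative.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReopeningReaderSketch {
  private final byte[] backing; // stands in for the S3 object
  private InputStream in;
  private long pos;

  ReopeningReaderSketch(byte[] data) throws IOException {
    backing = data;
    reopen(0);
  }

  // Unconditionally re-fetches at newPos, unlike a short-circuiting seek().
  private synchronized void reopen(long newPos) throws IOException {
    if (in != null) {
      in.close();
    }
    in = new ByteArrayInputStream(backing);
    in.skip(newPos);
    pos = newPos;
  }

  int read() throws IOException {
    int b;
    try {
      b = in.read();
    } catch (IOException e) {
      reopen(pos); // recover by reopening at the same offset
      b = in.read();
    }
    if (b >= 0) {
      pos++;
    }
    return b;
  }

  public static void main(String[] args) throws IOException {
    ReopeningReaderSketch r = new ReopeningReaderSketch(new byte[] {1, 2, 3});
    System.out.println(r.read()); // 1
  }
}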
 


[21/50] [abbrv] hadoop git commit: HDFS-8217. During block recovery for truncate Log new Block Id in case of copy-on-truncate is true. (Contributed by Vinayakumar B)

2015-04-27 Thread zjshen
HDFS-8217. During block recovery for truncate Log new Block Id in case of 
copy-on-truncate is true. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80cedc09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80cedc09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80cedc09

Branch: refs/heads/YARN-2928
Commit: 80cedc09e503fb9c258bf70ffe23cceaf0bdaac6
Parents: b512bad
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 24 12:16:41 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java|  4 +++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   | 12 +++-
 3 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80cedc09/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b0a0a50..0e00025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -545,6 +545,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7993. Provide each Replica details in fsck (J.Andreina via 
vinayakumarb)
 
+HDFS-8217. During block recovery for truncate Log new Block Id in case of
+copy-on-truncate is true. (vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80cedc09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e81da52..23ab43a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2840,7 +2840,9 @@ public class DataNode extends ReconfigurableBase
 
 LOG.info(who + " calls recoverBlock(" + block
 + ", targets=[" + Joiner.on(", ").join(targets) + "]"
-+ ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
++ ((rb.getNewBlock() == null) ? ", newGenerationStamp="
++ rb.getNewGenerationStamp() : ", newBlock=" + rb.getNewBlock())
++ ")");
   }
 
   @Override // ClientDataNodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80cedc09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3599fad..4477dc4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4229,6 +4229,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 String src = ;
 waitForLoadingFSImage();
 writeLock();
+boolean copyTruncate = false;
+BlockInfoContiguousUnderConstruction truncatedBlock = null;
 try {
   checkOperation(OperationCategory.WRITE);
   // If a DN tries to commit to the standby, the recovery will
@@ -4285,11 +4287,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return;
   }
 
-  BlockInfoContiguousUnderConstruction truncatedBlock =
-  (BlockInfoContiguousUnderConstruction) iFile.getLastBlock();
+  truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile
+  .getLastBlock();
   long recoveryId = truncatedBlock.getBlockRecoveryId();
-  boolean copyTruncate =
-  truncatedBlock.getBlockId() != storedBlock.getBlockId();
+  copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
   if(recoveryId != newgenerationstamp) {
 throw new IOException("The recovery id " + newgenerationstamp
   + " does not match current recovery id "
@@ -4382,7 +4383,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 if (closeFile) {
   LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock
   + ", file=" 

[11/50] [abbrv] hadoop git commit: HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode. Contributed by surendra singh lilhore

2015-04-27 Thread zjshen
HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.  
Contributed by surendra singh lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9849a2ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9849a2ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9849a2ae

Branch: refs/heads/YARN-2928
Commit: 9849a2aebe35e0caf53e2832cacf3b2c41af6e14
Parents: aa6fec3
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 23 11:55:06 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:47 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 29 
 .../hadoop/hdfs/server/mover/TestMover.java | 47 
 3 files changed, 79 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9849a2ae/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9f8787..e00f67e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -581,6 +581,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8163. Using monotonicNow for block report scheduling causes
 test failures on recently restarted systems. (Arpit Agarwal)
 
+HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.
+(surendra singh lilhore via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9849a2ae/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index a3fd251..a7a6c4a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -469,6 +469,25 @@ public class Dispatcher {
   public String toString() {
 return getDisplayName();
   }
+
+  @Override
+  public int hashCode() {
+return getStorageType().hashCode() ^ getDatanodeInfo().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+if (this == obj) {
+  return true;
+} else if (obj == null || !(obj instanceof StorageGroup)) {
+  return false;
+} else {
+  final StorageGroup that = (StorageGroup) obj;
+  return this.getStorageType() == that.getStorageType()
+   && this.getDatanodeInfo().equals(that.getDatanodeInfo());
+}
+  }
+
 }
 
 final DatanodeInfo datanode;
@@ -753,6 +772,16 @@ public class Dispatcher {
 }
   }
 }
+
+@Override
+public int hashCode() {
+  return super.hashCode();
+}
+
+@Override
+public boolean equals(Object obj) {
+  return super.equals(obj);
+}
   }
 
   public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
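
Why the value-based override matters, in a simplified standalone sketch (the
Group class below is a stand-in for the balancer's StorageGroup, which keys on
datanode plus storage type): without equals/hashCode, HashSet and HashMap
lookups fall back to object identity and silently miss an equivalent group.

import java.util.HashSet;
import java.util.Set;

public class StorageGroupEqualitySketch {
  enum StorageType { DISK, ARCHIVE }

  static final class Group {
    final String datanode;
    final StorageType type;

    Group(String datanode, StorageType type) {
      this.datanode = datanode;
      this.type = type;
    }

    @Override
    public int hashCode() {
      return type.hashCode() ^ datanode.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      } else if (!(obj instanceof Group)) {
        return false;
      }
      Group that = (Group) obj;
      return this.type == that.type && this.datanode.equals(that.datanode);
    }
  }

  public static void main(String[] args) {
    Set<Group> seen = new HashSet<>();
    seen.add(new Group("dn1:9866", StorageType.DISK));
    // True only because equals/hashCode are value-based; the inherited
    // identity versions would make this lookup fail.
    System.out.println(seen.contains(new Group("dn1:9866", StorageType.DISK)));
  }
}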

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9849a2ae/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 1de236e..b2f9fce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -277,4 +277,51 @@ public class TestMover {
cluster.shutdown();
 }
   }
+
+  @Test(timeout = 30)
+  public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
+// HDFS-8147
+final Configuration conf = new HdfsConfiguration();
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(3)
+.storageTypes(
+new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE },
+{ StorageType.DISK, StorageType.DISK },
+{ StorageType.DISK, StorageType.ARCHIVE } }).build();
+try {
+  cluster.waitActive();
+  final DistributedFileSystem dfs = cluster.getFileSystem();
+  final String file = 

[01/50] [abbrv] hadoop git commit: YARN-3434. Interaction between reservations and userlimit can result in significant ULF violation

2015-04-27 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 fa5cc7524 - 277d5fd2c


YARN-3434. Interaction between reservations and userlimit can result in 
significant ULF violation


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef9bcb4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef9bcb4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef9bcb4b

Branch: refs/heads/YARN-2928
Commit: ef9bcb4ba17121990bcd5c9b3d428431894196db
Parents: a4dc6af
Author: tgraves tgra...@apache.org
Authored: Thu Apr 23 14:39:25 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 13:35:22 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/ResourceLimits.java   |  28 +++-
 .../scheduler/capacity/AbstractCSQueue.java |  94 +--
 .../scheduler/capacity/LeafQueue.java   | 162 ---
 .../scheduler/capacity/TestReservations.java|  65 +---
 5 files changed, 186 insertions(+), 166 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9bcb4b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index de794e5..020fa29 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -316,6 +316,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3495. Confusing log generated by FairScheduler.
 (Brahma Reddy Battula via ozawa)
 
+YARN-3434. Interaction between reservations and userlimit can result in 
+significant ULF violation (tgraves)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9bcb4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 12333e8..8074794 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -19,22 +19,44 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
  * Resource limits for queues/applications, this means max overall (please note
  * that, it's not extra) resource you can get.
  */
 public class ResourceLimits {
+  volatile Resource limit;
+
+  // This is a special limit that goes with the RESERVE_CONT_LOOK_ALL_NODES
+  // config. This limit indicates how much we need to unreserve to allocate
+  // another container.
+  private volatile Resource amountNeededUnreserve;
+
   public ResourceLimits(Resource limit) {
+this.amountNeededUnreserve = Resources.none();
 this.limit = limit;
   }
-  
-  volatile Resource limit;
+
+  public ResourceLimits(Resource limit, Resource amountNeededUnreserve) {
+this.amountNeededUnreserve = amountNeededUnreserve;
+this.limit = limit;
+  }
+
   public Resource getLimit() {
 return limit;
   }
-  
+
+  public Resource getAmountNeededUnreserve() {
+return amountNeededUnreserve;
+  }
+
   public void setLimit(Resource limit) {
 this.limit = limit;
   }
+
+  public void setAmountNeededUnreserve(Resource amountNeededUnreserve) {
+this.amountNeededUnreserve = amountNeededUnreserve;
+  }
+
 }
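
A short usage sketch of the extended class (all values illustrative): the
two-argument constructor records how much the scheduler must unreserve before
another container can be allocated, alongside the overall limit, while the
one-argument form defaults that amount to Resources.none().

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourceLimitsSketch {
  public static void main(String[] args) {
    Resource limit = Resource.newInstance(8192, 4);         // 8 GB, 4 vcores
    Resource needUnreserve = Resource.newInstance(2048, 1); // must free this much
    ResourceLimits limits = new ResourceLimits(limit, needUnreserve);
    System.out.println(limits.getLimit() + " / " + limits.getAmountNeededUnreserve());
    System.out.println(new ResourceLimits(limit).getAmountNeededUnreserve()
        .equals(Resources.none())); // true
  }
}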

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9bcb4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java

[44/50] [abbrv] hadoop git commit: HADOOP-11865. Incorrect path mentioned in document for accessing script files (J.Andreina via aw)

2015-04-27 Thread zjshen
HADOOP-11865. Incorrect path mentioned in document for accessing script files 
(J.Andreina via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0992223
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0992223
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0992223

Branch: refs/heads/YARN-2928
Commit: c09922237d2a9391950f74150d58dd607d7c4694
Parents: 7dc1af5
Author: Allen Wittenauer a...@apache.org
Authored: Sun Apr 26 09:55:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:53 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm   | 2 +-
 .../hadoop-hdfs/src/site/markdown/HdfsUserGuide.md   | 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0992223/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 826c77e..5ba71a4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -445,6 +445,9 @@ Trunk (Unreleased)
 
 HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)
 
+HADOOP-11865. Incorrect path mentioned in document for accessing script
+files (J.Andreina via aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0992223/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 44b5bfb..ad4bfca 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -101,7 +101,7 @@ The Aggregation interval is configured via the property :
 
 $H3 Start/Stop the KMS
 
-To start/stop KMS use KMS's bin/kms.sh script. For example:
+To start/stop KMS use KMS's sbin/kms.sh script. For example:
 
 hadoop-${project.version} $ sbin/kms.sh start
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0992223/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index ffd8532..54197a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -307,7 +307,7 @@ When Hadoop is upgraded on an existing cluster, as with any 
software upgrade, it
 
 *   Stop the cluster and distribute new version of Hadoop.
 
-*   Run the new version with `-upgrade` option (`bin/start-dfs.sh -upgrade`).
+*   Run the new version with `-upgrade` option (`sbin/start-dfs.sh -upgrade`).
 
 *   Most of the time, cluster works just fine. Once the new HDFS is
 considered working well (may be after a few days of operation),
@@ -319,7 +319,7 @@ When Hadoop is upgraded on an existing cluster, as with any 
software upgrade, it
 
 * stop the cluster and distribute earlier version of Hadoop.
 
-* start the cluster with rollback option. (`bin/start-dfs.sh -rollback`).
+* start the cluster with rollback option. (`sbin/start-dfs.sh -rollback`).
 
 When upgrading to a new version of HDFS, it is necessary to rename or delete 
any paths that are reserved in the new version of HDFS. If the NameNode 
encounters a reserved path during upgrade, it will print an error like the 
following:
 



[40/50] [abbrv] hadoop git commit: HADOOP-11857. Fix CommandFormat#commandFormat java doc annotation. Contributed by J.Andreina.

2015-04-27 Thread zjshen
HADOOP-11857. Fix CommandFormat#commandFormat java doc annotation. Contributed 
by J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd8af28f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd8af28f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd8af28f

Branch: refs/heads/YARN-2928
Commit: bd8af28f04e0c8e087c0bf74240ff0d3162319b5
Parents: 87c7441
Author: Jakob Homan jgho...@gmail.com
Authored: Sun Apr 26 18:35:01 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:53 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 5 -
 .../src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd8af28f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 597496a..390dbaf 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -204,7 +204,10 @@ Trunk (Unreleased)
 HADOOP-11850. Typos in hadoop-common java docs. (Surendra Singh Lilhore
 via jghoman)
 
-HADOOP-11852. Disable symlinks in trunk.
+HADOOP-11852. Disable symlinks in trunk. (Andrew Wang)
+
+HADOOP-11857. Fix CommandFormat#commandFormat java doc annotation.
+(J.Andreina via jghoman)
 
   BUG FIXES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd8af28f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index 0f9aa38..371168d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -43,7 +43,7 @@ public class CommandFormat {
* @see #CommandFormat(int, int, String...)
*/
   @Deprecated
-  public CommandFormat(String n, int min, int max, String ... possibleOpt) {
+  public CommandFormat(String name, int min, int max, String ... possibleOpt) {
 this(min, max, possibleOpt);
   }
   



[45/50] [abbrv] hadoop git commit: MAPREDUCE-6057. Remove obsolete entries from mapred-default.xml (Ray Chiang via aw)

2015-04-27 Thread zjshen
MAPREDUCE-6057. Remove obsolete entries from mapred-default.xml (Ray Chiang via 
aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3044c8da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3044c8da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3044c8da

Branch: refs/heads/YARN-2928
Commit: 3044c8da56aed66ae0eb48988def9e6693be60c2
Parents: bd8af28
Author: Allen Wittenauer a...@apache.org
Authored: Sun Apr 26 20:31:40 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:54 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../java/org/apache/hadoop/mapred/MapTask.java  |  5 ++--
 .../apache/hadoop/mapreduce/MRJobConfig.java|  3 ++
 .../src/main/resources/mapred-default.xml   | 31 
 .../resources/job_1329348432655_0001_conf.xml   |  4 ---
 .../src/main/data/2jobs2min-rumen-jh.json   |  6 
 6 files changed, 9 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044c8da/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f895034..dca42c4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -120,6 +120,9 @@ Trunk (Unreleased)
 MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki
 via aw)
 
+MAPREDUCE-6057. Remove obsolete entries from mapred-default.xml
+(Ray Chiang via aw)
+
   BUG FIXES
 
 MAPREDUCE-6191. Improve clearing stale state of Java serialization

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044c8da/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index c4957b7..a523291 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -978,8 +978,9 @@ public class MapTask extends Task {
 throw new IOException(
 "Invalid \"" + JobContext.IO_SORT_MB + "\": " + sortmb);
   }
-  sorter = ReflectionUtils.newInstance(job.getClass("map.sort.class",
-QuickSort.class, IndexedSorter.class), job);
+  sorter = ReflectionUtils.newInstance(job.getClass(
+   MRJobConfig.MAP_SORT_CLASS, QuickSort.class,
+   IndexedSorter.class), job);
   // buffers and accounting
   int maxMemUsage = sortmb << 20;
   maxMemUsage -= maxMemUsage % METASIZE;
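
The lookup that now sits behind the named constant, as a standalone sketch (a
plain Configuration replaces the JobConf; the key string equals the value of
MRJobConfig.MAP_SORT_CLASS): the sorter implementation is read from
configuration and defaults to QuickSort.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.IndexedSorter;
import org.apache.hadoop.util.QuickSort;
import org.apache.hadoop.util.ReflectionUtils;

public class SorterLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    IndexedSorter sorter = ReflectionUtils.newInstance(
        conf.getClass("map.sort.class", QuickSort.class, IndexedSorter.class),
        conf);
    System.out.println(sorter.getClass().getName()); // org.apache.hadoop.util.QuickSort
  }
}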

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044c8da/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 8176efd..bc31bb5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -28,6 +28,9 @@ import org.apache.hadoop.yarn.util.Apps;
 @InterfaceStability.Evolving
 public interface MRJobConfig {
 
+  // Used by MapTask
+  public static final String MAP_SORT_CLASS = "map.sort.class";
+
   // Put all of the attribute names in here so that Job and JobContext are
   // consistent.
   public static final String INPUT_FORMAT_CLASS_ATTR = 
"mapreduce.job.inputformat.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044c8da/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 

[39/50] [abbrv] hadoop git commit: HDFS-8206. Fix the typos in hadoop-hdfs-httpfs. (Brahma Reddy Battula via xyao)

2015-04-27 Thread zjshen
HDFS-8206. Fix the typos in hadoop-hdfs-httpfs. (Brahma Reddy Battula via xyao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b275cfba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b275cfba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b275cfba

Branch: refs/heads/YARN-2928
Commit: b275cfba502505006ef536582b00b20979e29823
Parents: d953a9c
Author: Xiaoyu Yao x...@apache.org
Authored: Sat Apr 25 21:41:35 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:52 2015 -0700

--
 .../org/apache/hadoop/fs/http/client/HttpFSFileSystem.java | 6 +++---
 .../hadoop/fs/http/server/CheckUploadContentTypeFilter.java| 4 ++--
 .../java/org/apache/hadoop/fs/http/server/HttpFSServer.java| 2 +-
 .../org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java | 4 ++--
 .../java/org/apache/hadoop/lib/servlet/HostnameFilter.java | 4 ++--
 .../src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 7 files changed, 14 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b275cfba/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index e797d12..3a6ce7d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -238,7 +238,7 @@ public class HttpFSFileSystem extends FileSystem
* @return a <code>HttpURLConnection</code> for the HttpFSServer server,
* authenticated and ready to use for the specified path and file 
system operation.
*
-   * @throws IOException thrown if an IO error occurrs.
+   * @throws IOException thrown if an IO error occurs.
*/
   private HttpURLConnection getConnection(final String method,
   Map<String, String> params, Path path, boolean makeQualified)
@@ -263,7 +263,7 @@ public class HttpFSFileSystem extends FileSystem
* HttpFSServer server, authenticated and ready to use for the
* specified path and file system operation.
*
-   * @throws IOException thrown if an IO error occurrs.
+   * @throws IOException thrown if an IO error occurs.
*/
   private HttpURLConnection getConnection(final String method,
   Map<String, String> params, Map<String, List<String>> multiValuedParams,
@@ -301,7 +301,7 @@ public class HttpFSFileSystem extends FileSystem
* @return a <code>HttpURLConnection</code> for the HttpFSServer server, 
authenticated and ready to use for
* the specified path and file system operation.
*
-   * @throws IOException thrown if an IO error occurrs.
+   * @throws IOException thrown if an IO error occurs.
*/
   private HttpURLConnection getConnection(URL url, String method) throws 
IOException {
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b275cfba/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
index 81b0b7a..e96bfa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
@@ -70,8 +70,8 @@ public class CheckUploadContentTypeFilter implements Filter {
* @param response servlet response.
* @param chain filter chain.
*
-   * @throws IOException thrown if an IO error occurrs.
-   * @throws ServletException thrown if a servet error occurrs.
+   * @throws IOException thrown if an IO error occurs.
+   * @throws ServletException thrown if a servlet error occurs.
*/
   @Override
   public void doFilter(ServletRequest request, ServletResponse response,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b275cfba/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
--
diff --git 

[42/50] [abbrv] hadoop git commit: Updated CHANGES.TXT for correct version of HDFS-8206

2015-04-27 Thread zjshen
Updated CHANGES.TXT for correct version of HDFS-8206


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9508901
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9508901
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9508901

Branch: refs/heads/YARN-2928
Commit: c9508901b65ea1c31525e5abc975da75c8559ef6
Parents: b275cfb
Author: Xiaoyu Yao x...@apache.org
Authored: Sat Apr 25 22:16:06 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9508901/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a0a81c0..b7199c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,8 +321,6 @@ Trunk (Unreleased)
 HDFS-7673. synthetic load generator docs give incorrect/incomplete commands
 (Brahma Reddy Battula via aw)
 
-HDFS-8206. Fix the typos in hadoop-hdfs-httpfs. (Brahma Reddy Battula via 
xyao)
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -571,6 +569,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8247. TestDiskspaceQuotaUpdate#testAppendOverTypeQuota is failing.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-8206. Fix the typos in hadoop-hdfs-httpfs. (Brahma Reddy Battula via 
xyao)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[26/50] [abbrv] hadoop git commit: HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte. Contributed by Zhe Zhang.

2015-04-27 Thread zjshen
HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cf35ee7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cf35ee7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cf35ee7

Branch: refs/heads/YARN-2928
Commit: 6cf35ee7bebd87846dec92117510c41daef4c6a0
Parents: fedc17e
Author: Andrew Wang w...@apache.org
Authored: Fri Apr 24 11:54:25 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:50 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/datanode/SimulatedFSDataset.java | 10 +--
 .../server/datanode/TestSimulatedFSDataset.java | 70 +---
 3 files changed, 54 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf35ee7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1cc31b2..317211e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -557,6 +557,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8231. StackTrace displayed at client while QuotaByStorageType exceeds
 (J.Andreina and Xiaoyu Yao via vinayakumarb)
 
+HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte.
+(Zhe Zhang via wang)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf35ee7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 344d1fe..060e055 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
  * Note the synchronization is coarse grained - it is at each method. 
  */
 public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
+  public final static int BYTE_MASK = 0xff;
   static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
 @Override
 public SimulatedFSDataset newInstance(DataNode datanode,
@@ -99,8 +100,8 @@ public class SimulatedFSDataset implements 
FsDatasetSpi<FsVolumeSpi> {
   }
 
   public static byte simulatedByte(Block b, long offsetInBlk) {
-byte firstByte = (byte) (b.getBlockId() % Byte.MAX_VALUE);
-return (byte) ((firstByte + offsetInBlk) % Byte.MAX_VALUE);
+byte firstByte = (byte) (b.getBlockId() & BYTE_MASK);
+return (byte) ((firstByte + offsetInBlk) & BYTE_MASK);
   }
   
   public static final String CONFIG_PROPERTY_CAPACITY =
@@ -1028,12 +1029,13 @@ public class SimulatedFSDataset implements 
FsDatasetSpi<FsVolumeSpi> {
 
 @Override
 public int read() throws IOException {
-  if (currentPos >= length)
+  if (currentPos >= length) {
 return -1;
+  }
   if (data !=null) {
 return data[currentPos++];
   } else {
-return simulatedByte(theBlock, currentPos++);
+return simulatedByte(theBlock, currentPos++) & BYTE_MASK;
   }
 }
 

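The motivation for the mask is easiest to see outside Hadoop. A minimal, self-contained sketch (plain JDK; the values are made up) of why `& 0xff` is the right way to fold a long into an unsigned byte while `% Byte.MAX_VALUE` is not:

```java
public class ByteMaskDemo {
  static final int BYTE_MASK = 0xff;

  public static void main(String[] args) {
    long blockId = -3L; // remainders of negative longs are negative in Java

    // Old style: can yield a negative value and never reaches 127.
    byte oldStyle = (byte) (blockId % Byte.MAX_VALUE);

    // New style: keeps the low 8 bits, so the widened value is in [0, 255].
    byte newStyle = (byte) (blockId & BYTE_MASK);

    // Widening a byte sign-extends; mask again to recover the unsigned
    // value, which is what InputStream.read() must return (0..255, or -1).
    System.out.println(oldStyle);              // -3
    System.out.println(newStyle & BYTE_MASK);  // 253
  }
}
```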
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf35ee7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index f76781d..8dc80d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.SequentialBlockIdGenerator;
 import 

[23/50] [abbrv] hadoop git commit: HDFS-8176. Record from/to snapshots in audit log for snapshot diff report. Contributed by J. Andreina.

2015-04-27 Thread zjshen
HDFS-8176. Record from/to snapshots in audit log for snapshot diff report. 
Contributed by J. Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fedc17e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fedc17e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fedc17e2

Branch: refs/heads/YARN-2928
Commit: fedc17e237526f9712397e58415697402144f880
Parents: 72f3618
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 24 10:23:32 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedc17e2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 56f8ec3..1cc31b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -472,6 +472,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
 
+HDFS-8176. Record from/to snapshots in audit log for snapshot diff report.
+(J. Andreina via jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedc17e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4477dc4..229c4d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7406,8 +7406,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 } finally {
   readUnlock();
 }
-
-logAuditEvent(diffs != null, "computeSnapshotDiff", null, null, null);
+String fromSnapshotRoot = (fromSnapshot == null || fromSnapshot.isEmpty()) ?
+path : Snapshot.getSnapshotPath(path, fromSnapshot);
+String toSnapshotRoot = (toSnapshot == null || toSnapshot.isEmpty()) ?
+path : Snapshot.getSnapshotPath(path, toSnapshot);
+logAuditEvent(diffs != null, "computeSnapshotDiff", fromSnapshotRoot,
+toSnapshotRoot, null);
 return diffs;
   }
   



[08/50] [abbrv] hadoop git commit: HDFS-8215. Refactor NamenodeFsck#check method. Contributed by Takanobu Asanuma

2015-04-27 Thread zjshen
HDFS-8215. Refactor NamenodeFsck#check method.  Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86dbb934
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86dbb934
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86dbb934

Branch: refs/heads/YARN-2928
Commit: 86dbb9342200fd44c1ceceb15d229ba525c4c9ea
Parents: 2ff751d
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 23 14:19:33 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:47 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/NamenodeFsck.java  | 164 ---
 2 files changed, 107 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86dbb934/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e00f67e..ff114dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -464,6 +464,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-4448. Allow HA NN to start in secure mode with wildcard address
 configured (atm via asuresh)
 
+HDFS-8215. Refactor NamenodeFsck#check method.  (Takanobu Asanuma
+via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86dbb934/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index afaec87..23fea12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -428,36 +428,8 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   @VisibleForTesting
   void check(String parent, HdfsFileStatus file, Result res) throws 
IOException {
 String path = file.getFullName(parent);
-boolean isOpen = false;
-
 if (file.isDir()) {
-  if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
-String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
-+ Path.SEPARATOR)
-+ HdfsConstants.DOT_SNAPSHOT_DIR;
-HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
-snapshotPath);
-check(snapshotPath, snapshotFileInfo, res);
-  }
-  byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
-  DirectoryListing thisListing;
-  if (showFiles) {
-out.println(path + " <dir>");
-  }
-  res.totalDirs++;
-  do {
-assert lastReturnedName != null;
-thisListing = namenode.getRpcServer().getListing(
-path, lastReturnedName, false);
-if (thisListing == null) {
-  return;
-}
-HdfsFileStatus[] files = thisListing.getPartialListing();
-for (int i = 0; i < files.length; i++) {
-  check(path, files[i], res);
-}
-lastReturnedName = thisListing.getLastName();
-  } while (thisListing.hasMore());
+  checkDir(path, res);
   return;
 }
 if (file.isSymlink()) {
@@ -467,9 +439,47 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   res.totalSymlinks++;
   return;
 }
+LocatedBlocks blocks = getBlockLocations(path, file);
+if (blocks == null) { // the file is deleted
+  return;
+}
+collectFileSummary(path, file, res, blocks);
+collectBlocksSummary(parent, file, res, blocks);
+  }
+
+  private void checkDir(String path, Result res) throws IOException {
+if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
+  String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
+  + Path.SEPARATOR)
+  + HdfsConstants.DOT_SNAPSHOT_DIR;
+  HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
+  snapshotPath);
+  check(snapshotPath, snapshotFileInfo, res);
+}
+byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
+DirectoryListing thisListing;
+if (showFiles) {
+  out.println(path + " <dir>");
+}
+res.totalDirs++;
+do {
+  assert lastReturnedName != null;
+  thisListing = namenode.getRpcServer().getListing(
+  path, lastReturnedName, 

[12/50] [abbrv] hadoop git commit: HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on post-HDFS-7915 DataNode (cmccabe)

2015-04-27 Thread zjshen
HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on post-HDFS-7915 
DataNode (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b512bad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b512bad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b512bad7

Branch: refs/heads/YARN-2928
Commit: b512bad705759d23fbe4de9bf668b96d26a55cfa
Parents: dbf031f
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 23 19:03:44 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 27 14:18:48 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |  6 ++-
 .../hdfs/protocol/datatransfer/Receiver.java|  3 +-
 .../shortcircuit/TestShortCircuitCache.java | 44 
 4 files changed, 54 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b512bad7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c3cfe1..b0a0a50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -589,6 +589,9 @@ Release 2.7.1 - UNRELEASED
HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.
 (surendra singh lilhore via szetszwo)
 
+HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
+post-HDFS-7915 DataNode (cmccabe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b512bad7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 5175a87..714cd68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -77,6 +77,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 public void injectRequestFileDescriptorsFailure() throws IOException {
   // do nothing
 }
+public boolean getSupportsReceiptVerification() {
+  return true;
+}
   }
 
   @VisibleForTesting
@@ -533,7 +536,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 final DataOutputStream out =
 new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
 SlotId slotId = slot == null ? null : slot.getSlotId();
-new Sender(out).requestShortCircuitFds(block, token, slotId, 1, true);
+new Sender(out).requestShortCircuitFds(block, token, slotId, 1,
+failureInjector.getSupportsReceiptVerification());
 DataInputStream in = new DataInputStream(peer.getInputStream());
 BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
 PBHelper.vintPrefixed(in));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b512bad7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
index 31bdc5e..a6fbb29 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
@@ -186,7 +186,8 @@ public abstract class Receiver implements 
DataTransferProtocol {
 try {
   requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
   PBHelper.convert(proto.getHeader().getToken()),
-  slotId, proto.getMaxVersion(), true);
+  slotId, proto.getMaxVersion(),
+  proto.getSupportsReceiptVerification());
 } finally {
   if (traceScope != null) traceScope.close();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b512bad7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 

[2/2] hadoop git commit: HDFS-8232. Missing datanode counters when using Metrics2 sink interface. Contributed by Anu Engineer.

2015-04-27 Thread cnauroth
HDFS-8232. Missing datanode counters when using Metrics2 sink interface. 
Contributed by Anu Engineer.

(cherry picked from commit feb68cb5470dc3e6c16b6bc1549141613e360601)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a82addd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a82addd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a82addd6

Branch: refs/heads/branch-2
Commit: a82addd61471b6a0c2566543d1094e33f1152820
Parents: 8ee632c
Author: cnauroth cnaur...@apache.org
Authored: Mon Apr 27 16:48:13 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Apr 27 16:48:25 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  27 
 .../datanode/metrics/DataNodeMetricHelper.java  |  79 +++
 .../server/datanode/metrics/FSDatasetMBean.java |   3 +-
 .../server/datanode/SimulatedFSDataset.java |  20 ++-
 .../datanode/TestDataNodeFSDataSetSink.java | 136 +++
 .../extdataset/ExternalDatasetImpl.java |  19 ++-
 7 files changed, 281 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82addd6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7fb5830..a3074ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -250,6 +250,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8205. CommandFormat#parse() should not parse option as
 value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-8232. Missing datanode counters when using Metrics2 sink interface.
+(Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a82addd6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 3e74001..4764bbd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
@@ -106,6 +107,9 @@ import 
org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -318,6 +322,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 lazyWriter = new Daemon(new LazyWriter(conf));
 lazyWriter.start();
 registerMBean(datanode.getDatanodeUuid());
+
+// Add a Metrics2 Source Interface. This is same
+// data as MXBean. We can remove the registerMbean call
+// in a release where we can break backward compatibility
+MetricsSystem ms = DefaultMetricsSystem.instance();
+ms.register("FSDatasetState", "FSDatasetState", this);
+
 localFS = FileSystem.getLocal(conf);
 blockPinningEnabled = conf.getBoolean(
   DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
@@ -638,6 +649,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 return cacheManager.getNumBlocksFailedToUncache();
   }
 
+  /**
+   * Get metrics from the metrics source
+   *
+   * @param collector to contain the resulting metrics snapshot
+   * @param all if true, return all metrics even if unchanged.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+try {
+  

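A minimal sketch of the Metrics2 source pattern being added here, reduced to a standalone class; it assumes only hadoop-common on the classpath, and the source and record names are illustrative, not from the patch:

```java
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class DemoMetricsSource implements MetricsSource {
  private volatile long bytesServed = 42;

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // Snapshot this source's counters into a named record; sinks attached
    // to the metrics system will see it on their next poll.
    collector.addRecord("DemoRecord")
        .addCounter(Interns.info("BytesServed", "Bytes served so far"),
            bytesServed);
  }

  public static void main(String[] args) {
    MetricsSystem ms = DefaultMetricsSystem.initialize("demo");
    ms.register("DemoSource", "Example metrics source",
        new DemoMetricsSource());
  }
}
```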
hadoop git commit: YARN-2740. Fix NodeLabelsManager to properly handle node label modifications when distributed node label configuration enabled. (Naganarasimha G R via wangda)

2015-04-27 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9fc32c5c4 -> db1b674b5


YARN-2740. Fix NodeLabelsManager to properly handle node label modifications 
when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db1b674b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db1b674b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db1b674b

Branch: refs/heads/trunk
Commit: db1b674b50ddecf2774f4092d677c412722bdcb1
Parents: 9fc32c5
Author: Wangda Tan wan...@apache.org
Authored: Mon Apr 27 16:24:18 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon Apr 27 16:24:38 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../nodelabels/CommonNodeLabelsManager.java |  20 +++-
 .../nodelabels/FileSystemNodeLabelsStore.java   |  16 ++-
 .../hadoop/yarn/nodelabels/NodeLabelsStore.java |  13 +-
 .../DummyCommonNodeLabelsManager.java   |   3 +-
 .../nodelabels/TestCommonNodeLabelsManager.java |  25 
 .../TestFileSystemNodeLabelsStore.java  |  34 ++
 .../server/resourcemanager/AdminService.java|  49 +---
 .../resourcemanager/ResourceTrackerService.java |  15 +--
 .../resourcemanager/webapp/RMWebServices.java   | 119 ++-
 .../resourcemanager/TestRMAdminService.java |  68 +++
 .../nodelabels/NullRMNodeLabelsManager.java |   3 +-
 .../webapp/TestRMWebServices.java   |   2 +
 .../webapp/TestRMWebServicesNodeLabels.java | 107 ++---
 15 files changed, 379 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db1b674b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1ac7a13..9039460 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -265,6 +265,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
 (zhijie shen via xgong)
 
+YARN-2740. Fix NodeLabelsManager to properly handle node label 
modifications 
+when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db1b674b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a7f485d..eb568b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1779,6 +1779,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NODELABEL_CONFIGURATION_TYPE =
   CENTALIZED_NODELABEL_CONFIGURATION_TYPE;
 
+  @Private
+  public static boolean isDistributedNodeLabelConfiguration(Configuration 
conf) {
+return DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE.equals(conf.get(
+NODELABEL_CONFIGURATION_TYPE, DEFAULT_NODELABEL_CONFIGURATION_TYPE));
+  }
+
   public YarnConfiguration() {
 super();
   }
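
A short usage sketch for the new helper; it assumes hadoop-yarn-api on the classpath and that the two configuration-type constants are public (the usual convention for YarnConfiguration keys), and setting the property in code is just for the demo:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeLabelModeCheck {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
        YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
    if (YarnConfiguration.isDistributedNodeLabelConfiguration(conf)) {
      // In distributed mode the NodeManagers own node-to-label mappings,
      // so centralized edits (e.g. via the RM admin service) are rejected.
      System.out.println("distributed mode: central label edits disallowed");
    }
  }
}
```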

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db1b674b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 7493169..f2ff0f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -97,6 +97,8 @@ public class CommonNodeLabelsManager extends AbstractService {
   protected NodeLabelsStore store;
   private boolean nodeLabelsEnabled = false;
 
+  private boolean isDistributedNodeLabelConfiguration = false;
+
   /**
 * A <code>Host</code> can have multiple 

hadoop git commit: YARN-2740. Fix NodeLabelsManager to properly handle node label modifications when distributed node label configuration enabled. (Naganarasimha G R via wangda)

2015-04-27 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 32dc13d90 -> 8ee632caa


YARN-2740. Fix NodeLabelsManager to properly handle node label modifications 
when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)

(cherry picked from commit db1b674b50ddecf2774f4092d677c412722bdcb1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ee632ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ee632ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ee632ca

Branch: refs/heads/branch-2
Commit: 8ee632caa79b92b1af98684f83b01c3447a119ee
Parents: 32dc13d
Author: Wangda Tan wan...@apache.org
Authored: Mon Apr 27 16:24:18 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon Apr 27 16:42:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../nodelabels/CommonNodeLabelsManager.java |  20 +++-
 .../nodelabels/FileSystemNodeLabelsStore.java   |  16 ++-
 .../hadoop/yarn/nodelabels/NodeLabelsStore.java |  13 +-
 .../DummyCommonNodeLabelsManager.java   |   3 +-
 .../nodelabels/TestCommonNodeLabelsManager.java |  25 
 .../TestFileSystemNodeLabelsStore.java  |  34 ++
 .../server/resourcemanager/AdminService.java|  49 +---
 .../resourcemanager/ResourceTrackerService.java |  15 +--
 .../resourcemanager/webapp/RMWebServices.java   | 119 ++-
 .../resourcemanager/TestRMAdminService.java |  68 +++
 .../nodelabels/NullRMNodeLabelsManager.java |   3 +-
 .../webapp/TestRMWebServices.java   |   2 +
 .../webapp/TestRMWebServicesNodeLabels.java | 107 ++---
 15 files changed, 379 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee632ca/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 20de1ed..ca9247f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -217,6 +217,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
 (zhijie shen via xgong)
 
+YARN-2740. Fix NodeLabelsManager to properly handle node label 
modifications 
+when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee632ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c8f9648..4dd01d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1779,6 +1779,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NODELABEL_CONFIGURATION_TYPE =
   CENTALIZED_NODELABEL_CONFIGURATION_TYPE;
 
+  @Private
+  public static boolean isDistributedNodeLabelConfiguration(Configuration 
conf) {
+return DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE.equals(conf.get(
+NODELABEL_CONFIGURATION_TYPE, DEFAULT_NODELABEL_CONFIGURATION_TYPE));
+  }
+
   public YarnConfiguration() {
 super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee632ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 7493169..f2ff0f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -97,6 +97,8 @@ public class CommonNodeLabelsManager extends AbstractService {
   protected NodeLabelsStore store;
   private boolean nodeLabelsEnabled = false;
 
+  private boolean 

[1/2] hadoop git commit: HDFS-8232. Missing datanode counters when using Metrics2 sink interface. Contributed by Anu Engineer.

2015-04-27 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8ee632caa -> a82addd61
  refs/heads/trunk db1b674b5 -> feb68cb54


HDFS-8232. Missing datanode counters when using Metrics2 sink interface. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb68cb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb68cb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb68cb5

Branch: refs/heads/trunk
Commit: feb68cb5470dc3e6c16b6bc1549141613e360601
Parents: db1b674
Author: cnauroth cnaur...@apache.org
Authored: Mon Apr 27 16:48:13 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Apr 27 16:48:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  27 
 .../datanode/metrics/DataNodeMetricHelper.java  |  79 +++
 .../server/datanode/metrics/FSDatasetMBean.java |   3 +-
 .../server/datanode/SimulatedFSDataset.java |  20 ++-
 .../datanode/TestDataNodeFSDataSetSink.java | 136 +++
 .../extdataset/ExternalDatasetImpl.java |  19 ++-
 7 files changed, 281 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d56ea0c..326de0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8205. CommandFormat#parse() should not parse option as
 value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-8232. Missing datanode counters when using Metrics2 sink interface.
+(Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 8869f5a..b87daec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -71,6 +71,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
@@ -104,6 +105,9 @@ import 
org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -316,6 +320,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 lazyWriter = new Daemon(new LazyWriter(conf));
 lazyWriter.start();
 registerMBean(datanode.getDatanodeUuid());
+
+// Add a Metrics2 Source Interface. This is same
+// data as MXBean. We can remove the registerMbean call
+// in a release where we can break backward compatibility
+MetricsSystem ms = DefaultMetricsSystem.instance();
+ms.register("FSDatasetState", "FSDatasetState", this);
+
 localFS = FileSystem.getLocal(conf);
 blockPinningEnabled = conf.getBoolean(
   DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
@@ -636,6 +647,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 return cacheManager.getNumBlocksFailedToUncache();
   }
 
+  /**
+   * Get metrics from the metrics source
+   *
+   * @param collector to contain the resulting metrics snapshot
+   * @param all if true, return all metrics even if unchanged.
+   */
+  @Override
+  public void 

hadoop git commit: MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM tokens when they roll over. Contributed by Jason Lowe.

2015-04-27 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4ddcc7e5b -> 3a7bfdf31


MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM 
tokens when they roll over. Contributed by Jason Lowe.

(cherry picked from commit 9fc32c5c4d1d5f50c605bdb0e3b13f44c86660c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a7bfdf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a7bfdf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a7bfdf3

Branch: refs/heads/branch-2.7
Commit: 3a7bfdf3180abae27cfcbf0dab7ff743a044994d
Parents: 4ddcc7e
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 27 14:58:16 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 27 15:01:15 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../v2/app/local/LocalContainerAllocator.java   |  28 +++-
 .../app/local/TestLocalContainerAllocator.java  | 152 +--
 3 files changed, 172 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a7bfdf3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 518b6e3..2af375d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -17,6 +17,9 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM
+tokens when they roll over. (Jason Lowe via vinodkv)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a7bfdf3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 74dfb39..aed1023 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.mapreduce.v2.app.local;
 
+import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
@@ -35,17 +37,22 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 
 /**
  * Allocates containers locally. Doesn't allocate a real container;
@@ -99,8 +106,9 @@ public class LocalContainerAllocator extends RMCommunicator
 AllocateRequest.newInstance(this.lastResponseID,
   super.getApplicationProgress(), new 

hadoop git commit: MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.

2015-04-27 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5e67c4d38 -> 9a3dda3d3


MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a3dda3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a3dda3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a3dda3d

Branch: refs/heads/trunk
Commit: 9a3dda3d345b94cf6f9062c00395500596829cf1
Parents: 5e67c4d
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Apr 28 01:47:37 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Apr 28 01:47:37 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../src/site/markdown/MapReduceTutorial.md  | 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3dda3d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f9488fb..f1d0493 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -352,6 +352,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
 missing directory. (Craig Welch via devaraj)
 
+MAPREDUCE-6341. Fix typo in mapreduce tutorial. (John Michael Luy
+via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3dda3d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index ccc9590..cd087d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -210,7 +210,9 @@ Assuming that:
 
 Sample text-files as input:
 
-$ bin/hadoop fs -ls /user/joe/wordcount/input/ 
/user/joe/wordcount/input/file01 /user/joe/wordcount/input/file02
+$ bin/hadoop fs -ls /user/joe/wordcount/input/
+/user/joe/wordcount/input/file01
+/user/joe/wordcount/input/file02
 
 $ bin/hadoop fs -cat /user/joe/wordcount/input/file01
 Hello World Bye World
@@ -224,12 +226,12 @@ Run the application:
 
 Output:
 
-$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0`
+$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0
 Bye 1
 Goodbye 1
 Hadoop 2
 Hello 2
-World 2`
+World 2
 
 Applications can specify a comma separated list of paths which would be 
present in the current working directory of the task using the option `-files`. 
The `-libjars` option allows applications to add jars to the classpaths of the 
maps and reduces. The option `-archives` allows them to pass comma separated 
list of archives as arguments. These archives are unarchived and a link with 
name of the archive is created in the current working directory of tasks. More 
details about the command line options are available at [Commands 
Guide](../../hadoop-project-dist/hadoop-common/CommandsManual.html).
 
@@ -288,13 +290,13 @@ The output of the first map:
 
  Bye, 1
  Hello, 1
- World, 2`
+ World, 2
 
 The output of the second map:
 
  Goodbye, 1
  Hadoop, 2
- Hello, 1`
+ Hello, 1
 
 ```java
 public void reduce(Text key, Iterable<IntWritable> values,



hadoop git commit: MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.

2015-04-27 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 84ae26573 -> fa9def5a2


MAPREDUCE-6341. Fix typo in mapreduce tutorial. Contributed by John Michael Luy.

(cherry picked from commit 9a3dda3d345b94cf6f9062c00395500596829cf1)

Conflicts:

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa9def5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa9def5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa9def5a

Branch: refs/heads/branch-2
Commit: fa9def5a2ad3ef655c2fdfc601d84853653cdce0
Parents: 84ae265
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Apr 28 01:47:37 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Apr 28 01:58:12 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../src/site/markdown/MapReduceTutorial.md  | 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9def5a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bdbe3c5..d0411b3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -104,6 +104,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
 missing directory. (Craig Welch via devaraj)
 
+MAPREDUCE-6341. Fix typo in mapreduce tutorial. (John Michael Luy
+via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9def5a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index 0f24549..4db06e3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -210,7 +210,9 @@ Assuming that:
 
 Sample text-files as input:
 
-$ bin/hadoop fs -ls /user/joe/wordcount/input/ 
/user/joe/wordcount/input/file01 /user/joe/wordcount/input/file02
+$ bin/hadoop fs -ls /user/joe/wordcount/input/
+/user/joe/wordcount/input/file01
+/user/joe/wordcount/input/file02
 
 $ bin/hadoop fs -cat /user/joe/wordcount/input/file01
 Hello World Bye World
@@ -224,12 +226,12 @@ Run the application:
 
 Output:
 
-$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0`
+$ bin/hadoop fs -cat /user/joe/wordcount/output/part-r-0
 Bye 1
 Goodbye 1
 Hadoop 2
 Hello 2
-World 2`
+World 2
 
 Applications can specify a comma separated list of paths which would be 
present in the current working directory of the task using the option `-files`. 
The `-libjars` option allows applications to add jars to the classpaths of the 
maps and reduces. The option `-archives` allows them to pass comma separated 
list of archives as arguments. These archives are unarchived and a link with 
name of the archive is created in the current working directory of tasks. More 
details about the command line options are available at [Commands 
Guide](../../hadoop-project-dist/hadoop-common/CommandsManual.html).
 
@@ -288,13 +290,13 @@ The output of the first map:
 
  Bye, 1
  Hello, 1
- World, 2`
+ World, 2
 
 The output of the second map:
 
  Goodbye, 1
  Hadoop, 2
- Hello, 1`
+ Hello, 1
 
 ```java
 public void reduce(Text key, Iterable<IntWritable> values,



hadoop git commit: YARN-3530. ATS throws exception on trying to filter results without otherinfo. Contributed by zhijie shen

2015-04-27 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9a3dda3d3 -> 7f07c4d81


YARN-3530. ATS throws exception on trying to filter results without
otherinfo. Contributed by zhijie shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f07c4d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f07c4d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f07c4d8

Branch: refs/heads/trunk
Commit: 7f07c4d81023e3bf4bf8980e64cc9420ec31cf55
Parents: 9a3dda3
Author: Xuan xg...@apache.org
Authored: Mon Apr 27 10:36:42 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Apr 27 10:36:42 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/timeline/LeveldbTimelineStore.java   | 34 ++-
 .../server/timeline/TimelineStoreTestUtils.java | 99 ++--
 3 files changed, 104 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f07c4d8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 87db291..fdc3f4a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -271,6 +271,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3464. Race condition in LocalizerRunner kills localizer before 
 localizing all resources. (Zhihai Xu via kasha)
 
+YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
+(zhijie shen via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f07c4d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index d521f70..8cfa0c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -357,6 +357,9 @@ public class LeveldbTimelineStore extends AbstractService
   iterator = new LeveldbIterator(db);
   iterator.seek(prefix);
 
+  if (fields == null) {
+fields = EnumSet.allOf(Field.class);
+  }
   return getEntity(entityId, entityType, revStartTime, fields, iterator,
   prefix, prefix.length);
 } catch(DBException e) {
@@ -373,10 +376,6 @@ public class LeveldbTimelineStore extends AbstractService
   private static TimelineEntity getEntity(String entityId, String entityType,
    Long startTime, EnumSet<Field> fields, LeveldbIterator iterator,
   byte[] prefix, int prefixlen) throws IOException {
-if (fields == null) {
-  fields = EnumSet.allOf(Field.class);
-}
-
 TimelineEntity entity = new TimelineEntity();
 boolean events = false;
 boolean lastEvent = false;
@@ -590,6 +589,25 @@ public class LeveldbTimelineStore extends AbstractService
   String entityType, Long limit, Long starttime, Long endtime,
    String fromId, Long fromTs, Collection<NameValuePair> secondaryFilters,
    EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
+// Even if other info and primary filter fields are not included, we
+// still need to load them to match secondary filters when they are
+// non-empty
+if (fields == null) {
+  fields = EnumSet.allOf(Field.class);
+}
+boolean addPrimaryFilters = false;
+boolean addOtherInfo = false;
+if (secondaryFilters != null && secondaryFilters.size() > 0) {
+  if (!fields.contains(Field.PRIMARY_FILTERS)) {
+fields.add(Field.PRIMARY_FILTERS);
+addPrimaryFilters = true;
+  }
+  if (!fields.contains(Field.OTHER_INFO)) {
+fields.add(Field.OTHER_INFO);
+addOtherInfo = true;
+  }
+}
+
 LeveldbIterator iterator = null;
 try {
   KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
@@ -690,6 +708,14 @@ public class LeveldbTimelineStore extends AbstractService
 entity.setDomainId(DEFAULT_DOMAIN_ID);
   }
   if (checkAcl == null || checkAcl.check(entity)) {
+// 

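The defaulting logic above is a small, reusable pattern; a standalone sketch follows, in which the enum mirrors the store's Field but the class is otherwise hypothetical:

```java
import java.util.EnumSet;

public class FieldDefaulting {
  enum Field { EVENTS, PRIMARY_FILTERS, OTHER_INFO, RELATED_ENTITIES }

  // A null request means "load everything"; when secondary filters are
  // present, force-load the two fields the filters match against.
  static EnumSet<Field> effectiveFields(EnumSet<Field> requested,
      boolean hasSecondaryFilters) {
    EnumSet<Field> fields = (requested == null)
        ? EnumSet.allOf(Field.class) : EnumSet.copyOf(requested);
    if (hasSecondaryFilters) {
      fields.add(Field.PRIMARY_FILTERS);
      fields.add(Field.OTHER_INFO);
    }
    return fields;
  }

  public static void main(String[] args) {
    System.out.println(effectiveFields(null, false));
    System.out.println(effectiveFields(EnumSet.of(Field.EVENTS), true));
  }
}
```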
hadoop git commit: YARN-3530. ATS throws exception on trying to filter results without otherinfo. Contributed by zhijie shen

2015-04-27 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fa9def5a2 -> 0e8af401d


YARN-3530. ATS throws exception on trying to filter results without
otherinfo. Contributed by zhijie shen

(cherry picked from commit 7f07c4d81023e3bf4bf8980e64cc9420ec31cf55)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e8af401
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e8af401
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e8af401

Branch: refs/heads/branch-2
Commit: 0e8af401d70bc6f749031fea2a62d49aac823693
Parents: fa9def5
Author: Xuan xg...@apache.org
Authored: Mon Apr 27 10:36:42 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Apr 27 10:37:40 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/timeline/LeveldbTimelineStore.java   | 34 ++-
 .../server/timeline/TimelineStoreTestUtils.java | 99 ++--
 3 files changed, 104 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e8af401/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1f85778..e18bf8d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -223,6 +223,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3464. Race condition in LocalizerRunner kills localizer before 
 localizing all resources. (Zhihai Xu via kasha)
 
+YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
+(zhijie shen via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e8af401/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index d521f70..8cfa0c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -357,6 +357,9 @@ public class LeveldbTimelineStore extends AbstractService
   iterator = new LeveldbIterator(db);
   iterator.seek(prefix);
 
+  if (fields == null) {
+fields = EnumSet.allOf(Field.class);
+  }
   return getEntity(entityId, entityType, revStartTime, fields, iterator,
   prefix, prefix.length);
 } catch(DBException e) {
@@ -373,10 +376,6 @@ public class LeveldbTimelineStore extends AbstractService
   private static TimelineEntity getEntity(String entityId, String entityType,
    Long startTime, EnumSet<Field> fields, LeveldbIterator iterator,
   byte[] prefix, int prefixlen) throws IOException {
-if (fields == null) {
-  fields = EnumSet.allOf(Field.class);
-}
-
 TimelineEntity entity = new TimelineEntity();
 boolean events = false;
 boolean lastEvent = false;
@@ -590,6 +589,25 @@ public class LeveldbTimelineStore extends AbstractService
   String entityType, Long limit, Long starttime, Long endtime,
    String fromId, Long fromTs, Collection<NameValuePair> secondaryFilters,
    EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
+// Even if other info and primary filter fields are not included, we
+// still need to load them to match secondary filters when they are
+// non-empty
+if (fields == null) {
+  fields = EnumSet.allOf(Field.class);
+}
+boolean addPrimaryFilters = false;
+boolean addOtherInfo = false;
+if (secondaryFilters != null && secondaryFilters.size() > 0) {
+  if (!fields.contains(Field.PRIMARY_FILTERS)) {
+fields.add(Field.PRIMARY_FILTERS);
+addPrimaryFilters = true;
+  }
+  if (!fields.contains(Field.OTHER_INFO)) {
+fields.add(Field.OTHER_INFO);
+addOtherInfo = true;
+  }
+}
+
 LeveldbIterator iterator = null;
 try {
   KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
@@ -690,6 +708,14 @@ public class LeveldbTimelineStore extends AbstractService
 entity.setDomainId(DEFAULT_DOMAIN_ID);
   }
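
The getEntities() hunk above temporarily widens the requested field set so
secondary filters can be matched, and remembers which fields it forced in so
they can be stripped from the results afterwards. A minimal self-contained
sketch of that pattern, with illustrative names rather than the actual
LeveldbTimelineStore internals:

  import java.util.EnumSet;

  final class FieldWidening {
    enum Field { EVENTS, RELATED_ENTITIES, PRIMARY_FILTERS, LAST_EVENT_ONLY, OTHER_INFO }

    // Returns the effective field set to load; 'added' records the fields
    // forced in solely to evaluate secondary filters, so the caller can
    // remove them from each result entity before returning.
    static EnumSet<Field> widen(EnumSet<Field> requested,
        boolean hasSecondaryFilters, EnumSet<Field> added) {
      EnumSet<Field> effective = (requested == null)
          ? EnumSet.allOf(Field.class) : EnumSet.copyOf(requested);
      if (hasSecondaryFilters) {
        // Secondary filters match against primary filters and other info,
        // so both must be loaded even when the caller did not ask for them.
        if (effective.add(Field.PRIMARY_FILTERS)) {
          added.add(Field.PRIMARY_FILTERS);
        }
        if (effective.add(Field.OTHER_INFO)) {
          added.add(Field.OTHER_INFO);
        }
      }
      return effective;
    }
  }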
  

hadoop git commit: YARN-3431. Sub resources of timeline entity needs to be passed to a separate endpoint. Contributed By Zhijie Shen.

2015-04-27 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 582211888 -> fa5cc7524


YARN-3431. Sub resources of timeline entity needs to be passed to a separate 
endpoint. Contributed By Zhijie Shen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa5cc752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa5cc752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa5cc752

Branch: refs/heads/YARN-2928
Commit: fa5cc75245a6dba549620a8b26c7b4a8aed9838e
Parents: 5822118
Author: Junping Du junping...@apache.org
Authored: Mon Apr 27 11:28:32 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Mon Apr 27 11:28:32 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ApplicationAttemptEntity.java   |  13 +-
 .../timelineservice/ApplicationEntity.java  |  22 +-
 .../records/timelineservice/ClusterEntity.java  |  12 +-
 .../timelineservice/ContainerEntity.java|  13 +-
 .../api/records/timelineservice/FlowEntity.java |  80 +++--
 .../HierarchicalTimelineEntity.java | 124 +++
 .../records/timelineservice/QueueEntity.java|  36 +++
 .../records/timelineservice/TimelineEntity.java | 322 +++
 .../records/timelineservice/TimelineQueue.java  |  35 --
 .../records/timelineservice/TimelineUser.java   |  35 --
 .../api/records/timelineservice/UserEntity.java |  36 +++
 .../TestTimelineServiceRecords.java |  91 --
 .../TestTimelineServiceClientIntegration.java   |  44 ++-
 .../collector/TimelineCollectorWebService.java  |  65 +++-
 15 files changed, 654 insertions(+), 277 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5cc752/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 408b8e6..8bd73fe 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -55,6 +55,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)
 
+YARN-3431. Sub resources of timeline entity needs to be passed to a
+separate endpoint. (Zhijie Shen via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5cc752/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
index 9dc0c1d..734c741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
@@ -20,16 +20,17 @@ package org.apache.hadoop.yarn.api.records.timelineservice;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement(name = "appattempt")
-@XmlAccessorType(XmlAccessType.NONE)
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public class ApplicationAttemptEntity extends HierarchicalTimelineEntity {
   public ApplicationAttemptEntity() {
 super(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString());
   }
+
+  public ApplicationAttemptEntity(TimelineEntity entity) {
+super(entity);
+if (!entity.getType().equals(
+TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString())) {
+  throw new IllegalArgumentException("Incompatible entity type: "
+  + getId());
+}
+  }
 }
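
The added constructor illustrates a pattern this commit repeats across the new
sub-resource classes: each typed entity wraps a generic TimelineEntity and
fails fast when the type tag does not match. A simplified, self-contained
rendering (toy classes, not the actual YARN API):

  // Toy version of the wrap-and-validate pattern used by the typed entities.
  class GenericEntity {
    private final String type;
    GenericEntity(String type) { this.type = type; }
    String getType() { return type; }
  }

  class AttemptEntity extends GenericEntity {
    static final String TYPE = "YARN_APPLICATION_ATTEMPT";
    AttemptEntity(GenericEntity entity) {
      super(entity.getType());
      // Reject mistyped entities at construction time rather than letting
      // them reach type-specific accessors later.
      if (!TYPE.equals(entity.getType())) {
        throw new IllegalArgumentException(
            "Incompatible entity type: " + entity.getType());
      }
    }
  }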

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5cc752/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
index 45ec520..183d8d8 100644
--- 

[44/50] hadoop git commit: HDFS-8156. Add/implement necessary APIs even we just have the system default schema. Contributed by Kai Zheng.

2015-04-27 Thread zhz
HDFS-8156. Add/implement necessary APIs even we just have the system default 
schema. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36cb4fad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36cb4fad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36cb4fad

Branch: refs/heads/HDFS-7285
Commit: 36cb4fad99299cdcb98b4e6d8a3ace5737025ead
Parents: 3652361
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 22 14:48:54 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:54 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 173 +++
 .../hadoop/io/erasurecode/TestECSchema.java |   2 +-
 .../hadoop/io/erasurecode/TestSchemaLoader.java |   6 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   2 +-
 .../hdfs/server/namenode/ECSchemaManager.java   |  79 -
 .../namenode/ErasureCodingZoneManager.java  |  16 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  29 +++-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   |   5 +-
 .../hadoop/hdfs/TestErasureCodingZones.java |  45 +++--
 10 files changed, 249 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cb4fad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 32077f6..f058ea7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode;
 
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -30,55 +31,80 @@ public final class ECSchema {
   public static final String CHUNK_SIZE_KEY = "chunkSize";
   public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
-  private String schemaName;
-  private String codecName;
-  private Map<String, String> options;
-  private int numDataUnits;
-  private int numParityUnits;
-  private int chunkSize;
+  /**
+   * A friendly, understandable name that conveys what the schema is; it also
+   * serves as the identifier that distinguishes it from other schemas.
+   */
+  private final String schemaName;
+
+  /**
+   * The name of the associated erasure codec.
+   */
+  private final String codecName;
+
+  /**
+   * Number of source data units coded
+   */
+  private final int numDataUnits;
+
+  /**
+   * Number of parity units generated in a coding
+   */
+  private final int numParityUnits;
+
+  /**
+   * Unit data size for each chunk in a coding
+   */
+  private final int chunkSize;
+
+  /*
+   * An erasure code can have its own specific advanced parameters; it is up
+   * to the code itself to interpret these key-value settings.
+   */
+  private final Map<String, String> extraOptions;
 
   /**
-   * Constructor with schema name and provided options. Note the options may
+   * Constructor with schema name and all provided options. Note the options may
* contain additional information for the erasure codec to interpret further.
* @param schemaName schema name
-   * @param options schema options
+   * @param allOptions all schema options
*/
-  public ECSchema(String schemaName, Map<String, String> options) {
+  public ECSchema(String schemaName, Map<String, String> allOptions) {
 assert (schemaName != null && ! schemaName.isEmpty());
 
 this.schemaName = schemaName;
 
-if (options == null || options.isEmpty()) {
+if (allOptions == null || allOptions.isEmpty()) {
   throw new IllegalArgumentException("No schema options are provided");
 }
 
-String codecName = options.get(CODEC_NAME_KEY);
+this.codecName = allOptions.get(CODEC_NAME_KEY);
 if (codecName == null || codecName.isEmpty()) {
   throw new IllegalArgumentException("No codec option is provided");
 }
 
-int dataUnits = 0, parityUnits = 0;
-try {
-  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
-dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
-  }
-} catch (NumberFormatException e) {
-  throw new IllegalArgumentException("Option value " +
-  options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
-  " is found. It should be an integer");
+int tmpNumDataUnits = extractIntOption(NUM_DATA_UNITS_KEY, allOptions);
+int tmpNumParityUnits = extractIntOption(NUM_PARITY_UNITS_KEY, allOptions);
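
The mail is cut off just past this point, but the shape of the refactor is
clear: the duplicated try/parse blocks collapse into a shared extractIntOption
helper. A plausible reconstruction, offered only as a sketch since the
committed body is not visible here:

  import java.util.Map;

  final class OptionParsing {
    // Parses an integer option, translating NumberFormatException into the
    // same IllegalArgumentException message style ECSchema uses elsewhere.
    static int extractIntOption(String optionKey, Map<String, String> options) {
      int result = 0;
      try {
        if (options.containsKey(optionKey)) {
          result = Integer.parseInt(options.get(optionKey));
        }
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Option value "
            + options.get(optionKey) + " for " + optionKey
            + " is found. It should be an integer");
      }
      return result;
    }
  }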
+ 

[08/50] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-27 Thread zhz
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85633f86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85633f86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85633f86

Branch: refs/heads/HDFS-7285
Commit: 85633f865cf7f01673260fa475f6d72dd5ed807e
Parents: c6f4610
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:27 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85633f86/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by
 Xinwei Qin ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai
+Zheng ( Kai Zheng )



[15/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B) Added missed file

2015-04-27 Thread zhz
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
NameNode (Contributed by Vinayakumar B)
Added missed file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fd973f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fd973f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fd973f2

Branch: refs/heads/HDFS-7285
Commit: 1fd973f224ef1a832c0f47b8a195ef22d4666fd2
Parents: af9472c
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 14:23:03 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:29 2015 -0700

--
 .../org/apache/hadoop/hdfs/protocol/ECInfo.java | 41 
 1 file changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd973f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
new file mode 100644
index 000..ca642c2
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+/**
+ * Class to provide information, such as ECSchema, for a file/block.
+ */
+public class ECInfo {
+  private final String src;
+  private final ECSchema schema;
+
+  public ECInfo(String src, ECSchema schema) {
+this.src = src;
+this.schema = schema;
+  }
+
+  public String getSrc() {
+return src;
+  }
+
+  public ECSchema getSchema() {
+return schema;
+  }
+}



[02/50] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin

2015-04-27 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eafbdc00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eafbdc00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eafbdc00

Branch: refs/heads/HDFS-7285
Commit: eafbdc003db81411036e6d722a46d6919f328a10
Parents: a3e31cb
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 2 05:12:35 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:25 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 .../src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eafbdc00/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b69e69a..01280db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -29,3 +29,6 @@
 
 HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by
+Xinwei Qin ( Xinwei Qin via Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eafbdc00/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8dc3f45..27be00e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -64,7 +64,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
   " is found. It should be an integer");
 }
 
@@ -74,7 +74,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_PARITY_UNITS_KEY) + " for " + NUM_PARITY_UNITS_KEY +
   " is found. It should be an integer");
 }
 



[07/50] hadoop git commit: HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones. Contributed by Zhe Zhang

2015-04-27 Thread zhz
HDFS-7839. Erasure coding: implement facilities in NameNode to create and 
manage EC zones. Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2615289
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2615289
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2615289

Branch: refs/heads/HDFS-7285
Commit: d261528922a5b67b787ed68410e8fcbedede4bd8
Parents: aa8c4c6
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 22:38:29 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:26 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  15 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  16 ++
 .../BlockStoragePolicySuite.java|   5 -
 .../hdfs/server/common/HdfsServerConstants.java |   2 +
 .../namenode/ErasureCodingZoneManager.java  | 112 ++
 .../hdfs/server/namenode/FSDirRenameOp.java |   2 +
 .../hdfs/server/namenode/FSDirectory.java   |  26 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  40 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  10 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |   9 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  12 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 151 +++
 .../TestBlockInitialEncoding.java   |  75 -
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |  23 ++-
 .../namenode/TestRecoverStripedBlocks.java  |   7 +-
 21 files changed, 431 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2615289/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8fc9e77..da3b0e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2954,6 +2954,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
+  public void createErasureCodingZone(String src)
+  throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
+try {
+  namenode.createErasureCodingZone(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class,
+  UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
   EnumSet<XAttrSetFlag> flag) throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2615289/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b..8efe344 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1363,6 +1363,14 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
+   * Create an erasure coding zone (currently with hardcoded schema)
+   * TODO: Configurable and pluggable schemas (HDFS-7337)
+   */
+  @Idempotent
+  public void createErasureCodingZone(String src)
+  throws IOException;
+
+  /**
* Set xattr of a file or directory.
 * The name must be prefixed with the namespace followed by ".". For example,
 * "user.attr".
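
The client half of this change (the DFSClient hunk earlier in this mail)
leans on the usual Hadoop RPC idiom: server-side exceptions travel as
RemoteException and are unwrapped back into their declared types. A compact
sketch of that idiom, assuming an already-initialized ClientProtocol proxy:

  import java.io.IOException;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
  import org.apache.hadoop.ipc.RemoteException;
  import org.apache.hadoop.security.AccessControlException;

  final class EcZoneClient {
    // Unwrapping restores the declared exception types so callers can
    // catch AccessControlException and friends directly.
    static void createZone(ClientProtocol namenode, String src)
        throws IOException {
      try {
        namenode.createErasureCodingZone(src);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(AccessControlException.class);
      }
    }
  }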

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2615289/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 

[21/50] hadoop git commit: HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. Contributed by Li Bo

2015-04-27 Thread zhz
HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. 
Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e107932
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e107932
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e107932

Branch: refs/heads/HDFS-7285
Commit: 1e10793284cd0fc35de6e879ae23762e5ac977e6
Parents: a5ebdd9
Author: Kai Zheng kai.zh...@intel.com
Authored: Sat Apr 11 01:03:37 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  13 +-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |  26 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 439 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java|  12 +-
 .../apache/hadoop/hdfs/StripedDataStreamer.java | 241 ++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 311 +
 7 files changed, 1031 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e107932/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 1e695c4..753795a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -56,4 +56,6 @@
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
-HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
+
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e107932/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index d9b8ee7..5906947 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -270,8 +270,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
   Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
-  final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
-  flag, progress, checksum, favoredNodes);
+  final DFSOutputStream out;
+  if(stat.getReplication() == 0) {
+out = new DFSStripedOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  } else {
+out = new DFSOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  }
   out.start();
   return out;
 } finally {
@@ -349,6 +355,9 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
 dfsClient.getPathTraceScope("newStreamForAppend", src);
+   if(stat.getReplication() == 0) {
+  throw new IOException(
+  "Not support appending to a striping layout file yet.");
+}
 try {
   final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
   progress, lastBlock, stat, checksum);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e107932/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 22055c3..9cd1ec1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -113,6 +114,19 @@ class DFSPacket {
 dataPos += len;
   }
 
+  synchronized 

[03/50] hadoop git commit: HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui Zheng.

2015-04-27 Thread zhz
HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3e31cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3e31cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3e31cb8

Branch: refs/heads/HDFS-7285
Commit: a3e31cb848e37ebb6d6d5bf859ad8e90fd102d2b
Parents: d250658
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 31 10:46:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:25 2015 -0700

--
 .../server/namenode/TestFSEditLogLoader.java| 157 +++
 1 file changed, 157 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3e31cb8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..d3cb749 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -39,14 +39,18 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -414,4 +418,157 @@ public class TestFSEditLogLoader {
   fromByte(code), FSEditLogOpCodes.fromByte(code));
 }
   }
+
+  @Test
+  public void testAddNewStripedBlock() throws IOException{
+// start a cluster
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  FSNamesystem fns = cluster.getNamesystem();
+
+  String testDir = "/ec";
+  String testFile = "testfile_001";
+  String testFilePath = testDir + "/" + testFile;
+  String clientName = "testUser1";
+  String clientMachine = "testMachine1";
+  long blkId = 1;
+  long blkNumBytes = 1024;
+  long timestamp = 1426222918;
+  short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
+  short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+  //set the storage policy of the directory
+  fs.mkdir(new Path(testDir), new FsPermission("755"));
+  fs.setStoragePolicy(new Path(testDir),
+  HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+  // Create a file with striped block
+  Path p = new Path(testFilePath);
+  DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
+
+  fns.enterSafeMode(false);
+  fns.saveNamespace(0, 0);
+  fns.leaveSafeMode();
+
+  // Add a striped block to the file
+  BlockInfoStriped stripedBlk = new BlockInfoStriped(
+  new Block(blkId, blkNumBytes, timestamp), blockNum, parityNum);
+  INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
+  file.toUnderConstruction(clientName, clientMachine);
+  file.getStripedBlocksFeature().addBlock(stripedBlk);
+  fns.getEditLog().logAddBlock(testFilePath, file);
+  file.toCompleteFile(System.currentTimeMillis());
+
+  //If the block loaded is the same as above it means that
+  //we have successfully applied the edit log to the fsimage.
+  cluster.restartNameNodes();
+  cluster.waitActive();
+  fns = cluster.getNamesystem();
+
+  INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
+  .getINode(testFilePath);
+
+  assertTrue(inodeLoaded.isWithStripedBlocks());
+
+  BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
+  assertEquals(1, 

[14/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang. Updated CHANGES-HDFS-EC-7285.txt

2015-04-27 Thread zhz
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10dd52df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10dd52df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10dd52df

Branch: refs/heads/HDFS-7285
Commit: 10dd52dfb13467427e1a0d1f5bf3f105b01eda2e
Parents: 04ef5b5
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:35:18 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:28 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 5 +
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10dd52df/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 68d1d32..7716728 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -33,5 +33,7 @@
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by
 Xinwei Qin ( Xinwei Qin via Kai Zheng )
 
+HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
+
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai
 Zheng ( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10dd52df/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3874cb4..9927ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,7 +49,4 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
-
-HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
-
+manage EC zones (Zhe Zhang)
\ No newline at end of file



[37/50] hadoop git commit: HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)

2015-04-27 Thread zhz
HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for 
making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cdc6c39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cdc6c39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cdc6c39

Branch: refs/heads/HDFS-7285
Commit: 7cdc6c390832a6903c1e263b9457a9b7093a8dec
Parents: bbc5979
Author: Vinayakumar B vinayakum...@apache.org
Authored: Sat Apr 18 23:20:45 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:36 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 137 ++-
 .../blockmanagement/DatanodeDescriptor.java |  31 +
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../server/protocol/BlockECRecoveryCommand.java |  80 ++-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   8 ++
 .../src/main/proto/erasurecoding.proto  |  13 ++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  88 
 .../namenode/TestRecoverStripedBlocks.java  |  10 +-
 10 files changed, 335 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cdc6c39/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 0ed61cd..40517e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -87,3 +87,6 @@
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
+
+HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
+making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cdc6c39/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b9d87aa..0c6c97d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -100,7 +101,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTyp
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.*;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
@@ -121,6 +122,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmI
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
@@ -132,11 +134,11 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDele
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import 

[33/50] hadoop git commit: HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. Contributed by Jing Zhao.

2015-04-27 Thread zhz
HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbc59799
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbc59799
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbc59799

Branch: refs/heads/HDFS-7285
Commit: bbc59799cea75a378006d0ef53b966f62ccc0917
Parents: a4db9e1
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:13:47 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:35 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  7 --
 .../namenode/ErasureCodingZoneManager.java  | 12 +-
 .../hdfs/server/namenode/FSDirectory.java   |  6 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   | 13 ++-
 .../hdfs/server/namenode/FSImageFormat.java |  4 +---
 .../server/namenode/FSImageSerialization.java   | 13 +--
 .../blockmanagement/TestBlockInfoStriped.java   | 23 ++--
 .../hdfs/server/namenode/TestFSImage.java   |  2 +-
 8 files changed, 31 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc59799/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 9f2f5ba..23e3153 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,13 +244,6 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-out.writeShort(dataBlockNum);
-out.writeShort(parityBlockNum);
-super.write(out);
-  }
-
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc59799/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 0a84083..3f94227 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -54,10 +54,6 @@ public class ErasureCodingZoneManager {
 this.dir = dir;
   }
 
-  boolean getECPolicy(INodesInPath iip) throws IOException {
-return getECSchema(iip) != null;
-  }
-
   ECSchema getECSchema(INodesInPath iip) throws IOException {
 ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
 return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
@@ -109,7 +105,7 @@ public class ErasureCodingZoneManager {
   throw new IOException("Attempt to create an erasure coding zone " +
   "for a file.");
 }
-if (getECPolicy(srcIIP)) {
+if (getECSchema(srcIIP) != null) {
   throw new IOException("Directory " + src + " is already in an " +
   "erasure coding zone.");
 }
@@ -132,8 +128,10 @@ public class ErasureCodingZoneManager {
   void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
   throws IOException {
 assert dir.hasReadLock();
-if (getECPolicy(srcIIP)
-!= getECPolicy(dstIIP)) {
+final ECSchema srcSchema = getECSchema(srcIIP);
+final ECSchema dstSchema = getECSchema(dstIIP);
+if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
+(dstSchema != null && !dstSchema.equals(srcSchema))) {
   throw new IOException(
   src + " can't be moved because the source and destination have " +
   "different erasure coding policies.");
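
The two-sided null check above is equivalent to one null-safe comparison;
assuming ECSchema.equals is symmetric, the same move-validity rule can be
written more compactly (a sketch, not the committed code):

  import java.io.IOException;
  import java.util.Objects;
  import org.apache.hadoop.io.erasurecode.ECSchema;

  final class MoveCheck {
    // A move is invalid whenever source and destination resolve to
    // different schemas, including exactly one of them being null.
    static void checkMoveValidity(ECSchema srcSchema, ECSchema dstSchema,
        String src) throws IOException {
      if (!Objects.equals(srcSchema, dstSchema)) {
        throw new IOException(src + " can't be moved because the source "
            + "and destination have different erasure coding policies.");
      }
    }
  }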

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc59799/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 

[09/50] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-04-27 Thread zhz
HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6f46100
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6f46100
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6f46100

Branch: refs/heads/HDFS-7285
Commit: c6f46100293ecfc0730f9fc20555910f84b6742d
Parents: c909712
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:27 2015 -0700

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6f46100/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6f46100/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 

[29/50] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-27 Thread zhz
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e652802
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e652802
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e652802

Branch: refs/heads/HDFS-7285
Commit: 6e652802e93039301b16a3d89dc24264af0d3475
Parents: e5f2e3d
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:23:07 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:33 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e652802/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 07bbd4a..9fdac98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -51,11 +51,20 @@
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 
+HDFS-7969. Erasure coding: NameNode support for lease recovery of striped
+block groups. (Zhe Zhang)
+
+HDFS-7782. Erasure coding: pread from files in striped layout.
+(Zhe Zhang and Jing Zhao via Zhe Zhang)
+
 HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
 NameNode (vinayakumarb)
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
+HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks.
+(Jing Zhao and Zhe Zhang via Jing Zhao)
+
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
@@ -63,5 +72,11 @@
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
 ECSchemas loaded in Namenode. (vinayakumarb)
 
+HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone.
+(Vinayakumar B via Zhe Zhang)
+
+HDFS-8114. Erasure coding: Add auditlog 
FSNamesystem#createErasureCodingZone if this
+operation fails. (Rakesh R via Zhe Zhang)
+
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file



[11/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-27 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c3a8ed5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c3a8ed5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c3a8ed5

Branch: refs/heads/HDFS-7285
Commit: 0c3a8ed56e8b3c7fbf6691b0ae30d15342ef65b2
Parents: 96dbfa5
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:28 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  55 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  79 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  92 -
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 304 +++
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 11 files changed, 897 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3a8ed5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4e8f202..a9596bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -203,4 +203,8 @@ public class LocatedBlock {
 + "; locs=" + Arrays.asList(locs)
 + "}";
   }
+
+  public boolean isStriped() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c3a8ed5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index da3b0e5..ff8bad0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -236,6 +236,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
   private final Sampler<?> traceSampler;
 
   public DfsClientConf getConf() {
@@ -371,6 +372,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (dfsClientConf.getHedgedReadThreadpoolSize() > 0) {
   
this.initThreadsNumForHedgedReads(dfsClientConf.getHedgedReadThreadpoolSize());
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads <= 0) {
+  LOG.warn("The value of "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  + " must be greater than 0. The current setting is " + numThreads
+  + ". Reset it to the default value "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
   TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
@@ -3151,11 +3165,52 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
+  
+  /**
+   * Create thread pool for parallel reading in striped layout,
+   * STRIPED_READ_THREAD_POOL, if it does not already exist.
+   * @param num Number of threads for striped reads thread pool.
+   */
+  private void initThreadsNumForStripedReads(int num) {
+assert num > 0;
+if (STRIPED_READ_THREAD_POOL != null) {
+ 
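
The validation just above follows a simple clamp-to-default pattern: a
non-positive configured pool size is rejected with a warning and replaced by
the default, so a bad dfs.client setting cannot disable striped reads
outright. Distilled into a standalone helper (illustrative only):

  final class PoolSizeCheck {
    // Rejects non-positive pool sizes and falls back to the default so a
    // misconfigured client still gets a working striped-read thread pool.
    static int sanitize(int configured, int defaultValue) {
      if (configured <= 0) {
        System.err.println("Striped-read thread pool size " + configured
            + " must be > 0; using default " + defaultValue);
        return defaultValue;
      }
      return configured;
    }
  }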

[17/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-27 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/534fde5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/534fde5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/534fde5c

Branch: refs/heads/HDFS-7285
Commit: 534fde5cf0635ca54f1f6413e78badbf732b8aa7
Parents: 0c3a8ed
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:29 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/534fde5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ff8bad0..f4eea49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3165,7 +3165,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
-  
+
   /**
* Create thread pool for parallel reading in striped layout,
* STRIPED_READ_THREAD_POOL, if it does not already exist.



[04/50] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-27 Thread zhz
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2506580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2506580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2506580

Branch: refs/heads/HDFS-7285
Commit: d2506580b011d82a8b19fa7b2d11ba1f95fa703d
Parents: 34bb9a4
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Tue Mar 31 15:12:09 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:25 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 40 +++-
 1 file changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2506580/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 21e4c03..a686315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -3,6 +3,44 @@
 HDFS-7347. Configurable erasure coding policy for individual files and
 directories ( Zhe Zhang via vinayakumarb )
 
-HDFS-7716. Add a test for BlockGroup support in FSImage.
+HDFS-7339. Representing striped block groups in NameNode with hierarchical
+naming protocol ( Zhe Zhang )
+
+HDFS-7652. Process block reports for erasure coded blocks (Zhe Zhang)
+
+HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info (Jing Zhao)
+
+HDFS-7749. Erasure Coding: Add striped block support in INodeFile (Jing 
Zhao)
+
+HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode
+(Jing Zhao via Zhe Zhang)
+
+HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to 
print
+striped blocks (Takuya Fukudome via jing9)
+
+HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from
+striped files (Jing Zhao)
+
+HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped
+blocks ( Kai Sasaki via jing9 )
+
+HDFS-7912. Erasure Coding: track BlockInfo instead of Block in
+UnderReplicatedBlocks and PendingReplicationBlocks (Jing Zhao)
+
+HDFS-7369. Erasure coding: distribute recovery work for striped blocks to
+DataNode (Zhe Zhang)
+
+HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks
+(GAO Rui via jing9)
+
+HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage
+( Hui Zheng via jing9 )
+
+HDFS-7616. Add a test for BlockGroup support in FSImage.
 (Takuya Fukudome via szetszwo)
 
+HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery 
striped
+blocks in NameNode (Jing Zhao)
+
+HDFS-8005. Erasure Coding: simplify striped block recovery work computation
+and add tests (Jing Zhao)
\ No newline at end of file



[28/50] hadoop git commit: HDFS-8123. Erasure Coding: Better to move EC related proto messages to a separate erasurecoding proto file (Contributed by Rakesh R)

2015-04-27 Thread zhz
HDFS-8123. Erasure Coding: Better to move EC related proto messages to a 
separate erasurecoding proto file (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5f2e3d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5f2e3d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5f2e3d9

Branch: refs/heads/HDFS-7285
Commit: e5f2e3d92352c448455bfbad0fd732bdea1cfef7
Parents: 49e57af
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:09:16 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:33 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../ClientNamenodeProtocolTranslatorPB.java | 13 ++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +-
 .../namenode/ErasureCodingZoneManager.java  |  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto | 24 +--
 .../src/main/proto/erasurecoding.proto  | 74 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 27 ---
 9 files changed, 96 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2e3d9/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5250dfa..07bbd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
 
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
-ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file
+ECSchemas loaded in Namenode. (vinayakumarb)
+
+HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2e3d9/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c11b963..a13a2bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -343,6 +343,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
   <include>hdfs.proto</include>
   <include>encryption.proto</include>
   <include>inotify.proto</include>
+  <include>erasurecoding.proto</include>
 </includes>
   </source>
   <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2e3d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 48f0efd..169ea2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -107,12 +107,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
-import 

[39/50] hadoop git commit: HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 and HDFS-8169. Contributed by Zhe Zhang.

2015-04-27 Thread zhz
HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 
and HDFS-8169. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/655cf273
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/655cf273
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/655cf273

Branch: refs/heads/HDFS-7285
Commit: 655cf27380f2af610abf3bb8ff6ac1d8c426b021
Parents: 34a97a1
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 20 14:19:12 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:36 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 12 
 .../hdfs/protocol/LocatedStripedBlock.java  | 64 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 21 ++
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 21 +-
 .../hdfs/protocol/LocatedStripedBlock.java  | 73 
 .../server/blockmanagement/BlockManager.java| 25 ---
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/TestStripedINodeFile.java   |  3 +-
 8 files changed, 120 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/655cf273/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 86c8a87..dc2f1d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -176,6 +176,18 @@ public interface HdfsClientConfigKeys {
 int THREADPOOL_SIZE_DEFAULT = 0;
   }
 
+  /** dfs.client.read.striped configuration properties */
+  interface StripedRead {
+String PREFIX = Read.PREFIX + "striped.";
+
+String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
+/**
+ * With default 6+3 schema, each normal read could span 6 DNs. So this
+ * default value accommodates 3 read streams
+ */
+int THREADPOOL_SIZE_DEFAULT = 18;
+  }
+
   /** dfs.http.client configuration properties */
   interface HttpClient {
 String  PREFIX = "dfs.http.client.";
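
The default of 18 threads encodes the arithmetic in the StripedRead comment above: with the 6+3 schema a single positional read fans out to 6 datanodes, so 18 threads leave room for roughly 3 such reads in flight. A tiny illustrative calculation, not a real HDFS API:

    class StripedReadPoolSizing {
      // numDataUnits datanodes per read, times the desired number of
      // concurrent reads, gives the pool size.
      static int poolSize(int numDataUnits, int concurrentReads) {
        return numDataUnits * concurrentReads;
      }

      public static void main(String[] args) {
        System.out.println(poolSize(6, 3));  // 18, the shipped default
      }
    }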

http://git-wip-us.apache.org/repos/asf/hadoop/blob/655cf273/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..93a5948
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+
+import java.util.Arrays;
+
+/**
+ * {@link LocatedBlock} with striped block support. For a striped block, each
+ * datanode storage is associated with a block in the block group. We need to
+ * record the index (in the striped block group) for each of them.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LocatedStripedBlock extends LocatedBlock {
+  private int[] blockIndices;
+
+  public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+  String[] storageIDs, StorageType[] storageTypes, int[] indices,
+  long startOffset, boolean corrupt, 
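
The invariant in the javadoc above is that the constructor's parallel arrays line up: locs[i], storageIDs[i] and storageTypes[i] all describe the internal block whose position in the striped group is indices[i]. A toy lookup under that assumption (the helper name is hypothetical):

    class BlockIndexLookup {
      // Returns the position in locs[]/storageIDs[]/storageTypes[] that
      // holds the internal block at 'targetBlockIndex' in the group,
      // or -1 if no location reports that block.
      static int locationOf(int[] blockIndices, int targetBlockIndex) {
        for (int i = 0; i < blockIndices.length; i++) {
          if (blockIndices[i] == targetBlockIndex) {
            return i;
          }
        }
        return -1;
      }
    }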

[35/50] hadoop git commit: HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed by Jing Zhao.

2015-04-27 Thread zhz
HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/501addfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/501addfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/501addfb

Branch: refs/heads/HDFS-7285
Commit: 501addfbf085fe453ddde25ee0ba9d30ef16fc7e
Parents: 0f5de42
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 17:55:19 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:35 2015 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 163 +++
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  72 +++-
 .../server/blockmanagement/BlockManager.java|  17 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 162 +++---
 4 files changed, 236 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/501addfb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index f11a657..7dc0091 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -22,10 +22,14 @@ import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -59,12 +63,12 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
*/
   private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
   private ByteBuffer[] cellBuffers;
-  private final short blockGroupBlocks = HdfsConstants.NUM_DATA_BLOCKS
+  private final short numAllBlocks = HdfsConstants.NUM_DATA_BLOCKS
   + HdfsConstants.NUM_PARITY_BLOCKS;
-  private final short blockGroupDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
+  private final short numDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
   private int curIdx = 0;
   /* bytes written in current block group */
-  private long currentBlockGroupBytes = 0;
+  //private long currentBlockGroupBytes = 0;
 
   //TODO: Use ErasureCoder interface (HDFS-7781)
   private RawErasureEncoder encoder;
@@ -73,10 +77,6 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 return streamers.get(0);
   }
 
-  private long getBlockGroupSize() {
-return blockSize * HdfsConstants.NUM_DATA_BLOCKS;
-  }
-
   /** Construct a new output stream for creating a file. */
   DFSStripedOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
  EnumSet<CreateFlag> flag, Progressable progress,
@@ -84,15 +84,13 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
  throws IOException {
 super(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
 DFSClient.LOG.info("Creating striped output stream");
-if (blockGroupBlocks <= 1) {
-  throw new IOException("The block group must contain more than one block.");
-}
+checkConfiguration();
 
-cellBuffers = new ByteBuffer[blockGroupBlocks];
+cellBuffers = new ByteBuffer[numAllBlocks];
 List<BlockingQueue<LocatedBlock>> stripeBlocks = new ArrayList<>();
 
-for (int i = 0; i < blockGroupBlocks; i++) {
-  stripeBlocks.add(new LinkedBlockingQueue<LocatedBlock>(blockGroupBlocks));
+for (int i = 0; i < numAllBlocks; i++) {
+  stripeBlocks.add(new LinkedBlockingQueue<LocatedBlock>(numAllBlocks));
   try {
 cellBuffers[i] = 
ByteBuffer.wrap(byteArrayManager.newByteArray(cellSize));
   } catch (InterruptedException ie) {
@@ -103,29 +101,38 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   }
 }
 encoder = new RSRawEncoder();
-encoder.initialize(blockGroupDataBlocks,
-blockGroupBlocks - blockGroupDataBlocks, cellSize);
+encoder.initialize(numDataBlocks,
+numAllBlocks - numDataBlocks, cellSize);
 
-streamers = new ArrayList<>(blockGroupBlocks);
-for (short i = 0; i < blockGroupBlocks; i++) {
+List<StripedDataStreamer> s = new 
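
The constructor above allocates one cell buffer per internal block, and the write path fills the data buffers round-robin, one cell at a time; parity cells are produced by the RS encoder once a full stripe of data cells is buffered. A self-contained sketch of just that striping order, simplified to plain buffers with no streamers or encoder (all names are illustrative):

    import java.nio.ByteBuffer;

    class CellStripingSketch {
      // Stripe one stripe's worth of payload across the data-block cell
      // buffers. Assumes data.length <= numDataBlocks * cellSize.
      static ByteBuffer[] stripe(byte[] data, int numDataBlocks, int cellSize) {
        assert data.length <= numDataBlocks * cellSize;
        ByteBuffer[] cellBuffers = new ByteBuffer[numDataBlocks];
        for (int i = 0; i < numDataBlocks; i++) {
          cellBuffers[i] = ByteBuffer.allocate(cellSize);
        }
        int curIdx = 0;
        for (int off = 0; off < data.length; off += cellSize) {
          int len = Math.min(cellSize, data.length - off);
          cellBuffers[curIdx].put(data, off, len);
          curIdx = (curIdx + 1) % numDataBlocks;  // next internal block
        }
        return cellBuffers;
      }
    }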

[13/50] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin Updated CHANGES-HDFS-EC-7285.txt

2015-04-27 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04ef5b5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04ef5b5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04ef5b5e

Branch: refs/heads/HDFS-7285
Commit: 04ef5b5e78f734070af7876f9ab0d0a787607c07
Parents: 85633f8
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:34:37 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04ef5b5e/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 4e60a7c..3874cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -48,9 +48,6 @@
 HDFS-7617. Add unit tests for editlog transactions for EC 
 (Hui Zheng via Zhe Zhang)
 
-HADOOP-11782. Correct two thrown messages in ECSchema class
-(Xinwei Qin via Kai Zheng)
-
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 



[47/50] hadoop git commit: HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the last stripe is at the block group boundary. Contributed by Jing Zhao.

2015-04-27 Thread zhz
HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the last 
stripe is at the block group boundary. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fe4f7a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fe4f7a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fe4f7a3

Branch: refs/heads/HDFS-7285
Commit: 2fe4f7a3af12a6511fe124a90e595f7f126340ae
Parents: e4ce5b0
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 23 15:43:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:55 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 51 +---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  6 +++
 3 files changed, 34 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fe4f7a3/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8977c46..48791b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -121,4 +121,7 @@
 schema. (Kai Zheng via Zhe Zhang)
 
 HDFS-8136. Client gets and uses EC schema when reads and writes a stripping
-file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file
+file. (Kai Sasaki via Kai Zheng)
+
+HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the 
last
+stripe is at the block group boundary. (jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fe4f7a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index eeb9d7e..245dfc1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
@@ -278,14 +277,6 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 return numDataBlocks * cellSize;
   }
 
-  private long getCurrentBlockGroupBytes() {
-long sum = 0;
-for (int i = 0; i < numDataBlocks; i++) {
-  sum += streamers.get(i).getBytesCurBlock();
-}
-return sum;
-  }
-
   private void notSupported(String headMsg)
   throws IOException{
   throw new IOException(
@@ -347,37 +338,43 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 }
   }
 
+  /**
+   * Simply add bytesCurBlock together. Note that this result is not accurately
+   * the size of the block group.
+   */
+  private long getCurrentSumBytes() {
+long sum = 0;
+for (int i = 0; i < numDataBlocks; i++) {
+  sum += streamers.get(i).getBytesCurBlock();
+}
+return sum;
+  }
+
   private void writeParityCellsForLastStripe() throws IOException {
-final long currentBlockGroupBytes = getCurrentBlockGroupBytes();
-long parityBlkSize = StripedBlockUtil.getInternalBlockLength(
-currentBlockGroupBytes, cellSize, numDataBlocks,
-numDataBlocks + 1);
-if (parityBlkSize == 0 || currentBlockGroupBytes % stripeDataSize() == 0) {
+final long currentBlockGroupBytes = getCurrentSumBytes();
+if (currentBlockGroupBytes % stripeDataSize() == 0) {
   return;
 }
-int parityCellSize = parityBlkSize % cellSize == 0 ? cellSize :
-(int) (parityBlkSize % cellSize);
+long firstCellSize = getLeadingStreamer().getBytesCurBlock() % cellSize;
+long parityCellSize = firstCellSize > 0 && firstCellSize < cellSize ?
+firstCellSize : cellSize;
 
 for (int i = 0; i < numAllBlocks; i++) {
-  long internalBlkLen = StripedBlockUtil.getInternalBlockLength(
-  currentBlockGroupBytes, cellSize, numDataBlocks, i);
   // Pad zero bytes to make all cells exactly the size of parityCellSize
   // If internal 
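
The fix above drops the getInternalBlockLength-based computation in favor of a direct rule: when the last stripe is partial, its parity cells are sized to match the first (and therefore largest) data cell of that stripe; otherwise a full cell is used. A standalone rendering of that rule with small numbers:

    class ParityCellSizeSketch {
      // Mirrors the patched logic: the leading streamer's bytes in the
      // current block determine the first cell's length in the last stripe.
      static long parityCellSize(long leadingBytesCurBlock, long cellSize) {
        long firstCellSize = leadingBytesCurBlock % cellSize;
        return firstCellSize > 0 && firstCellSize < cellSize
            ? firstCellSize : cellSize;
      }

      public static void main(String[] args) {
        // cellSize = 4: a leading streamer holding 10 bytes ends with a
        // 2-byte cell, so the last stripe's parity cells are 2 bytes too.
        System.out.println(parityCellSize(10, 4));  // 2
        System.out.println(parityCellSize(12, 4));  // 4 (stripe-aligned)
      }
    }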

[49/50] hadoop git commit: HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped layout. Contributed by Zhe Zhang.

2015-04-27 Thread zhz
HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped 
layout. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/355f9cb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/355f9cb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/355f9cb2

Branch: refs/heads/HDFS-7285
Commit: 355f9cb259ccb7d456a612ed177986f2eaddc6fe
Parents: cfd03ef
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 24 22:36:15 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:45:01 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  55 ++--
 .../hadoop/hdfs/DFSStripedInputStream.java  | 311 ++-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  43 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 110 ++-
 5 files changed, 465 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/355f9cb2/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index cf41a9b..e8db485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -131,3 +131,6 @@
 
 HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may 
cause 
 block id conflicts (Jing Zhao via Zhe Zhang)
+
+HDFS-8033. Erasure coding: stateful (non-positional) read from files in 
+striped layout (Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/355f9cb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 705e0b7..7f267b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -95,34 +95,34 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   public static boolean tcpReadsDisabledForTesting = false;
   private long hedgedReadOpsLoopNumForTesting = 0;
   protected final DFSClient dfsClient;
-  private AtomicBoolean closed = new AtomicBoolean(false);
-  private final String src;
-  private final boolean verifyChecksum;
+  protected AtomicBoolean closed = new AtomicBoolean(false);
+  protected final String src;
+  protected final boolean verifyChecksum;
 
   // state by stateful read only:
   // (protected by lock on this)
   /
   private DatanodeInfo currentNode = null;
-  private LocatedBlock currentLocatedBlock = null;
-  private long pos = 0;
-  private long blockEnd = -1;
+  protected LocatedBlock currentLocatedBlock = null;
+  protected long pos = 0;
+  protected long blockEnd = -1;
   private BlockReader blockReader = null;
   
 
   // state shared by stateful and positional read:
   // (protected by lock on infoLock)
   
-  private LocatedBlocks locatedBlocks = null;
+  protected LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private FileEncryptionInfo fileEncryptionInfo = null;
-  private CachingStrategy cachingStrategy;
+  protected CachingStrategy cachingStrategy;
   
 
-  private final ReadStatistics readStatistics = new ReadStatistics();
+  protected final ReadStatistics readStatistics = new ReadStatistics();
   // lock for state shared between read and pread
   // Note: Never acquire a lock on this with this lock held to avoid 
deadlocks
   //   (it's OK to acquire this lock when the lock on this is held)
-  private final Object infoLock = new Object();
+  protected final Object infoLock = new Object();
 
   /**
* Track the ByteBuffers that we have handed out to readers.
@@ -239,7 +239,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* back to the namenode to get a new list of block locations, and is
* capped at maxBlockAcquireFailures
*/
-  private int failures = 0;
+  protected int failures = 0;
 
   /* XXX Use of CocurrentHashMap is temp fix. Need to fix 
* parallel accesses to DFSInputStream (through ptreads) properly */
@@ -476,7 +476,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   /** Fetch a block from namenode and cache it */
-  private void fetchBlockAt(long offset) throws IOException {
+  protected void fetchBlockAt(long 

[30/50] hadoop git commit: HDFS-8120. Erasure coding: created util class to analyze striped block groups. Contributed by Zhe Zhang and Li Bo.

2015-04-27 Thread zhz
HDFS-8120. Erasure coding: created util class to analyze striped block groups. 
Contributed by Zhe Zhang and Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3fb010a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3fb010a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3fb010a

Branch: refs/heads/HDFS-7285
Commit: b3fb010a90d9e631406519d2e29c29981ee15f81
Parents: 3e97c58
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:59:27 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:34 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   4 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  77 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java |  34 +++--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  58 ++--
 .../server/blockmanagement/BlockManager.java|  26 +++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 138 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  91 +++-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  83 +--
 .../apache/hadoop/hdfs/TestReadStripedFile.java |  92 +++--
 .../server/namenode/TestAddStripedBlocks.java   | 107 ++
 .../namenode/TestRecoverStripedBlocks.java  |   3 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 125 +
 12 files changed, 562 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fb010a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d728fda..705e0b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1169,9 +1169,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
   updateReadStatistics(readStatistics, nread, reader);
 
-  if (nread != len) {
+  if (nread != lengths[i]) {
 throw new IOException("truncated return from reader.read(): " +
-"excpected " + len + ", got " + nread);
+"excpected " + lengths[i] + ", got " + nread);
   }
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fb010a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 8a431b1..d597407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -50,7 +51,7 @@ import java.util.concurrent.Future;
  *
 * | <- Striped Block Group -> |
 *  blk_0  blk_1   blk_2   <- A striped block group has
- *|  |   |  {@link #groupSize} blocks
+ *|  |   |  {@link #dataBlkNum} blocks
  *v  v   v
  * +--+   +--+   +--+
 * |cell_0|   |cell_1|   |cell_2|  <- The logical read order should be
@@ -72,7 +73,7 @@ import java.util.concurrent.Future;
 public class DFSStripedInputStream extends DFSInputStream {
   /**
* This method plans the read portion from each block in the stripe
-   * @param groupSize The size / width of the striping group
+   * @param dataBlkNum The number of data blocks in the striping group
* @param cellSize The size of each striping cell
* @param startInBlk Starting offset in the striped block
* @param len Length of the read request
@@ -81,29 +82,29 @@ public class DFSStripedInputStream extends DFSInputStream {
* for an individual block in the group
*/
   @VisibleForTesting
-  
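
The javadoc above describes planning a read over the striped layout: each cell of the logical range maps round-robin onto a data block, in the order sketched in the diagram. A simplified, self-contained approximation of that split; the real planner also tracks per-block offsets and buffer positions, omitted here:

    class ReadPlanSketch {
      // For a logical range [startInBlk, startInBlk + len) over the block
      // group, accumulate how many bytes each data block serves.
      static int[] perBlockReadLengths(int dataBlkNum, int cellSize,
          long startInBlk, int len) {
        int[] lengths = new int[dataBlkNum];
        long pos = startInBlk;
        final long end = startInBlk + len;
        while (pos < end) {
          long cellIdx = pos / cellSize;
          int blkIdx = (int) (cellIdx % dataBlkNum);  // round-robin cells
          long cellEnd = (cellIdx + 1) * cellSize;
          int chunk = (int) (Math.min(cellEnd, end) - pos);
          lengths[blkIdx] += chunk;
          pos += chunk;
        }
        return lengths;
      }

      public static void main(String[] args) {
        // 3 data blocks, cellSize 4, read 10 bytes from offset 0:
        // cells 0,1,2 land on blocks 0,1,2 and contribute 4,4,2 bytes.
        System.out.println(java.util.Arrays.toString(
            perBlockReadLengths(3, 4, 0, 10)));  // [4, 4, 2]
      }
    }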

[24/50] hadoop git commit: HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if this operation fails. Contributed by Rakesh R.

2015-04-27 Thread zhz
HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if 
this operation fails. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49e57afd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49e57afd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49e57afd

Branch: refs/heads/HDFS-7285
Commit: 49e57afdb54954d1cefd057c0d24aaa98739cf96
Parents: bce5b8f
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:15:02 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:32 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 ++--
 1 file changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49e57afd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index cff961c..ea4db0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8130,11 +8130,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   SafeModeException, AccessControlException {
 String src = srcArg;
 HdfsFileStatus resultingStat = null;
-checkSuperuserPrivilege();
-checkOperation(OperationCategory.WRITE);
-final byte[][] pathComponents =
-FSDirectory.getPathComponentsForReservedPath(src);
-FSPermissionChecker pc = getPermissionChecker();
+FSPermissionChecker pc = null;
+byte[][] pathComponents = null;
+boolean success = false;
+try {
+  checkSuperuserPrivilege();
+  checkOperation(OperationCategory.WRITE);
+  pathComponents =
+  FSDirectory.getPathComponentsForReservedPath(src);
+  pc = getPermissionChecker();
+} catch (Throwable e) {
+  logAuditEvent(success, "createErasureCodingZone", srcArg);
+  throw e;
+}
 writeLock();
 try {
   checkSuperuserPrivilege();
@@ -8148,11 +8156,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
   final INodesInPath iip = dir.getINodesInPath4Write(src, false);
   resultingStat = dir.getAuditFileInfo(iip);
+  success = true;
 } finally {
   writeUnlock();
 }
 getEditLog().logSync();
-logAuditEvent(true, "createErasureCodingZone", srcArg, null, resultingStat);
+logAuditEvent(success, "createErasureCodingZone", srcArg, null, resultingStat);
   }
 
   /**



[20/50] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-04-27 Thread zhz
HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a068a54b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a068a54b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a068a54b

Branch: refs/heads/HDFS-7285
Commit: a068a54b4326e3e4fdcd1cd17ffc8f04baf4893d
Parents: 1fd973f
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:30 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 4 files changed, 120 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a068a54b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new 
ones.
-->
 
 <schemas>
-  <schema name="RS-6-3">
-    <k>6</k>
-    <m>3</m>
-    <codec>RS</codec>
-  </schema>
   <schema name="RS-10-4">
     <k>10</k>
     <m>4</m>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a068a54b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
   public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+  int numDataUnits, int numParityUnits) {
+this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+ECSchema ecSchema = (ECSchema) o;
+
+if (numDataUnits != ecSchema.numDataUnits) {
+  return false;
+}
+if (numParityUnits != ecSchema.numParityUnits) {
+  return false;
+}
+if (chunkSize != ecSchema.chunkSize) {
+  return false;
+}
+if (!schemaName.equals(ecSchema.schemaName)) {
+  return false;
+}
+if (!codecName.equals(ecSchema.codecName)) {
+  return false;
+}
+return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+int result = schemaName.hashCode();
+result = 31 * result + codecName.hashCode();
+result = 31 * result + options.hashCode();
+result = 31 * result + numDataUnits;
+result = 31 * result + numParityUnits;
+result = 31 * result + chunkSize;
+
+return result;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a068a54b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
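
As a quick illustration of the new four-argument constructor together with the equals()/hashCode() contract added above, a minimal usage sketch; it assumes, as the delegating constructor suggests, that omitted options default consistently:

    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ECSchemaEqualitySketch {
      public static void main(String[] args) {
        ECSchema a = new ECSchema("RS-6-3", "RS", 6, 3);
        ECSchema b = new ECSchema("RS-6-3", "RS", 6, 3);
        System.out.println(a.equals(b));                   // expected: true
        System.out.println(a.hashCode() == b.hashCode());  // expected: true
      }
    }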

[41/50] hadoop git commit: HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.

2015-04-27 Thread zhz
HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28e7a2aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28e7a2aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28e7a2aa

Branch: refs/heads/HDFS-7285
Commit: 28e7a2aa50904a7c8f0a0e7516ebf080536920c3
Parents: 655cf27
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Apr 20 17:42:02 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:37 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  61 ---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 178 +++
 3 files changed, 100 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28e7a2aa/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c8dbf08..8f28285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -104,3 +104,6 @@
 
 HDFS-8181. createErasureCodingZone sets retryCache state as false always
 (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
+(szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28e7a2aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 2368021..d622d4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Utility class for analyzing striped block groups
  */
@@ -81,46 +83,43 @@ public class StripedBlockUtil {
   /**
* Get the size of an internal block at the given index of a block group
*
-   * @param numBytesInGroup Size of the block group only counting data blocks
+   * @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
-   * @param dataBlkNum The number of data blocks
-   * @param idxInGroup The logical index in the striped block group
+   * @param numDataBlocks The number of data blocks
+   * @param i The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
-  public static long getInternalBlockLength(long numBytesInGroup,
-  int cellSize, int dataBlkNum, int idxInGroup) {
+  public static long getInternalBlockLength(long dataSize,
+  int cellSize, int numDataBlocks, int i) {
+Preconditions.checkArgument(dataSize >= 0);
+Preconditions.checkArgument(cellSize > 0);
+Preconditions.checkArgument(numDataBlocks > 0);
+Preconditions.checkArgument(i >= 0);
 // Size of each stripe (only counting data blocks)
-final long numBytesPerStripe = cellSize * dataBlkNum;
-assert numBytesPerStripe > 0:
-"getInternalBlockLength should only be called on valid striped blocks";
+final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
 // share of the group
-if (numBytesInGroup % numBytesPerStripe == 0) {
-  return numBytesInGroup / dataBlkNum;
+final int lastStripeDataLen = (int)(dataSize % stripeSize);
+if (lastStripeDataLen == 0) {
+  return dataSize / numDataBlocks;
 }
 
-int numStripes = (int) ((numBytesInGroup - 1) / numBytesPerStripe + 1);
-assert numStripes >= 1 : "There should be at least 1 stripe";
-
-// All stripes but the last one are full stripes. The block should at least
-// contain (numStripes - 1) full cells.
-long blkSize = (numStripes - 1) * cellSize;
-
-long lastStripeLen = numBytesInGroup % numBytesPerStripe;
-// Size of parity cells should equal the size of the first cell, if it
-// is not full.
-long lastParityCellLen = Math.min(cellSize, lastStripeLen);
-
-if (idxInGroup >= dataBlkNum) {
-  // for 
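
To see what the overflow-safe rewrite computes, here is a standalone re-derivation with small numbers. It follows the rules visible in the patch: full stripes are shared equally; in a partial last stripe, data cell i receives what remains after i earlier cells; and parity cells match the first data cell. This is a sketch of the described behavior, not the patched method itself:

    class InternalBlockLengthSketch {
      static long internalBlockLength(long dataSize, int cellSize,
          int numDataBlocks, int i) {
        final int stripeSize = cellSize * numDataBlocks;
        final int lastStripeDataLen = (int) (dataSize % stripeSize);
        if (lastStripeDataLen == 0) {
          return dataSize / numDataBlocks;  // ends on a stripe boundary
        }
        final long numStripes = (dataSize - 1) / stripeSize + 1;
        final long fullStripeShare = (numStripes - 1) * cellSize;
        final int lastCell;
        if (i < numDataBlocks) {
          int remaining = lastStripeDataLen - i * cellSize;
          lastCell = Math.max(0, Math.min(cellSize, remaining));
        } else {
          // parity cells are as long as the first data cell of the stripe
          lastCell = Math.min(cellSize, lastStripeDataLen);
        }
        return fullStripeShare + lastCell;
      }

      public static void main(String[] args) {
        // 3 data blocks, cellSize 4, 10 bytes: the (only) stripe holds
        // cells of 4, 4, 2; parity blocks match the first cell.
        for (int i = 0; i < 5; i++) {
          System.out.println(internalBlockLength(10, 4, 3, i));  // 4 4 2 4 4
        }
      }
    }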

[23/50] hadoop git commit: HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode. (Contributed by Vinayakumar B)

2015-04-27 Thread zhz
HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas 
loaded in Namenode. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc362ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc362ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc362ac4

Branch: refs/heads/HDFS-7285
Commit: cc362ac498b8233e99da1db0f282d1dae4d5c378
Parents: 1e10793
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 10 15:07:32 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:31 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 +++
 .../ClientNamenodeProtocolTranslatorPB.java | 26 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  3 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   | 57 
 12 files changed, 164 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc362ac4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8c3310e..32077f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -123,12 +123,12 @@ public final class ECSchema {
 
 this.chunkSize = DEFAULT_CHUNK_SIZE;
 try {
-  if (options.containsKey(CHUNK_SIZE_KEY)) {
+  if (this.options.containsKey(CHUNK_SIZE_KEY)) {
 this.chunkSize = Integer.parseInt(options.get(CHUNK_SIZE_KEY));
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  this.options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
   " is found. It should be an integer");
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc362ac4/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 753795a..5250dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -58,4 +58,7 @@
 
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
-HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
+
+HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
+ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc362ac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 16f876c..994d5ac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3109,6 +3110,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECSchema[] getECSchemas() throws 
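
A short usage sketch for the new client call. The throws clause is truncated above, so the IOException below is an assumption, as is the way the DFSClient instance is obtained:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ListECSchemasSketch {
      static void printSchemas(DFSClient dfsClient) throws IOException {
        ECSchema[] schemas = dfsClient.getECSchemas();  // new RPC here
        for (ECSchema schema : schemas) {
          System.out.println(schema);
        }
      }
    }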

[45/50] hadoop git commit: HDFS-8136. Client gets and uses EC schema when reads and writes a stripping file. Contributed by Kai Sasaki

2015-04-27 Thread zhz
HDFS-8136. Client gets and uses EC schema when reads and writes a stripping 
file. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4ce5b05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4ce5b05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4ce5b05

Branch: refs/heads/HDFS-7285
Commit: e4ce5b05018598e5d1b288c00457d2badbf4a68a
Parents: 36cb4fa
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 24 00:19:12 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:54 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  17 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |  24 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 175 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   4 +-
 .../apache/hadoop/hdfs/TestReadStripedFile.java |   1 -
 7 files changed, 210 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4ce5b05/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b2faac0..8977c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -119,3 +119,6 @@
 
 HDFS-8156. Add/implement necessary APIs even we just have the system 
default 
 schema. (Kai Zheng via Zhe Zhang)
+
+HDFS-8136. Client gets and uses EC schema when reads and writes a stripping
+file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4ce5b05/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d597407..d0e2b68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -21,9 +21,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
@@ -125,13 +125,19 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return results;
   }
 
-  private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-  private final short dataBlkNum = HdfsConstants.NUM_DATA_BLOCKS;
-  private final short parityBlkNum = HdfsConstants.NUM_PARITY_BLOCKS;
+  private final int cellSize;
+  private final short dataBlkNum;
+  private final short parityBlkNum;
+  private final ECInfo ecInfo;
 
   DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
+// ECInfo is restored from NN just before reading striped file.
+ecInfo = dfsClient.getErasureCodingInfo(src);
+cellSize = ecInfo.getSchema().getChunkSize();
+dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
 DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -279,9 +285,6 @@ public class DFSStripedInputStream extends DFSInputStream {
 throw new InterruptedException("let's retry");
   }
 
-  public void setCellSize(int cellSize) {
-this.cellSize = cellSize;
-  }
 
   /**
* This class represents the portion of I/O associated with each block in the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4ce5b05/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 

[05/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8035). Contributed by Zhe Zhang

2015-04-27 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit is for HDFS-8035). Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bb19336
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bb19336
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bb19336

Branch: refs/heads/HDFS-7285
Commit: 3bb19336ae1fdf920ea59b9d370b8b82c6dac74f
Parents: d53a756
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 10:37:23 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:26 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 11 +--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java |  8 
 2 files changed, 9 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bb19336/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9bd687a..90ec426 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3550,13 +3550,12 @@ public class BlockManager {
   String src, BlockInfo[] blocks) {
 for (BlockInfo b: blocks) {
   if (!b.isComplete()) {
-final BlockInfoContiguousUnderConstruction uc =
-(BlockInfoContiguousUnderConstruction)b;
 final int numNodes = b.numNodes();
-LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
-  + uc.getBlockUCState() + ", replication# = " + numNodes
-  + (numNodes < minReplication ? " < " : " >= ")
-  + " minimum = " + minReplication + ") in file " + src);
+final int min = getMinStorageNum(b);
+final BlockUCState state = b.getBlockUCState();
+LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + state
++ ", replication# = " + numNodes + (numNodes < min ? " < " : " >= ")
++ " minimum = " + min + ") in file " + src);
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bb19336/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2aab4cb..4c2693d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3140,7 +3140,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
*/
   LocatedBlock storeAllocatedBlock(String src, long fileId, String clientName,
   ExtendedBlock previous, DatanodeStorageInfo[] targets) throws 
IOException {
-BlockInfo newBlockInfo = null;
+Block newBlock = null;
 long offset;
 checkOperation(OperationCategory.WRITE);
 waitForLoadingFSImage();
@@ -3173,8 +3173,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 ExtendedBlock.getLocalBlock(previous));
 
   // allocate new block, record block locations in INode.
-  Block newBlock = createNewBlock(isStriped);
-  newBlockInfo = saveAllocatedBlock(src, fileState.iip, newBlock, targets,
+  newBlock = createNewBlock(isStriped);
+  saveAllocatedBlock(src, fileState.iip, newBlock, targets,
   isStriped);
 
   persistNewBlock(src, pendingFile);
@@ -3185,7 +3185,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 getEditLog().logSync();
 
 // Return located block
-return makeLocatedBlock(newBlockInfo, targets, offset);
+return makeLocatedBlock(getStoredBlock(newBlock), targets, offset);
   }
 
   /*
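
In the FSNamesystem hunk above, only the lightweight Block handle survives the
allocation path, and the stored BlockInfo is re-resolved via
getStoredBlock(newBlock) when the LocatedBlock is built. A rough sketch of that
lookup-at-use pattern, with simplified stand-in types (not the real HDFS
classes):

    import java.util.HashMap;
    import java.util.Map;

    class BlockMapSketch {
      static class Block { final long id; Block(long id) { this.id = id; } }
      static class BlockInfo { final Block b; BlockInfo(Block b) { this.b = b; } }

      private final Map<Long, BlockInfo> stored = new HashMap<>();

      Block allocate(long id) {               // like createNewBlock(...)
        Block blk = new Block(id);
        stored.put(id, new BlockInfo(blk));   // like saveAllocatedBlock(...)
        return blk;
      }

      BlockInfo getStoredBlock(Block blk) {   // resolve at the point of use
        return stored.get(blk.id);
      }
    }

Resolving at return time means the caller never holds a possibly stale
BlockInfo reference across the allocation.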



[18/50] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-04-27 Thread zhz
HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b8b6b8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b8b6b8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b8b6b8a

Branch: refs/heads/HDFS-7285
Commit: 8b8b6b8a464d3a9de1031a250c95cdf56f13e2bf
Parents: 447dcc3
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:30 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  2 ++
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 6 files changed, 62 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b8b6b8a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b8b6b8a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
  private List<ECSchema> loadSchema(File schemaFile)
   throws ParserConfigurationException, IOException, SAXException {
 
-LOG.info("Loading predefined EC schema file " + schemaFile);
+LOG.info("Loading predefined EC schema file {}", schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
   ECSchema schema = loadSchema(element);
 schemas.add(schema);
 } else {
-  LOG.warn("Bad element in EC schema configuration file: " +
+  LOG.warn("Bad element in EC schema configuration file: {}",
   element.getTagName());
 }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-LOG.warn(schemaFilePath + " not found on the classpath.");
+LOG.warn("{} not found on the classpath.", schemaFilePath);
 schemaFile = null;
} else if (! url.getProtocol().equalsIgnoreCase("file")) {
 throw new RuntimeException(
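
The SchemaLoader hunks above move from commons-logging string concatenation to
SLF4J parameterized messages. A small, hedged illustration of that style,
assuming only slf4j-api (plus some binding) on the classpath; the file name is
a hypothetical value for the demo:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jStyleDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(Slf4jStyleDemo.class);

      public static void main(String[] args) {
        String schemaFile = "ecschema-def.xml";  // hypothetical name
        // The message is only formatted if the level is enabled; no string
        // concatenation happens otherwise, which is the point of the {}
        // placeholder style.
        LOG.info("Loading predefined EC schema file {}", schemaFile);
        LOG.warn("{} not found on the classpath.", schemaFile);
      }
    }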

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b8b6b8a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 

[26/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by Zhe Zhang.

2015-04-27 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35665e15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35665e15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35665e15

Branch: refs/heads/HDFS-7285
Commit: 35665e15289d957d627da227ba102648cdfdc8e3
Parents: cc362ac
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 10:56:24 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:32 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java |  4 ++--
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   | 16 +---
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 ++-
 5 files changed, 18 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35665e15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 703b42e..d728fda 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1099,7 +1099,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
  int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
 final int length = (int) (end - start + 1);
-actualGetFromOneDataNode(datanode, block, start, end, buf,
+actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
 new int[]{offset}, new int[]{length}, corruptedBlockMap);
   }
 
@@ -1118,7 +1118,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
*  block replica
*/
   void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long startInBlk, final long endInBlk,
+  long blockStartOffset, final long startInBlk, final long endInBlk,
   byte[] buf, int[] offsets, int[] lengths,
  Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
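
The signature change above passes the block's start offset (a plain long)
instead of a LocatedBlock snapshot, so the callee can re-resolve the current
block info itself, as the DFSStripedInputStream hunk below does with
getBlockGroupAt(blockStartOffset). A small sketch of the idea with hypothetical
stand-in types:

    class ReaderSketch {
      static class LocatedBlockStub {
        final long startOffset;
        LocatedBlockStub(long startOffset) { this.startOffset = startOffset; }
      }

      // Hypothetical resolver standing in for getBlockGroupAt(offset).
      LocatedBlockStub getBlockGroupAt(long offset) {
        return new LocatedBlockStub(offset);
      }

      void fetchBlockByteRange(long blockStartOffset, long start, long end) {
        // Re-resolve at use time rather than trusting a stale snapshot.
        LocatedBlockStub block = getBlockGroupAt(blockStartOffset);
        // ... read bytes [start, end] from block ...
      }
    }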

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35665e15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 077b0f8..8a431b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -224,7 +224,7 @@ public class DFSStripedInputStream extends DFSInputStream {
* Real implementation of pread.
*/
   @Override
-  protected void fetchBlockByteRange(LocatedBlock block, long start,
+  protected void fetchBlockByteRange(long blockStartOffset, long start,
   long end, byte[] buf, int offset,
  Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
@@ -234,7 +234,7 @@ public class DFSStripedInputStream extends DFSInputStream {
 int len = (int) (end - start + 1);
 
 // Refresh the striped block group
-block = getBlockGroupAt(block.getStartOffset());
+LocatedBlock block = getBlockGroupAt(blockStartOffset);
assert block instanceof LocatedStripedBlock : "NameNode" +
 " should return a LocatedStripedBlock for a striped file";
 LocatedStripedBlock blockGroup = (LocatedStripedBlock) block;
@@ -254,9 +254,11 @@ public class DFSStripedInputStream extends DFSInputStream {
   DatanodeInfo loc = blks[i].getLocations()[0];
   StorageType type = blks[i].getStorageTypes()[0];
   DNAddrPair dnAddr = new DNAddrPair(loc, NetUtils.createSocketAddr(
-  loc.getXferAddr(dfsClient.getConf().connectToDnViaHostname)), type);
-  Callable<Void> readCallable = getFromOneDataNode(dnAddr, blks[i],
-  rp.startOffsetInBlock, rp.startOffsetInBlock + rp.readLength - 1, buf,
+  loc.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname())),
+  type);
+  Callable<Void> readCallable = getFromOneDataNode(dnAddr,
+ 

[32/50] hadoop git commit: HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block is a striped block. Contributed by Hui Zheng.

2015-04-27 Thread zhz
HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block 
is a striped block. Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f882d151
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f882d151
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f882d151

Branch: refs/heads/HDFS-7285
Commit: f882d1516574c08ec2021dd6978223405261103d
Parents: 16946d1
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 17 12:05:31 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 27 10:42:34 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../hdfs/server/blockmanagement/BlockManager.java | 18 --
 2 files changed, 6 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f882d151/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 78ca6d3..0ed61cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -85,3 +85,5 @@
 
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
+
+HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f882d151/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dd00e6d..29ca26d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2924,15 +2924,6 @@ public class BlockManager {
   }
 
   /**
-   * Set the value of whether there are any non-EC blocks using StripedID.
-   *
-   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
-   */
-  public void hasNonEcBlockUsingStripedID(boolean has){
-hasNonEcBlockUsingStripedID = has;
-  }
-
-  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3528,7 +3519,7 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
  new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
-  if ((info == null) && hasNonEcBlockUsingStripedID()){
+  if ((info == null) && hasNonEcBlockUsingStripedID){
 info = blocksMap.getStoredBlock(block);
   }
 } else {
@@ -3712,10 +3703,9 @@ public class BlockManager {
*/
   public BlockInfo addBlockCollectionWithCheck(
   BlockInfo block, BlockCollection bc) {
-if (!hasNonEcBlockUsingStripedID()){
-  if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
-hasNonEcBlockUsingStripedID(true);
-  }
+if (!hasNonEcBlockUsingStripedID && !block.isStriped() &&
+BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  hasNonEcBlockUsingStripedID = true;
 }
 return addBlockCollection(block, bc);
   }
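
The flag set in addBlockCollectionWithCheck above acts as a one-way latch: it
flips to true only when a non-striped block turns out to occupy the striped ID
range, so later lookups know a second probe of the blocks map may be needed. A
hedged sketch of that latch, with hypothetical helpers standing in for
BlockIdManager.isStripedBlockID and BlockInfo.isStriped():

    class StripedIdLatchSketch {
      private boolean hasNonEcBlockUsingStripedID = false;

      // Hypothetical predicate; in HDFS the striped range is a reserved
      // block-ID range, modeled here as negative IDs for illustration.
      private boolean isStripedBlockID(long id) { return id < 0; }

      void addBlock(long blockId, boolean isStriped) {
        if (!hasNonEcBlockUsingStripedID && !isStriped
            && isStripedBlockID(blockId)) {
          hasNonEcBlockUsingStripedID = true;  // latches permanently
        }
        // ... add to the block collection ...
      }
    }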


