hadoop git commit: MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes. Contributed by Ray Chiang.

2015-06-08 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 960b8f19c - 6afe20a7a


MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes. 
Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6afe20a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6afe20a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6afe20a7

Branch: refs/heads/trunk
Commit: 6afe20a7a4bbfa30fce3e3c9873ad43201987998
Parents: 960b8f1
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Jun 8 15:06:10 2015 -0700
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon Jun 8 15:06:10 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../hadoop/mapreduce/jobhistory/TestEvents.java | 148 +--
 .../mapreduce/jobhistory/TestJobSummary.java|  10 +-
 .../mapreduce/jobhistory/AMStartedEvent.java|  24 +--
 .../mapreduce/jobhistory/AvroArrayUtils.java|   2 +-
 .../mapreduce/jobhistory/EventReader.java   |  22 +--
 .../mapreduce/jobhistory/EventWriter.java   |  24 +--
 .../mapreduce/jobhistory/JobFinishedEvent.java  |  39 ++---
 .../mapreduce/jobhistory/JobHistoryParser.java  |   2 +-
 .../jobhistory/JobInfoChangeEvent.java  |  12 +-
 .../mapreduce/jobhistory/JobInitedEvent.java|  24 +--
 .../jobhistory/JobPriorityChangeEvent.java  |  10 +-
 .../jobhistory/JobStatusChangedEvent.java   |   8 +-
 .../mapreduce/jobhistory/JobSubmittedEvent.java |  62 
 .../JobUnsuccessfulCompletionEvent.java |   4 +-
 .../jobhistory/MapAttemptFinishedEvent.java |  66 -
 .../jobhistory/ReduceAttemptFinishedEvent.java  |  70 -
 .../jobhistory/TaskAttemptFinishedEvent.java|  34 ++---
 .../jobhistory/TaskAttemptStartedEvent.java |  49 +++---
 .../TaskAttemptUnsuccessfulCompletionEvent.java |  66 -
 .../mapreduce/jobhistory/TaskFailedEvent.java   |  32 ++--
 .../mapreduce/jobhistory/TaskFinishedEvent.java |  26 ++--
 .../mapreduce/jobhistory/TaskStartedEvent.java  |  20 ++-
 .../mapreduce/jobhistory/TaskUpdatedEvent.java  |  10 +-
 24 files changed, 393 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6afe20a7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index cbf8af0..29191cd 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -459,6 +459,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6377. JHS sorting on state column not working in webUi.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes
+(Ray Chiang via ozawa).
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6afe20a7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
index bb9b56b..597f7a0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
@@ -120,65 +120,65 @@ public class TestEvents {
 new ByteArrayInputStream(getEvents(;
 HistoryEvent e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED));
-assertEquals(ID, ((JobPriorityChange) e.getDatum()).jobid.toString());
+assertEquals(ID, ((JobPriorityChange) 
e.getDatum()).getJobid().toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED));
-assertEquals(ID, ((JobStatusChanged) e.getDatum()).jobid.toString());
+assertEquals(ID, ((JobStatusChanged) 
e.getDatum()).getJobid().toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.TASK_UPDATED));
-assertEquals(ID, ((TaskUpdated) e.getDatum()).taskid.toString());
+assertEquals(ID, ((TaskUpdated) e.getDatum()).getTaskid().toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
 assertEquals(taskId,
-

hadoop git commit: YARN-3778. Fix Yarn resourcemanger CLI usage. Contributed by Brahma Reddy Battula

2015-06-08 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6afe20a7a - 2b2465dfa


YARN-3778. Fix Yarn resourcemanger CLI usage. Contributed by Brahma Reddy 
Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b2465df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b2465df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b2465df

Branch: refs/heads/trunk
Commit: 2b2465dfac1f147b6bb20d878b69a8cc3e85c8ad
Parents: 6afe20a
Author: Xuan xg...@apache.org
Authored: Mon Jun 8 15:43:03 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Jun 8 15:43:03 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 2 ++
 .../apache/hadoop/yarn/server/resourcemanager/ResourceManager.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2465df/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 86494cc..3ee3e77 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -510,6 +510,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3747. TestLocalDirsHandlerService should delete the created test 
directory logDir2.
 (David Moore via devaraj)
 
+YARN-3778. Fix Yarn resourcemanger CLI usage. (Brahma Reddy Battula via 
xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2465df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index c209873..4153ba1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1303,7 +1303,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   private static void printUsage(PrintStream out) {
-    out.println("Usage: java ResourceManager [-format-state-store]");
+    out.println("Usage: yarn resourcemanager [-format-state-store]");
     out.println("                            "
         + "[-remove-application-from-state-store <appId>]" + "\n");
   }



hadoop git commit: YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. Contributed by Karthik Kambatla (cherry picked from commit 960b8f19ca98dbcfdd30f2f1f275b8718d2e872f)

2015-06-08 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8ee50d8ca - a24ead8c6


YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. Contributed 
by Karthik Kambatla
(cherry picked from commit 960b8f19ca98dbcfdd30f2f1f275b8718d2e872f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a24ead8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a24ead8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a24ead8c

Branch: refs/heads/branch-2
Commit: a24ead8c6da728ec525fda5f8469f1131dac6d23
Parents: 8ee50d8
Author: Jian He jia...@apache.org
Authored: Mon Jun 8 14:50:58 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Jun 8 14:51:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   8 +
 .../recovery/ZKRMStateStore.java| 770 ++-
 .../recovery/RMStateStoreTestBase.java  |   3 +-
 .../recovery/TestZKRMStateStore.java|  83 +-
 .../recovery/TestZKRMStateStorePerf.java|  12 +-
 .../TestZKRMStateStoreZKClientConnections.java  | 181 +
 8 files changed, 336 insertions(+), 726 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a24ead8c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 05cab3d..a0b5323 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -246,6 +246,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-1462. AHS API and other AHS changes to handle tags for completed MR 
jobs. (xgong)
 
+YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. 
+(Karthik Kambatla via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a24ead8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2d1724c..e06ec1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -413,7 +413,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_ZK_RETRY_INTERVAL_MS =
       RM_ZK_PREFIX + "retry-interval-ms";
-  public static final long DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000;
+  public static final int DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000;

   public static final String RM_ZK_TIMEOUT_MS = RM_ZK_PREFIX + "timeout-ms";
   public static final int DEFAULT_RM_ZK_TIMEOUT_MS = 10*1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a24ead8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 8eed8f1..b0ce4a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -175,6 +175,14 @@
   artifactIdhadoop-yarn-server-web-proxy/artifactId
 /dependency
     <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a24ead8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 

hadoop git commit: MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes. Contributed by Ray Chiang.

2015-06-08 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a24ead8c6 - c6cdecc6b


MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes. 
Contributed by Ray Chiang.

(cherry picked from commit 6afe20a7a4bbfa30fce3e3c9873ad43201987998)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6cdecc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6cdecc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6cdecc6

Branch: refs/heads/branch-2
Commit: c6cdecc6b3a053e1e3f687148410369f32ef2339
Parents: a24ead8
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Jun 8 15:06:10 2015 -0700
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon Jun 8 15:06:23 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../hadoop/mapreduce/jobhistory/TestEvents.java | 148 +--
 .../mapreduce/jobhistory/TestJobSummary.java|  10 +-
 .../mapreduce/jobhistory/AMStartedEvent.java|  24 +--
 .../mapreduce/jobhistory/AvroArrayUtils.java|   2 +-
 .../mapreduce/jobhistory/EventReader.java   |  22 +--
 .../mapreduce/jobhistory/EventWriter.java   |  24 +--
 .../mapreduce/jobhistory/JobFinishedEvent.java  |  39 ++---
 .../mapreduce/jobhistory/JobHistoryParser.java  |   2 +-
 .../jobhistory/JobInfoChangeEvent.java  |  12 +-
 .../mapreduce/jobhistory/JobInitedEvent.java|  24 +--
 .../jobhistory/JobPriorityChangeEvent.java  |  10 +-
 .../jobhistory/JobStatusChangedEvent.java   |   8 +-
 .../mapreduce/jobhistory/JobSubmittedEvent.java |  62 
 .../JobUnsuccessfulCompletionEvent.java |   4 +-
 .../jobhistory/MapAttemptFinishedEvent.java |  66 -
 .../jobhistory/ReduceAttemptFinishedEvent.java  |  70 -
 .../jobhistory/TaskAttemptFinishedEvent.java|  34 ++---
 .../jobhistory/TaskAttemptStartedEvent.java |  49 +++---
 .../TaskAttemptUnsuccessfulCompletionEvent.java |  66 -
 .../mapreduce/jobhistory/TaskFailedEvent.java   |  32 ++--
 .../mapreduce/jobhistory/TaskFinishedEvent.java |  26 ++--
 .../mapreduce/jobhistory/TaskStartedEvent.java  |  20 ++-
 .../mapreduce/jobhistory/TaskUpdatedEvent.java  |  10 +-
 24 files changed, 393 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6cdecc6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 1ed0bf5..02c6492 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -201,6 +201,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6377. JHS sorting on state column not working in webUi.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6388. Remove deprecation warnings from JobHistoryServer classes
+(Ray Chiang via ozawa).
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6cdecc6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
index bb9b56b..597f7a0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
@@ -120,65 +120,65 @@ public class TestEvents {
 new ByteArrayInputStream(getEvents(;
 HistoryEvent e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED));
-assertEquals(ID, ((JobPriorityChange) e.getDatum()).jobid.toString());
+assertEquals(ID, ((JobPriorityChange) 
e.getDatum()).getJobid().toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED));
-assertEquals(ID, ((JobStatusChanged) e.getDatum()).jobid.toString());
+assertEquals(ID, ((JobStatusChanged) 
e.getDatum()).getJobid().toString());
 
 e = reader.getNextEvent();
 assertTrue(e.getEventType().equals(EventType.TASK_UPDATED));
-assertEquals(ID, ((TaskUpdated) e.getDatum()).taskid.toString());
+assertEquals(ID, ((TaskUpdated) e.getDatum()).getTaskid().toString());
 
 e = reader.getNextEvent();
 

[1/2] hadoop git commit: HADOOP-12054. RPC client should not retry for InvalidToken exceptions. (Contributed by Varun Saxena)

2015-06-08 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 869304dc8 - 82e772bdb
  refs/heads/trunk 2b2465dfa - 84ba1a75b


HADOOP-12054. RPC client should not retry for InvalidToken exceptions. 
(Contributed by Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84ba1a75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84ba1a75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84ba1a75

Branch: refs/heads/trunk
Commit: 84ba1a75b6bcd696dfc20aeabb6f19cb4eff6011
Parents: 2b2465d
Author: Arpit Agarwal a...@apache.org
Authored: Mon Jun 8 15:37:53 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Jun 8 15:45:23 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../apache/hadoop/io/retry/RetryPolicies.java   |  4 +
 .../java/org/apache/hadoop/ipc/TestIPC.java | 78 +---
 3 files changed, 76 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ba1a75/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 79f3178..fa6e4b7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -837,6 +837,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
 callers trying to use them. (Brahma Reddy Battula via stevel)
 
+HADOOP-12054. RPC client should not retry for InvalidToken exceptions.
+(Varun Saxena via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ba1a75/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a86f443..06dc4cb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 /**
  * p
@@ -575,6 +576,9 @@ public class RetryPolicies {
 // RetriableException or RetriableException wrapped 
 return new RetryAction(RetryAction.RetryDecision.RETRY,
   getFailoverOrRetrySleepTime(retries));
+      } else if (e instanceof InvalidToken) {
+        return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+            "Invalid or Cancelled Token");
   } else if (e instanceof SocketException
   || (e instanceof IOException  !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ba1a75/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index b443011..08508ae 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -62,6 +62,9 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
@@ -71,6 +74,7 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -204,18 +208,21 @@ public class TestIPC {
   

[2/2] hadoop git commit: HADOOP-12054. RPC client should not retry for InvalidToken exceptions. (Contributed by Varun Saxena)

2015-06-08 Thread arp
HADOOP-12054. RPC client should not retry for InvalidToken exceptions. 
(Contributed by Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82e772bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82e772bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82e772bd

Branch: refs/heads/branch-2
Commit: 82e772bdbb153bbfaaf459dfa1bc4dd7ab347d9e
Parents: 869304d
Author: Arpit Agarwal a...@apache.org
Authored: Mon Jun 8 15:37:53 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Jun 8 15:45:25 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../apache/hadoop/io/retry/RetryPolicies.java   |  4 +
 .../java/org/apache/hadoop/ipc/TestIPC.java | 78 +---
 3 files changed, 76 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e772bd/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6b93e54..7bfc5fa 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -353,6 +353,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
 callers trying to use them. (Brahma Reddy Battula via stevel)
 
+HADOOP-12054. RPC client should not retry for InvalidToken exceptions.
+(Varun Saxena via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e772bd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a86f443..06dc4cb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 /**
  * p
@@ -575,6 +576,9 @@ public class RetryPolicies {
 // RetriableException or RetriableException wrapped 
 return new RetryAction(RetryAction.RetryDecision.RETRY,
   getFailoverOrRetrySleepTime(retries));
+      } else if (e instanceof InvalidToken) {
+        return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+            "Invalid or Cancelled Token");
   } else if (e instanceof SocketException
   || (e instanceof IOException  !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e772bd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index b443011..08508ae 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -62,6 +62,9 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
@@ -71,6 +74,7 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -204,18 +208,21 @@ public class TestIPC {
   this.server = server;
   this.total = total;
 }
-
+
+protected Object returnValue(Object value) throws 

hadoop git commit: YARN-3778. Fix Yarn resourcemanger CLI usage. Contributed by Brahma Reddy Battula

2015-06-08 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c6cdecc6b - 869304dc8


YARN-3778. Fix Yarn resourcemanger CLI usage. Contributed by Brahma Reddy 
Battula

(cherry picked from commit 2b2465dfac1f147b6bb20d878b69a8cc3e85c8ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/869304dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/869304dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/869304dc

Branch: refs/heads/branch-2
Commit: 869304dc83e8bd1070420037583f66675d165ca8
Parents: c6cdecc
Author: Xuan xg...@apache.org
Authored: Mon Jun 8 15:43:03 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Jun 8 15:44:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 2 ++
 .../apache/hadoop/yarn/server/resourcemanager/ResourceManager.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/869304dc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a0b5323..1d1b403 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -462,6 +462,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3747. TestLocalDirsHandlerService should delete the created test 
directory logDir2.
 (David Moore via devaraj)
 
+YARN-3778. Fix Yarn resourcemanger CLI usage. (Brahma Reddy Battula via 
xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/869304dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index c209873..4153ba1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1303,7 +1303,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   private static void printUsage(PrintStream out) {
-    out.println("Usage: java ResourceManager [-format-state-store]");
+    out.println("Usage: yarn resourcemanager [-format-state-store]");
     out.println("                            "
         + "[-remove-application-from-state-store <appId>]" + "\n");
   }



hadoop git commit: YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. Contributed by Karthik Kambatla

2015-06-08 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0e80d5198 - 960b8f19c


YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. Contributed 
by Karthik Kambatla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/960b8f19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/960b8f19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/960b8f19

Branch: refs/heads/trunk
Commit: 960b8f19ca98dbcfdd30f2f1f275b8718d2e872f
Parents: 0e80d51
Author: Jian He jia...@apache.org
Authored: Mon Jun 8 14:50:58 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Jun 8 14:50:58 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   8 +
 .../recovery/ZKRMStateStore.java| 770 ++-
 .../recovery/RMStateStoreTestBase.java  |   3 +-
 .../recovery/TestZKRMStateStore.java|  83 +-
 .../recovery/TestZKRMStateStorePerf.java|  12 +-
 .../TestZKRMStateStoreZKClientConnections.java  | 181 +
 8 files changed, 336 insertions(+), 726 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b8f19/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f393cad..86494cc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -294,6 +294,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-1462. AHS API and other AHS changes to handle tags for completed MR 
jobs. (xgong)
 
+YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. 
+(Karthik Kambatla via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b8f19/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 72855cc..3ea1558 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -413,7 +413,7 @@ public class YarnConfiguration extends Configuration {
 
  public static final String RM_ZK_RETRY_INTERVAL_MS =
  RM_ZK_PREFIX + "retry-interval-ms";
-  public static final long DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000;
+  public static final int DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000;

  public static final String RM_ZK_TIMEOUT_MS = RM_ZK_PREFIX + "timeout-ms";
  public static final int DEFAULT_RM_ZK_TIMEOUT_MS = 10000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b8f19/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 76d280a..4960f95 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -175,6 +175,14 @@
      <artifactId>hadoop-yarn-server-web-proxy</artifactId>
    </dependency>
    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+    </dependency>
+    <dependency>
      <groupId>org.apache.zookeeper</groupId>
      <artifactId>zookeeper</artifactId>
    </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b8f19/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 

[27/50] hadoop git commit: HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible. (Contributed by Kengo Seki)

2015-06-08 Thread zjshen
HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible. 
(Contributed by Kengo Seki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e72a346e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e72a346e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e72a346e

Branch: refs/heads/YARN-2928
Commit: e72a346e1ea6565c4f87c6ed0afd33fa09e1c8da
Parents: 94db4f2
Author: Arpit Agarwal a...@apache.org
Authored: Thu Jun 4 10:53:16 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:57 2015 -0700

--
 dev-support/smart-apply-patch.sh| 5 -
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e72a346e/dev-support/smart-apply-patch.sh
--
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index be29c47..ebcb660 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -101,7 +101,10 @@ if grep -q '^diff --git' "${PATCH_FILE}"; then
  fi
  # shellcheck disable=SC2086
  git apply ${GIT_FLAGS} "${PATCH_FILE}"
-  exit $?
+  if [[ $? == 0 ]]; then
+    cleanup 0
+  fi
+  echo "git apply failed. Going to apply the patch with: ${PATCH}"
fi
 
 # Come up with a list of changed files into $TMP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e72a346e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3bca0bc..942d9e9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -820,6 +820,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12019. update BUILDING.txt to include python for 'mvn site'
 in windows (vinayakumarb)
 
+HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible.
+(Kengo Seki via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[07/50] hadoop git commit: YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM recovery is enabled. Contributed by Rohith Sharmaks

2015-06-08 Thread zjshen
YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM recovery 
is enabled. Contributed by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6e1fd03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6e1fd03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6e1fd03

Branch: refs/heads/YARN-2928
Commit: d6e1fd0362fff1431fdc3a1116e80ad7a60bde46
Parents: 91a3b9f
Author: Jason Lowe jl...@apache.org
Authored: Wed Jun 3 19:44:07 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../yarn/server/nodemanager/NodeManager.java   | 17 +++--
 2 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e1fd03/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 21618c7..1841d80 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3725. App submission via REST API is broken in secure mode due to 
 Timeline DT service address is empty. (Zhijie Shen via wangda)
 
+YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
+recovery is enabled (Rohith Sharmaks via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e1fd03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 9f34317..2f3d361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.NodeHealthScriptRunner;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -97,6 +98,7 @@ public class NodeManager extends CompositeService
   
   private AtomicBoolean isStopping = new AtomicBoolean(false);
   private boolean rmWorkPreservingRestartEnabled;
+  private boolean shouldExitOnShutdownEvent = false;
 
   public NodeManager() {
 super(NodeManager.class.getName());
@@ -354,7 +356,16 @@ public class NodeManager extends CompositeService
 new Thread() {
   @Override
   public void run() {
-NodeManager.this.stop();
+try {
+  NodeManager.this.stop();
+} catch (Throwable t) {
+  LOG.error("Error while shutting down NodeManager", t);
+} finally {
+  if (shouldExitOnShutdownEvent
+      && !ShutdownHookManager.get().isShutdownInProgress()) {
+    ExitUtil.terminate(-1);
+  }
+}
   }
 }.start();
   }
@@ -563,7 +574,9 @@ public class NodeManager extends CompositeService
   nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
   ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
 SHUTDOWN_HOOK_PRIORITY);
-
+  // System exit should be called only when NodeManager is instantiated 
from
+  // main() funtion
+  this.shouldExitOnShutdownEvent = true;
   this.init(conf);
   this.start();
 } catch (Throwable t) {



[33/50] hadoop git commit: Add missing test file of YARN-3733

2015-06-08 Thread zjshen
Add missing test file of YARN-3733


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eba031e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eba031e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eba031e1

Branch: refs/heads/YARN-2928
Commit: eba031e1d13508bb62723625828fae267b043de1
Parents: 96a8d01
Author: Wangda Tan wan...@apache.org
Authored: Thu Jun 4 13:18:25 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:58 2015 -0700

--
 .../util/resource/TestResourceCalculator.java   | 125 +++
 1 file changed, 125 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eba031e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
new file mode 100644
index 000..6a0b62e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util.resource;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class TestResourceCalculator {
+  private ResourceCalculator resourceCalculator;
+
+  @Parameterized.Parameters
+  public static Collection<ResourceCalculator[]> getParameters() {
+    return Arrays.asList(new ResourceCalculator[][] {
+        { new DefaultResourceCalculator() },
+        { new DominantResourceCalculator() } });
+  }
+
+  public TestResourceCalculator(ResourceCalculator rs) {
+this.resourceCalculator = rs;
+  }
+
+  @Test(timeout = 10000)
+  public void testResourceCalculatorCompareMethod() {
+Resource clusterResource = Resource.newInstance(0, 0);
+
+// For lhs == rhs
+Resource lhs = Resource.newInstance(0, 0);
+Resource rhs = Resource.newInstance(0, 0);
+assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+true, lhs, lhs);
+
+    // lhs > rhs
+lhs = Resource.newInstance(1, 1);
+rhs = Resource.newInstance(0, 0);
+assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
+true, lhs, rhs);
+
+    // For lhs < rhs
+lhs = Resource.newInstance(0, 0);
+rhs = Resource.newInstance(1, 1);
+assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
+false, rhs, lhs);
+
+if (!(resourceCalculator instanceof DominantResourceCalculator)) {
+  return;
+}
+
+// verify for 2 dimensional resources i.e memory and cpu
+// dominant resource types
+lhs = Resource.newInstance(1, 0);
+rhs = Resource.newInstance(0, 1);
+assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+true, lhs, lhs);
+
+lhs = Resource.newInstance(0, 1);
+rhs = Resource.newInstance(1, 0);
+assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+true, lhs, lhs);
+
+lhs = Resource.newInstance(1, 1);
+rhs = Resource.newInstance(1, 0);
+assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
+true, lhs, rhs);
+
+lhs = Resource.newInstance(0, 1);
+rhs = Resource.newInstance(1, 1);
+assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
+false, rhs, lhs);
+
+  }
+
+
+  private void assertResourcesOperations(Resource clusterResource,
+  Resource lhs, Resource rhs, boolean lessThan, boolean lessThanOrEqual,

[08/50] hadoop git commit: YARN-3751. Fixed AppInfo to check if used resources are null. Contributed by Sunil G.

2015-06-08 Thread zjshen
YARN-3751. Fixed AppInfo to check if used resources are null. Contributed by 
Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91a3b9f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91a3b9f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91a3b9f0

Branch: refs/heads/YARN-2928
Commit: 91a3b9f0389610e31e243df0541f9e8cd8d5de87
Parents: dadcb31
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Jun 3 11:51:41 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../apache/hadoop/yarn/server/webapp/dao/AppInfo.java | 14 --
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a3b9f0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a19ba88..21618c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -570,6 +570,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3740. Fixed the typo in the configuration name:
 APPLICATION_HISTORY_PREFIX_MAX_APPS. (Xuan Gong via zjshen)
 
+YARN-3751. Fixed AppInfo to check if used resources are null. (Sunil G via
+zjshen)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a3b9f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index 0cc5f75..8f332a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -86,12 +86,14 @@ public class AppInfo {
 elapsedTime = Times.elapsed(startedTime, finishedTime);
 finalAppStatus = app.getFinalApplicationStatus();
 if (app.getApplicationResourceUsageReport() != null) {
-  runningContainers =
-  app.getApplicationResourceUsageReport().getNumUsedContainers();
-  allocatedCpuVcores = app.getApplicationResourceUsageReport()
-  .getUsedResources().getVirtualCores();
-  allocatedMemoryMB = app.getApplicationResourceUsageReport()
-  .getUsedResources().getMemory();
+  runningContainers = app.getApplicationResourceUsageReport()
+  .getNumUsedContainers();
+  if (app.getApplicationResourceUsageReport().getUsedResources() != null) {
+allocatedCpuVcores = app.getApplicationResourceUsageReport()
+.getUsedResources().getVirtualCores();
+allocatedMemoryMB = app.getApplicationResourceUsageReport()
+.getUsedResources().getMemory();
+  }
 }
 progress = app.getProgress() * 100; // in percent
 if (app.getApplicationTags() != null
     && !app.getApplicationTags().isEmpty()) {



[47/50] hadoop git commit: HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris Nauroth.

2015-06-08 Thread zjshen
HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3c1478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3c1478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3c1478

Branch: refs/heads/YARN-2928
Commit: 0a3c14782b2feb3595d02debdfa1598df748bc60
Parents: 77e5bae
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 08:39:02 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:02 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java | 6 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 .../org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java| 8 
 3 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 5fd89c4..9b9e213 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -731,6 +731,12 @@ public class FileUtil {
   }
 }
 
+if (entry.isLink()) {
+  File src = new File(outputDir, entry.getLinkName());
+  HardLink.createHardLink(src, outputFile);
+  return;
+}
+
 int count;
 byte data[] = new byte[2048];
 BufferedOutputStream outputStream = new BufferedOutputStream(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 853a022..73574b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -864,6 +864,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8539. Hdfs doesnt have class 'debug' in windows.
 (Anu Engineer via cnauroth)
 
+HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 343320c..224abea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -37,11 +37,11 @@ public class TestDatanodeLayoutUpgrade {
 upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
 Configuration conf = new 
Configuration(TestDFSUpgradeFromImage.upgradeConf);
 conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-System.getProperty("test.build.data") + File.separator +
-"dfs" + File.separator + "data");
+new File(System.getProperty("test.build.data"),
+"dfs" + File.separator + "data").toURI().toString());
 conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-System.getProperty("test.build.data") + File.separator +
-"dfs" + File.separator + "name");
+new File(System.getProperty("test.build.data"),
+"dfs" + File.separator + "name").toURI().toString());
 upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
 .manageDataDfsDirs(false).manageNameDfsDirs(false), null);
   }



[11/50] hadoop git commit: Revert YARN-1462. Made RM write application tags to timeline server and exposed them to users via generic history web UI and REST API. Contributed by Xuan Gong.

2015-06-08 Thread zjshen
Revert YARN-1462. Made RM write application tags to timeline server and 
exposed them to users via generic history web UI and REST API. Contributed by 
Xuan Gong.

This reverts commit 4a9ec1a8243e2394ff7221b1c20dfaa80e9f5111.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bff83ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bff83ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bff83ca

Branch: refs/heads/YARN-2928
Commit: 2bff83caf92d2faf33522b417dd86a2ebace2d9f
Parents: 89899fe
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Jun 3 14:15:41 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:14 2015 -0700

--
 .../org/apache/hadoop/mapred/NotRunningJob.java |  2 +-
 .../mapred/TestClientServiceDelegate.java   |  4 +--
 .../apache/hadoop/mapred/TestYARNRunner.java|  2 +-
 hadoop-yarn-project/CHANGES.txt |  3 --
 .../yarn/api/records/ApplicationReport.java |  4 +--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestAHSClient.java |  8 ++---
 .../yarn/client/api/impl/TestYarnClient.java|  9 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 27 ++
 .../hadoop/yarn/api/TestApplicatonReport.java   |  3 +-
 .../ApplicationHistoryManagerImpl.java  |  2 +-
 ...pplicationHistoryManagerOnTimelineStore.java | 19 ++
 ...pplicationHistoryManagerOnTimelineStore.java |  9 -
 .../metrics/ApplicationMetricsConstants.java|  1 -
 .../metrics/ApplicationCreatedEvent.java| 10 +-
 .../metrics/SystemMetricsPublisher.java |  4 +--
 .../applicationsmanager/MockAsm.java|  2 +-
 .../metrics/TestSystemMetricsPublisher.java | 37 
 .../resourcemanager/webapp/TestRMWebApp.java|  2 +-
 .../src/site/markdown/TimelineServer.md | 30 ++--
 20 files changed, 36 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index 1b26cd3..03552e4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -90,7 +90,7 @@ public class NotRunningJob implements MRClientProtocol {
 return ApplicationReport.newInstance(unknownAppId, unknownAttemptId,
  "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
  "N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
-  YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null);
+  YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
   NotRunningJob(ApplicationReport applicationReport, JobState jobState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index c1c03df..b85f18d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -516,7 +516,7 @@ public class TestClientServiceDelegate {
 return ApplicationReport.newInstance(appId, attemptId, user, queue,
   appname, host, 124, null, YarnApplicationState.FINISHED,
   diagnostics, url, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
-  "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null);
+  "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
   private ApplicationReport getRunningApplicationReport(String host, int port) 

[35/50] hadoop git commit: MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the results file if its job fails. Contributed by Harsh J.

2015-06-08 Thread zjshen
MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the
results file if its job fails. Contributed by Harsh J.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0962cdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0962cdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0962cdc

Branch: refs/heads/YARN-2928
Commit: a0962cdcc6c31d6bb025dc82d733ea75ae5ed3d8
Parents: c8f7f17
Author: Devaraj K deva...@apache.org
Authored: Fri Jun 5 21:22:47 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:59 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java| 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0962cdc/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d78fb9c..12e3a3f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -361,6 +361,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6174. Combine common stream code into parent class for
 InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)
 
+MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the 
+results file if its job fails. (Harsh J via devaraj)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0962cdc/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
index 25dee6b..1a0c372 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
@@ -304,6 +304,10 @@ public class QuasiMonteCarlo extends Configured implements 
Tool {
   System.out.println("Starting Job");
   final long startTime = System.currentTimeMillis();
   job.waitForCompletion(true);
+  if (!job.isSuccessful()) {
+    System.out.println("Job " + job.getJobID() + " failed!");
+    System.exit(1);
+  }
   final double duration = (System.currentTimeMillis() - startTime)/1000.0;
   System.out.println("Job Finished in " + duration + " seconds");
 



[24/50] hadoop git commit: YARN-3733. Fix DominantRC#compare() does not work as expected if cluster resource is empty. (Rohith Sharmaks via wangda)

2015-06-08 Thread zjshen
YARN-3733. Fix DominantRC#compare() does not work as expected if cluster 
resource is empty. (Rohith Sharmaks via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08525ff3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08525ff3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08525ff3

Branch: refs/heads/YARN-2928
Commit: 08525ff38ae53ff7c1f48a5fcdf7906d53259c90
Parents: ea1a48a
Author: Wangda Tan wan...@apache.org
Authored: Thu Jun 4 10:22:57 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:56 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../resource/DominantResourceCalculator.java| 15 +
 .../capacity/TestCapacityScheduler.java | 58 +++-
 3 files changed, 75 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0c76206..83aa12f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -714,6 +714,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
 recovery is enabled (Rohith Sharmaks via jlowe)
 
+YARN-3733. Fix DominantRC#compare() does not work as expected if 
+cluster resource is empty. (Rohith Sharmaks via wangda)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6f5b40e..2ee95ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -53,6 +53,21 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   return 0;
 }
 
+if (isInvalidDivisor(clusterResource)) {
+  if ((lhs.getMemory() < rhs.getMemory() && lhs.getVirtualCores() > rhs
+      .getVirtualCores())
+      || (lhs.getMemory() > rhs.getMemory() && lhs.getVirtualCores() < rhs
+      .getVirtualCores())) {
+    return 0;
+  } else if (lhs.getMemory() > rhs.getMemory()
+      || lhs.getVirtualCores() > rhs.getVirtualCores()) {
+    return 1;
+  } else if (lhs.getMemory() < rhs.getMemory()
+      || lhs.getVirtualCores() < rhs.getVirtualCores()) {
+    return -1;
+  }
+}
+
 float l = getResourceAsValue(clusterResource, lhs, true);
 float r = getResourceAsValue(clusterResource, rhs, true);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0361424..3827f85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -130,6 +130,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfoList;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 

[43/50] hadoop git commit: YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation and container reservation. (Zhihai Xu via kasha)

2015-06-08 Thread zjshen
YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation and 
container reservation. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e2c3dec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e2c3dec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e2c3dec

Branch: refs/heads/YARN-2928
Commit: 1e2c3deccb39a2a73aa5792f69c15e8072f48399
Parents: ee73b53
Author: Karthik Kambatla ka...@apache.org
Authored: Sun Jun 7 11:37:52 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FSAppAttempt.java| 128 +
 .../resourcemanager/scheduler/fair/FSQueue.java |  15 +
 .../scheduler/fair/FairScheduler.java   |  42 +--
 .../scheduler/fair/TestFairScheduler.java   | 282 +++
 5 files changed, 378 insertions(+), 92 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3643d0c..67a705c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -589,6 +589,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3766. Fixed the apps table column error of generic history web UI.
 (Xuan Gong via zjshen)
 
+YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
+and container reservation. (Zhihai Xu via kasha)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6287deb..7419446 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -541,39 +541,37 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   return container.getResource();
-} else {
-  if (!FairScheduler.fitsInMaxShare(getQueue(), capability)) {
-return Resources.none();
-  }
+}
 
-  // The desired container won't fit here, so reserve
-  reserve(request.getPriority(), node, container, reserved);
+// The desired container won't fit here, so reserve
+reserve(request.getPriority(), node, container, reserved);
 
-  return FairScheduler.CONTAINER_RESERVED;
-}
+return FairScheduler.CONTAINER_RESERVED;
   }
 
   private boolean hasNodeOrRackLocalRequests(Priority priority) {
return getResourceRequests(priority).size() > 1;
   }
 
-  private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Node offered to app: " + getName() + " reserved: " +
reserved);
-}
-
+  /**
+   * Whether the AM container for this app is over maxAMShare limit.
+   */
+  private boolean isOverAMShareLimit() {
 // Check the AM resource usage for the leaf queue
if (!isAmRunning() && !getUnmanagedAM()) {
  List<ResourceRequest> ask = appSchedulingInfo.getAllResourceRequests();
   if (ask.isEmpty() || !getQueue().canRunAppAM(
   ask.get(0).getCapability())) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Skipping allocation because maxAMShare limit would " +
-  "be exceeded");
-}
-return Resources.none();
+return true;
   }
 }
+return false;
+  }
+
+  private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Node offered to app: " + getName() + " reserved: " +
reserved);
+}
 
Collection<Priority> prioritiesToTry = (reserved) ?
 Arrays.asList(node.getReservedContainer().getReservedPriority()) :
@@ -584,8 +582,9 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 // (not scheduled) in order to promote 

[50/50] hadoop git commit: YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.

2015-06-08 Thread zjshen
YARN-3780. Should use equals when compare Resource in
RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ecbac8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ecbac8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ecbac8a

Branch: refs/heads/YARN-2928
Commit: 3ecbac8aad1d1ea88685f4b915a2605617b21b4e
Parents: 0de32f7
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 11:54:55 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecbac8a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 67a705c..da4f3b2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,6 +592,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
 and container reservation. (Zhihai Xu via kasha)
 
+YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
+(zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecbac8a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1263692..8a810cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -622,7 +622,8 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
 rmNode.httpPort = newNode.getHttpPort();
 rmNode.httpAddress = newNode.getHttpAddress();
 boolean isCapabilityChanged = false;
-if (rmNode.getTotalCapability() != newNode.getTotalCapability()) {
+if (!rmNode.getTotalCapability().equals(
+newNode.getTotalCapability())) {
   rmNode.totalCapability = newNode.getTotalCapability();
   isCapabilityChanged = true;
 }



[23/50] hadoop git commit: HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation causes NullPointerException. Contributed by Masatake Iwasaki.

2015-06-08 Thread zjshen
HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation 
causes NullPointerException. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/993bf8b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/993bf8b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/993bf8b3

Branch: refs/heads/YARN-2928
Commit: 993bf8b3e638bffcaebe445de36e2adbfd97561a
Parents: 08525ff
Author: Kihwal Lee kih...@apache.org
Authored: Thu Jun 4 12:51:00 2015 -0500
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +++
 .../apache/hadoop/hdfs/TestDFSInputStream.java  | 25 
 3 files changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d65e513..bb65105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -846,6 +846,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-3716. Purger should remove stale fsimage ckpt files
 (J.Andreina via vinayakumarb)
 
+HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream 
creation
+causes NullPointerException (Masatake Iwasaki via kihwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 8a3f730..6563d7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1533,6 +1533,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
*/
   @Override
   public synchronized boolean seekToNewSource(long targetPos) throws 
IOException {
+if (currentNode == null) {
+  return seekToBlockSource(targetPos);
+}
 boolean markedDead = deadNodes.containsKey(currentNode);
 addToDeadNodes(currentNode);
 DatanodeInfo oldNode = currentNode;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index b9ec2ce..26412c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
@@ -28,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.junit.Assume;
@@ -111,4 +114,26 @@ public class TestDFSInputStream {
 }
   }
 
+  @Test(timeout=60000)
+  public void testSeekToNewSource() throws IOException {
+Configuration conf = new Configuration();
+MiniDFSCluster cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+DistributedFileSystem fs = cluster.getFileSystem();
+Path path = new Path("/testfile");
+DFSTestUtil.createFile(fs, path, 1024, (short) 3, 0);
+DFSInputStream fin = fs.dfs.open("/testfile");
+try {
+  fin.seekToNewSource(100);
+  assertEquals(100, fin.getPos());
+  DatanodeInfo firstNode = fin.getCurrentDatanode();
+  assertNotNull(firstNode);
+  fin.seekToNewSource(100);
+  assertEquals(100, fin.getPos());
+  assertFalse(firstNode.equals(fin.getCurrentDatanode()));
+} finally {
+  

[21/50] hadoop git commit: YARN-41. The RM should handle the graceful shutdown of the NM. Contributed by Devaraj K.

2015-06-08 Thread zjshen
YARN-41. The RM should handle the graceful shutdown of the NM. Contributed by 
Devaraj K.

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/868b9ce8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/868b9ce8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/868b9ce8

Branch: refs/heads/YARN-2928
Commit: 868b9ce8ce5bdab49b6b01b6491e9778a202ed8e
Parents: 8732f97
Author: Junping Du junping...@apache.org
Authored: Thu Jun 4 04:59:27 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:50:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/api/records/NodeState.java  |  10 +-
 .../src/main/proto/yarn_protos.proto|   1 +
 .../hadoop/yarn/server/api/ResourceTracker.java |  16 ++-
 .../pb/client/ResourceTrackerPBClientImpl.java  |  18 +++
 .../service/ResourceTrackerPBServiceImpl.java   |  27 +++-
 .../UnRegisterNodeManagerRequest.java   |  38 ++
 .../UnRegisterNodeManagerResponse.java  |  30 +
 .../pb/UnRegisterNodeManagerRequestPBImpl.java  | 108 
 .../pb/UnRegisterNodeManagerResponsePBImpl.java |  70 +++
 .../src/main/proto/ResourceTracker.proto|   1 +
 .../yarn_server_common_service_protos.proto |   7 ++
 .../yarn/TestResourceTrackerPBClientImpl.java   |  34 -
 .../apache/hadoop/yarn/TestYSCRPCFactories.java |  10 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  12 ++
 .../nodemanager/NodeStatusUpdaterImpl.java  |  33 +
 .../server/nodemanager/LocalRMInterface.java|  10 ++
 .../nodemanager/MockNodeStatusUpdater.java  |   9 ++
 .../nodemanager/TestNodeStatusUpdater.java  |  44 +++
 .../TestNodeStatusUpdaterForLabels.java |   8 ++
 .../server/resourcemanager/ClusterMetrics.java  |  14 +++
 .../resourcemanager/ResourceTrackerService.java |  23 
 .../resourcemanager/rmnode/RMNodeEventType.java |   1 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  20 +++
 .../webapp/MetricsOverviewTable.java|   2 +
 .../resourcemanager/webapp/NodesPage.java   |   1 +
 .../webapp/dao/ClusterMetricsInfo.java  |   8 +-
 .../resourcemanager/TestRMNodeTransitions.java  |  14 +++
 .../TestResourceTrackerService.java | 123 ++-
 .../resourcemanager/webapp/TestNodesPage.java   |   4 +-
 .../webapp/TestRMWebServices.java   |  21 ++--
 .../hadoop/yarn/server/MiniYARNCluster.java |  10 ++
 32 files changed, 702 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 61cc501..0c76206 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -217,6 +217,9 @@ Release 2.8.0 - UNRELEASED
 YARN-160. Enhanced NodeManager to automatically obtain cpu/memory values 
from
 underlying OS when configured to do so. (Varun Vasudev via vinodkv)
 
+YARN-41. The RM should handle the graceful shutdown of the NM. (Devaraj K 
via 
+junping_du)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
index 741046c..d0344fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
@@ -46,9 +46,13 @@ public enum NodeState {
   REBOOTED,
 
   /** Node decommission is in progress */
-  DECOMMISSIONING;
-  
+  DECOMMISSIONING,
+
+  /** Node has shutdown gracefully. */
+  SHUTDOWN;
+
   public boolean isUnusable() {
-return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST);
+return (this == UNHEALTHY || this == DECOMMISSIONED
+|| this == LOST || this == SHUTDOWN);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto

[44/50] hadoop git commit: HDFS-8539. Hdfs doesnt have class debug in windows. Contributed by Anu Engineer.

2015-06-08 Thread zjshen
HDFS-8539. Hdfs doesnt have class debug in windows. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de32f74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de32f74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de32f74

Branch: refs/heads/YARN-2928
Commit: 0de32f748ef70ad1f761b351b8190467865872e8
Parents: 1e2c3de
Author: cnauroth cnaur...@apache.org
Authored: Sun Jun 7 13:01:43 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de32f74/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21f587f..853a022 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -861,6 +861,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream 
creation
 causes NullPointerException (Masatake Iwasaki via kihwal)
 
+HDFS-8539. Hdfs doesnt have class 'debug' in windows.
+(Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de32f74/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 8115349..2181e47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if %1 == --loglevel (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,6 +179,11 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
+:debug
+  set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+  goto :eof
+
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if %1 == --config (
@@ -237,4 +242,5 @@ goto :eof
   @echo.
   @echo Most commands print help when invoked w/o parameters.
 
+@rem There are also debug commands, but they don't show up in this listing.
 endlocal



[38/50] hadoop git commit: HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect errors when listing a directory. Contributed by Zhihai Xu.

2015-06-08 Thread zjshen
HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect errors 
when listing a directory. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01cd698b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01cd698b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01cd698b

Branch: refs/heads/YARN-2928
Commit: 01cd698bd5f21d01a654f7c963da6bf46e2b0005
Parents: ddd92aa
Author: Andrew Wang w...@apache.org
Authored: Fri Jun 5 13:52:21 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:00 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/util/DiskChecker.java | 24 
 .../org/apache/hadoop/util/TestDiskChecker.java | 22 ++
 3 files changed, 45 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 51579da..4b1d0d1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -637,6 +637,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12059. S3Credentials should support use of CredentialProvider.
 (Sean Busbey via wang)
 
+HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect
+errors when listing a directory. (Zhihai Xu via wang)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index 6b27ae5..a36a7a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.util;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.DirectoryIteratorException;
+import java.nio.file.Files;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -86,13 +89,26 @@ public class DiskChecker {
*/
   public static void checkDirs(File dir) throws DiskErrorException {
 checkDir(dir);
-for (File child : dir.listFiles()) {
-  if (child.isDirectory()) {
-checkDirs(child);
+IOException ex = null;
+try (DirectoryStream<java.nio.file.Path> stream =
+Files.newDirectoryStream(dir.toPath())) {
+  for (java.nio.file.Path entry: stream) {
+File child = entry.toFile();
+if (child.isDirectory()) {
+  checkDirs(child);
+}
   }
+} catch (DirectoryIteratorException de) {
+  ex = de.getCause();
+} catch (IOException ie) {
+  ex = ie;
+}
+if (ex != null) {
+  throw new DiskErrorException("I/O error when open a directory: "
+  + dir.toString(), ex);
 }
   }
-  
+
   /**
* Create the directory if it doesn't exist and check that dir is readable,
* writable and executable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index 5ab1313..de54735 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Shell;
 
@@ -180,4 +181,25 @@ public class TestDiskChecker {
 System.out.println(checkDir success:  + success);
 
   }
+
+  @Test (timeout = 30000)
+  public void testCheckDirsIOException() throws Throwable {
+  

[05/50] hadoop git commit: HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina)

2015-06-08 Thread zjshen
HDFS-8270. create() always retried with hardcoded timeout when file already 
exists with open lease (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2b41375
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2b41375
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2b41375

Branch: refs/heads/YARN-2928
Commit: a2b4137519faeace7b20cdc8da8106234b517215
Parents: f029f9b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jun 3 12:11:46 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  5 +--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 
 .../org/apache/hadoop/hdfs/NameNodeProxies.java | 43 +---
 .../apache/hadoop/hdfs/TestFileCreation.java|  6 ---
 5 files changed, 5 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index abf6452..402a547 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -936,6 +936,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
 McCabe)
 
+HDFS-8270. create() always retried with hardcoded timeout when file already
+exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fc1cd26..f4ceab3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -342,13 +342,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   this.namenode = rpcNamenode;
   dtService = null;
 } else {
-  boolean noRetries = conf.getBoolean(
-  DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
-  DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
   Preconditions.checkArgument(nameNodeUri != null,
"null URI");
   proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-  ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
+  ClientProtocol.class, nnFallbackToSimpleAuth);
   this.dtService = proxyInfo.getDelegationTokenService();
   this.namenode = proxyInfo.getProxy();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9c19f91..5bb6e53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -999,13 +998,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
  public static final String  DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = 
"dfs.client.test.drop.namenode.response.number";
   public static final int 
DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
 
-  // Create a NN proxy without retries for testing.
-  @VisibleForTesting
-  public static final String  DFS_CLIENT_TEST_NO_PROXY_RETRIES =
-  "dfs.client.test.no.proxy.retries";
-  @VisibleForTesting
-  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
-
  public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
  "dfs.client.slow.io.warning.threshold.ms";
   public static final longDFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 

[34/50] hadoop git commit: YARN-3259. FairScheduler: Trigger fairShare updates on node events. (Anubhav Dhoot via kasha)

2015-06-08 Thread zjshen
YARN-3259. FairScheduler: Trigger fairShare updates on node events. (Anubhav 
Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82a100d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82a100d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82a100d

Branch: refs/heads/YARN-2928
Commit: f82a100dae589af535c15eac97a5a4aaadede74a
Parents: 7b7063f
Author: Karthik Kambatla ka...@apache.org
Authored: Fri Jun 5 09:39:41 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:59 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FSOpDurations.java   |   6 +
 .../scheduler/fair/FairScheduler.java   |  23 +++-
 .../scheduler/fair/TestSchedulingUpdate.java| 135 +++
 4 files changed, 163 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 69efca4..d5e8bba 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3547. FairScheduler: Apps that have no resource demand should not 
participate 
 scheduling. (Xianyin Xin via kasha)
 
+YARN-3259. FairScheduler: Trigger fairShare updates on node events. 
+(Anubhav Dhoot via kasha)
+
   BUG FIXES
 
 YARN-3197. Confusing log generated by CapacityScheduler. (Varun Saxena 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
index c2282fd..20d2af9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -116,4 +117,9 @@ public class FSOpDurations implements MetricsSource {
   public void addPreemptCallDuration(long value) {
 preemptCall.add(value);
   }
+
+  @VisibleForTesting
+  public boolean hasUpdateThreadRunChanged() {
+return updateThreadRun.changed();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 07b3271..64b3f12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -103,9 +103,9 @@ import com.google.common.base.Preconditions;
  * of the root queue in the typical fair scheduling fashion. Then, the children
  * distribute the resources assigned to them to their children in the same
  * fashion.  Applications may only be scheduled on leaf queues. Queues can be
- * specified as children of other queues by placing them as sub-elements of 
their
- * parents in the fair 

[25/50] hadoop git commit: HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages. Contributed by Kazuho Fujii.

2015-06-08 Thread zjshen
HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages. Contributed 
by Kazuho Fujii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea1a48ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea1a48ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea1a48ad

Branch: refs/heads/YARN-2928
Commit: ea1a48adee9c865785d93551d2dd0db65368adbd
Parents: 868b9ce
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Jun 5 01:45:34 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:56 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 ++--
 .../src/site/markdown/MapredCommands.md  | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf35cfe..3bca0bc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -858,6 +858,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
 infinite loop. (Larry McCay via cnauroth)
 
+HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages.
+(Kazuho Fujii via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 35081a6..d7f0657 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -98,7 +98,7 @@ Commands useful for users of a hadoop cluster.
 
 ### `archive`
 
-Creates a hadoop archive. More information can be found at [Hadoop Archives 
Guide](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/HadoopArchives.html).
+Creates a hadoop archive. More information can be found at [Hadoop Archives 
Guide](../../hadoop-archives/HadoopArchives.html).
 
 ### `checknative`
 
@@ -157,7 +157,7 @@ Change the ownership and permissions on many files at once.
 
 ### `distcp`
 
-Copy file or directories recursively. More information can be found at [Hadoop 
DistCp 
Guide](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistCp.html).
+Copy file or directories recursively. More information can be found at [Hadoop 
DistCp Guide](../../hadoop-distcp/DistCp.html).
 
 ### `fs`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
index ab0dc9d..9ccee60 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
@@ -51,7 +51,7 @@ Commands useful for users of a hadoop cluster.
 ### `archive`
 
 Creates a hadoop archive. More information can be found at
-[Hadoop Archives Guide](./HadoopArchives.html).
+[Hadoop Archives Guide](../../hadoop-archives/HadoopArchives.html).
 
 ### `classpath`
 
@@ -62,7 +62,7 @@ Usage: `mapred classpath`
 ### `distcp`
 
 Copy file or directories recursively. More information can be found at
-[Hadoop DistCp Guide](./DistCp.html).
+[Hadoop DistCp Guide](../../hadoop-distcp/DistCp.html).
 
 ### `job`
 



[30/50] hadoop git commit: YARN-3766. Fixed the apps table column error of generic history web UI. Contributed by Xuan Gong.

2015-06-08 Thread zjshen
YARN-3766. Fixed the apps table column error of generic history web UI. 
Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8bed307
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8bed307
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8bed307

Branch: refs/heads/YARN-2928
Commit: e8bed3071909d5102f45b5e6ea9bb37f92b06fc7
Parents: eba031e
Author: Zhijie Shen zjs...@apache.org
Authored: Thu Jun 4 14:46:32 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:58 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/applicationhistoryservice/webapp/AHSView.java| 2 +-
 .../java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java  | 4 
 3 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1c36c9b..69efca4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -581,6 +581,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3749. We should make a copy of configuration when init MiniYARNCluster
 with multiple RMs. (Chun Chen via xgong)
 
+YARN-3766. Fixed the apps table column error of generic history web UI.
+(Xuan Gong via zjshen)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
index 152364e..65b5ac1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -40,7 +40,7 @@ public class AHSView extends TwoColumnLayout {
  protected void preHead(Page.HTML<_> html) {
 commonPreHead(html);
 set(DATATABLES_ID, "apps");
-set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit());
+set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false));
 setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
 // Set the correct title.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index ed0fe38..df63b77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -27,6 +27,10 @@ public class WebPageUtils {
 return appsTableInit(false, true);
   }
 
+  public static String appsTableInit(boolean isResourceManager) {
+return appsTableInit(false, isResourceManager);
+  }
+
   public static String appsTableInit(
   boolean isFairSchedulerPage, boolean isResourceManager) {
 // id, user, name, queue, starttime, finishtime, state, status, progress, 
ui



[48/50] hadoop git commit: YARN-3747. TestLocalDirsHandlerService should delete the created test directory logDir2. Contributed by David Moore.

2015-06-08 Thread zjshen
YARN-3747. TestLocalDirsHandlerService should delete the created test
directory logDir2. Contributed by David Moore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddf75e34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddf75e34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddf75e34

Branch: refs/heads/YARN-2928
Commit: ddf75e342ba7d502f79f72aebc6fb721b614f81d
Parents: 3ecbac8
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 15:32:13 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddf75e34/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index da4f3b2..ab0dcb9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
 (zhihai xu via devaraj)
 
+YARN-3747. TestLocalDirsHandlerService should delete the created test 
directory logDir2.
+(David Moore via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddf75e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index a045e62..c61d1f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -144,7 +144,7 @@ public class TestLocalDirsHandlerService {
 FileUtils.deleteDirectory(new File(localDir1));
 FileUtils.deleteDirectory(new File(localDir2));
 FileUtils.deleteDirectory(new File(logDir1));
-FileUtils.deleteDirectory(new File(logDir1));
+FileUtils.deleteDirectory(new File(logDir2));
 dirSvc.close();
   }
 }



[17/50] hadoop git commit: MAPREDUCE-6174. Combine common stream code into parent class for InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)

2015-06-08 Thread zjshen
MAPREDUCE-6174. Combine common stream code into parent class for 
InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e585863
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e585863
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e585863

Branch: refs/heads/YARN-2928
Commit: 2e58586316f548a8dd2effbc15d0729d1a622fe3
Parents: 2bff83c
Author: Gera Shegalov g...@apache.org
Authored: Wed Jun 3 16:26:45 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:15 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../task/reduce/IFileWrappedMapOutput.java  | 72 
 .../task/reduce/InMemoryMapOutput.java  | 26 ++-
 .../mapreduce/task/reduce/MergeManagerImpl.java |  5 +-
 .../mapreduce/task/reduce/OnDiskMapOutput.java  | 33 +
 .../mapreduce/task/reduce/TestFetcher.java  | 27 
 6 files changed, 114 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ba94324..5cc08a3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -358,6 +358,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5248. Let NNBenchWithoutMR specify the replication factor for
 its test (Erik Paulson via jlowe)
 
+MAPREDUCE-6174. Combine common stream code into parent class for
+InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
new file mode 100644
index 000..119db15
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.IFileInputStream;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+
+/**
+ * Common code for allowing MapOutput classes to handle streams.
+ *
+ * @param <K> key type for map output
+ * @param <V> value type for map output
+ */
+public abstract class IFileWrappedMapOutput<K, V> extends MapOutput<K, V> {
+  private final Configuration conf;
+  private final MergeManagerImpl<K, V> merger;
+
+  public IFileWrappedMapOutput(
+  Configuration c, MergeManagerImpl<K, V> m, TaskAttemptID mapId,
+  long size, boolean primaryMapOutput) {
+super(mapId, size, primaryMapOutput);
+conf = c;
+merger = m;
+  }
+
+  /**
+   * @return the merger
+   */
+  protected MergeManagerImpl<K, V> getMerger() {
+return merger;
+  }
+
+  protected abstract void doShuffle(
+  MapHost host, IFileInputStream iFileInputStream,
+  long compressedLength, long decompressedLength,
+  ShuffleClientMetrics metrics, Reporter reporter) throws IOException;
+
+  @Override
+  public void shuffle(MapHost host, InputStream input,
+  long compressedLength, long decompressedLength,
+  ShuffleClientMetrics 

[02/50] hadoop git commit: HDFS-8386. Improve synchronization of 'streamer' reference in DFSOutputStream. Contributed by Rakesh R.

2015-06-08 Thread zjshen
HDFS-8386. Improve synchronization of 'streamer' reference in DFSOutputStream. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd224caf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd224caf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd224caf

Branch: refs/heads/YARN-2928
Commit: bd224cafff167f713704d45244b713cb00202af3
Parents: 0a43670
Author: Andrew Wang w...@apache.org
Authored: Tue Jun 2 15:39:24 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 159 +++
 2 files changed, 92 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd224caf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0822f90..9d427ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -591,6 +591,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8489. Subclass BlockInfo to represent contiguous blocks.
 (Zhe Zhang via jing9)
 
+HDFS-8386. Improve synchronization of 'streamer' reference in
+DFSOutputStream. (Rakesh R via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd224caf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index ae5d3eb..1dc4a9f 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -139,7 +139,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   protected void checkClosed() throws IOException {
 if (isClosed()) {
-  streamer.getLastException().throwException4Close();
+  getStreamer().getLastException().throwException4Close();
 }
   }
 
@@ -148,10 +148,10 @@ public class DFSOutputStream extends FSOutputSummer
   //
   @VisibleForTesting
   public synchronized DatanodeInfo[] getPipeline() {
-if (streamer.streamerClosed()) {
+if (getStreamer().streamerClosed()) {
   return null;
 }
-DatanodeInfo[] currentNodes = streamer.getNodes();
+DatanodeInfo[] currentNodes = getStreamer().getNodes();
 if (currentNodes == null) {
   return null;
 }
@@ -293,9 +293,9 @@ public class DFSOutputStream extends FSOutputSummer
   // indicate that we are appending to an existing block
   streamer = new DataStreamer(lastBlock, stat, dfsClient, src, progress, 
checksum,
   cachingStrategy, byteArrayManager);
-  streamer.setBytesCurBlock(lastBlock.getBlockSize());
+  getStreamer().setBytesCurBlock(lastBlock.getBlockSize());
   adjustPacketChunkSize(stat);
-  streamer.setPipelineInConstruction(lastBlock);
+  getStreamer().setPipelineInConstruction(lastBlock);
 } else {
   computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
   bytesPerChecksum);
@@ -329,7 +329,7 @@ public class DFSOutputStream extends FSOutputSummer
   //
   computePacketChunkSize(0, freeInCksum);
   setChecksumBufSize(freeInCksum);
-  streamer.setAppendChunk(true);
+  getStreamer().setAppendChunk(true);
 } else {
   // if the remaining space in the block is smaller than
   // that expected size of of a packet, then create
@@ -392,36 +392,36 @@ public class DFSOutputStream extends FSOutputSummer
 }
 
 if (currentPacket == null) {
-  currentPacket = createPacket(packetSize, chunksPerPacket, 
-  streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), 
false);
+  currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer()
+  .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false);
   if (DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" +
 currentPacket.getSeqno() +
 ", src=" + src +
 ", packetSize=" + packetSize +
 ", chunksPerPacket=" + chunksPerPacket +
-", bytesCurBlock=" + streamer.getBytesCurBlock());
+", bytesCurBlock=" + getStreamer().getBytesCurBlock());
   }
 }
 
 

[12/50] hadoop git commit: Revert YARN-1462. Correct fix version from branch-2.7.1 to branch-2.8 in

2015-06-08 Thread zjshen
Revert YARN-1462. Correct fix version from branch-2.7.1 to branch-2.8 in

This reverts commit 0b5cfacde638bc25cc010cd9236369237b4e51a8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89899fed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89899fed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89899fed

Branch: refs/heads/YARN-2928
Commit: 89899fed769a42cdb2164aa17e5881086095f2fb
Parents: b9e8f79
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Jun 3 14:15:31 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89899fed/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fb9badc..ce77941 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -377,9 +377,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3467. Expose allocatedMB, allocatedVCores, and runningContainers 
metrics on 
 running Applications in RM Web UI. (Anubhav Dhoot via kasha)
 
-YARN-1462. Made RM write application tags to timeline server and exposed 
them
-to users via generic history web UI and REST API. (Xuan Gong via zjshen)
-
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -595,6 +592,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3723. Need to clearly document primaryFilter and otherInfo value type.
 (Zhijie Shen via xgong)
 
+YARN-1462. Made RM write application tags to timeline server and exposed 
them
+to users via generic history web UI and REST API. (Xuan Gong via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES



[04/50] hadoop git commit: HDFS-8521. Add VisibleForTesting annotation to BlockPoolSlice#selectReplicaToDelete. (cmccabe)

2015-06-08 Thread zjshen
HDFS-8521. Add VisibleForTesting annotation to 
BlockPoolSlice#selectReplicaToDelete. (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb3037e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb3037e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb3037e6

Branch: refs/heads/YARN-2928
Commit: fb3037e645371a8cf3ba88644203f584dea6e41d
Parents: 0f407fc
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jun 2 20:06:28 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java   | 3 +++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3037e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ce54c4..abf6452 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -862,6 +862,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8213. DFSClient should use hdfs.client.htrace HTrace configuration
 prefix rather than hadoop.htrace (cmccabe)
 
+HDFS-8521. Add VisibleForTesting annotation to
+BlockPoolSlice#selectReplicaToDelete. (cmccabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3037e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 94aaf21..d1f7c5f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -57,7 +57,9 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.io.Files;
+
 /**
  * A block pool slice represents a portion of a block pool stored on a volume. 
 
  * Taken together, all BlockPoolSlices sharing a block pool ID across a 
@@ -562,6 +564,7 @@ class BlockPoolSlice {
 return replicaToKeep;
   }
 
+  @VisibleForTesting
   static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
   final ReplicaInfo replica2) {
 ReplicaInfo replicaToKeep;



[46/50] hadoop git commit: HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from RollingWindowManager. Contributed by Brahma Reddy Battula.

2015-06-08 Thread zjshen
HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from RollingWindowManager. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee73b535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee73b535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee73b535

Branch: refs/heads/YARN-2928
Commit: ee73b535dd0ae25f43c34b95e5565f293fde1b19
Parents: bcf4319
Author: Xiaoyu Yao x...@apache.org
Authored: Sat Jun 6 18:47:45 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../server/namenode/top/window/RollingWindowManager.java | 8 +++-
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee73b535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7f7f98..21f587f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -606,6 +606,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8432. Introduce a minimum compatible layout version to allow downgrade
 in more rolling upgrade use cases. (cnauroth)
 
+HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from
+RollingWindowManager. (Brahma Reddy Battula via xyao)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee73b535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index 4759cc8..63438ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -187,11 +187,9 @@ public class RollingWindowManager {
*/
   public TopWindow snapshot(long time) {
 TopWindow window = new TopWindow(windowLenMs);
-if (LOG.isDebugEnabled()) {
-  Set<String> metricNames = metricMap.keySet();
-  LOG.debug("iterating in reported metrics, size={} values={}",
-  metricNames.size(), metricNames);
-}
+Set<String> metricNames = metricMap.keySet();
+LOG.debug("iterating in reported metrics, size={} values={}",
+metricNames.size(), metricNames);
 for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
   String metricName = entry.getKey();
   RollingWindowMap rollingWindows = entry.getValue();



[22/50] hadoop git commit: YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent to another. Contributed by Wangda Tan

2015-06-08 Thread zjshen
YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent to 
another. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94db4f21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94db4f21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94db4f21

Branch: refs/heads/YARN-2928
Commit: 94db4f218b4376ba8547831a371366d409fd9ca1
Parents: 993bf8b
Author: Jian He jia...@apache.org
Authored: Thu Jun 4 10:52:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:56 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../scheduler/capacity/CapacityScheduler.java   | 11 +--
 .../scheduler/capacity/TestQueueParsing.java| 33 
 3 files changed, 45 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 83aa12f..972066d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -717,6 +717,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3733. Fix DominantRC#compare() does not work as expected if 
 cluster resource is empty. (Rohith Sharmaks via wangda)
 
+YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent
+to another. (Wangda Tan via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 06d282d..f1d0f9c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -551,8 +551,15 @@ public class CapacityScheduler extends
 // check that all static queues are included in the newQueues list
 for (Map.Entry<String, CSQueue> e : queues.entrySet()) {
   if (!(e.getValue() instanceof ReservationQueue)) {
-if (!newQueues.containsKey(e.getKey())) {
-  throw new IOException(e.getKey() + " cannot be found during refresh!");
+String queueName = e.getKey();
+CSQueue oldQueue = e.getValue();
+CSQueue newQueue = newQueues.get(queueName); 
+if (null == newQueue) {
+  throw new IOException(queueName + " cannot be found during refresh!");
+} else if (!oldQueue.getQueuePath().equals(newQueue.getQueuePath())) {
+  throw new IOException(queueName + " is moved from:"
+  + oldQueue.getQueuePath() + " to:" + newQueue.getQueuePath()
+  + " after refresh, which is not allowed.");
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
index 8d04700..198bd4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
@@ -865,4 +865,37 @@ public class TestQueueParsing {
 

[10/50] hadoop git commit: HDFS-8523. Remove usage information on unsupported operation 'fsck -showprogress' from branch-2 (Contributed by J.Andreina)

2015-06-08 Thread zjshen
HDFS-8523. Remove usage information on unsupported operation 'fsck 
-showprogress' from branch-2 (Contributed by J.Andreina)

Merged CHANGES.txt from branch-2.7

(cherry picked from commit 0ed9c2d8fec93b5dac9c305eda272ad8dfd869a9)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md

(cherry picked from commit dd98cfd328dddb01a1220786d28a80195021611b)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de67969
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de67969
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de67969

Branch: refs/heads/YARN-2928
Commit: 6de679697cc91d4a337d420b5e4e5ad994df150b
Parents: b8dd317
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jun 3 15:15:44 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de67969/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8cbe0e5..3e25129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -942,6 +942,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8270. create() always retried with hardcoded timeout when file already
 exists with open lease (J.Andreina via vinayakumarb)
 
+HDFS-8523. Remove usage information on unsupported operation
+fsck -showprogress from branch-2 (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[32/50] hadoop git commit: MAPREDUCE-6377. JHS sorting on state column not working in webUi. Contributed by zhihai xu.

2015-06-08 Thread zjshen
MAPREDUCE-6377. JHS sorting on state column not working in webUi.
Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8f7f173
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8f7f173
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8f7f173

Branch: refs/heads/YARN-2928
Commit: c8f7f173778206e3aab05fe30572426bc6ae4001
Parents: 42ba35b
Author: Devaraj K deva...@apache.org
Authored: Fri Jun 5 15:50:16 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:58 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java   | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8f7f173/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 391303e..d78fb9c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -474,6 +474,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview.
 (Siqi Li via gera)
 
+MAPREDUCE-6377. JHS sorting on state column not working in webUi.
+(zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8f7f173/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
index 59b7aa6..229bbb0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
@@ -87,10 +87,11 @@ public class HsView extends TwoColumnLayout {
 append(", bProcessing: true").
 
 // Sort by id upon page load
-append(", aaSorting: [[2, 'desc']]").
+append(", aaSorting: [[3, 'desc']]").
 append(", aoColumnDefs:[").
 // Maps Total, Maps Completed, Reduces Total and Reduces Completed
-append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7, 8, 9, 10 ] }").
+append("{'sType':'numeric', 'bSearchable': false" +
+", 'aTargets': [ 8, 9, 10, 11 ] }").
 append("]}").
 toString();
   }



[20/50] hadoop git commit: HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files and is created by 'git diff --no-prefix'. Contributed by Kengo Seki.

2015-06-08 Thread zjshen
HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files and is 
created by 'git diff --no-prefix'. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb2903ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb2903ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb2903ca

Branch: refs/heads/YARN-2928
Commit: bb2903ca235015e05eba60c1077f70f54c8de2df
Parents: c439926
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Jun 4 11:14:55 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:16 2015 -0700

--
 dev-support/smart-apply-patch.sh| 47 
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 22 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb2903ca/dev-support/smart-apply-patch.sh
--
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index 449fc22..be29c47 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -12,7 +12,7 @@
 #   limitations under the License.
 
 #
-# Determine if the patch file is a git diff file with prefixes.
+# Determine if the git diff patch file has prefixes.
 # These files are generated via git diff *without* the --no-prefix option.
 #
 # We can apply these patches more easily because we know that the a/ and b/
@@ -21,28 +21,13 @@
 # And of course, we know that the patch file was generated using git, so we
 # know git apply can handle it properly.
 #
-# Arguments: file name.
-# Return: 0 if it is a git diff; 1 otherwise.
+# Arguments: git diff file name.
+# Return: 0 if it is a git diff with prefix; 1 otherwise.
 #
-is_git_diff_with_prefix() {
-  DIFF_TYPE=unknown
-  while read -r line; do
-if [[ $line =~ ^diff\  ]]; then
-  if [[ $line =~ ^diff\ \-\-git ]]; then
-DIFF_TYPE=git
-  else
-return 1 # All diff lines must be diff --git lines.
-  fi
-fi
-if [[ $line =~ ^\+\+\+\  ]] ||
-   [[ $line =~ ^\-\-\-\  ]]; then
-  if ! [[ $line =~ ^[ab]/ || $line =~ ^/dev/null ]]; then
-return 1 # All +++ and --- lines must start with a/ or b/ or be 
/dev/null.
-  fi
-fi
-  done  $1
-  [ x$DIFF_TYPE == xgit ] || return 1
-  return 0 # return true (= 0 in bash)
+has_prefix() {
+  awk '/^diff --git / { if ($3 !~ "^a/" || $4 !~ "^b/") { exit 1 } }
+/^\+{3}|-{3} / { if ($2 !~ "^[ab]/" && $2 !~ "^/dev/null") { exit 1 } }' "$1"
+  return $?
 }
 
 PATCH_FILE=$1
@@ -100,15 +85,21 @@ if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ 
${ISSUE_RE} ]]; then
   PATCH_FILE=${PFILE}
 fi
 
-# Special case for git-diff patches without --no-prefix
-if is_git_diff_with_prefix $PATCH_FILE; then
-  GIT_FLAGS=--binary -p1 -v
+# Case for git-diff patches
+if grep -q "^diff --git" "${PATCH_FILE}"; then
+  GIT_FLAGS=--binary -v
+  if has_prefix $PATCH_FILE; then
+GIT_FLAGS=$GIT_FLAGS -p1
+  else
+GIT_FLAGS=$GIT_FLAGS -p0
+  fi
   if [[ -z $DRY_RUN ]]; then
-  GIT_FLAGS=$GIT_FLAGS --stat --apply 
-  echo Going to apply git patch with: git apply ${GIT_FLAGS}
+GIT_FLAGS=$GIT_FLAGS --stat --apply
+echo Going to apply git patch with: git apply ${GIT_FLAGS}
   else
-  GIT_FLAGS=$GIT_FLAGS --check 
+GIT_FLAGS=$GIT_FLAGS --check
   fi
+  # shellcheck disable=SC2086
   git apply ${GIT_FLAGS} ${PATCH_FILE}
   exit $?
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb2903ca/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c1fe41..53bb150 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -814,6 +814,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh
 is modified. (Kengo Seki via aajisaka)
 
+HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files
+and is created by 'git diff --no-prefix'. (Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[39/50] hadoop git commit: HADOOP-12055. Deprecate usage of NativeIO#link. Contributed by Andrew Wang.

2015-06-08 Thread zjshen
HADOOP-12055. Deprecate usage of NativeIO#link. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd8bd6bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd8bd6bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd8bd6bc

Branch: refs/heads/YARN-2928
Commit: cd8bd6bc6fa4c1bfdab374f01ac769f4f0cdbc9e
Parents: f8153dd
Author: cnauroth cnaur...@apache.org
Authored: Sat Jun 6 09:17:03 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:00 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  2 ++
 .../java/org/apache/hadoop/io/nativeio/NativeIO.java | 11 +++
 .../apache/hadoop/hdfs/server/datanode/DataStorage.java  |  2 +-
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4b1d0d1..eacc3be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -640,6 +640,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect
 errors when listing a directory. (Zhihai Xu via wang)
 
+HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index bc6e62a..688b955 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -881,6 +881,17 @@ public class NativeIO {
 }
   }
 
+  /**
+   * Creates a hardlink dst that points to src.
+   *
+   * This is deprecated since JDK7 NIO can create hardlinks via the
+   * {@link java.nio.file.Files} API.
+   *
+   * @param src source file
+   * @param dst hardlink location
+   * @throws IOException
+   */
+  @Deprecated
   public static void link(File src, File dst) throws IOException {
 if (!nativeLoaded) {
   HardLink.createHardLink(src, dst);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 089e032..0bd08dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1044,7 +1044,7 @@ public class DataStorage extends Storage {
   idBasedLayoutSingleLinks.size());
   for (int j = iCopy; j  upperBound; j++) {
 LinkArgs cur = idBasedLayoutSingleLinks.get(j);
-NativeIO.link(cur.src, cur.dst);
+HardLink.createHardLink(cur.src, cur.dst);
   }
   return null;
 }



[06/50] hadoop git commit: HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh is modified. Contributed by Kengo Seki.

2015-06-08 Thread zjshen
HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh is 
modified. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f029f9b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f029f9b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f029f9b9

Branch: refs/heads/YARN-2928
Commit: f029f9b95a65372ed52589e5b9c9eda53b311b6b
Parents: fb3037e
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Jun 3 15:01:02 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:12 2015 -0700

--
 dev-support/test-patch.sh   | 7 ---
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f029f9b9/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 1409467..cd91a5c 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1473,7 +1473,8 @@ function apply_patch_file
 }
 
 
-## @description  If this patches actually patches test-patch.sh, then
+## @description  If this actually patches the files used for the QA process
+## @description  under dev-support and its subdirectories, then
 ## @description  run with the patched version for the test.
 ## @audience private
 ## @stabilityevolving
@@ -1489,7 +1490,7 @@ function check_reexec
   fi
 
   if [[ ! ${CHANGED_FILES} =~ dev-support/test-patch
-  || ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
+  && ! ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
 return
   fi
 
@@ -1510,7 +1511,7 @@ function check_reexec
 
 rm ${commentfile} 2/dev/null
 
-echo (!) A patch to test-patch or smart-apply-patch has been detected.  
 ${commentfile}
+echo (!) A patch to the files used for the QA process has been detected. 
  ${commentfile}
 echo Re-executing against the patched versions to perform further tests. 
  ${commentfile}
 echo The console is at ${BUILD_URL}console in case of problems.  
${commentfile}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f029f9b9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a0e6e90..5c1fe41 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -811,6 +811,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12042. Users may see TrashPolicy if hdfs dfs -rm is run
 (Andreina J via vinayakumarb)
 
+HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh
+is modified. (Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[49/50] hadoop git commit: HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers trying to use them. (Brahma Reddy Battula via stevel)

2015-06-08 Thread zjshen
HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers 
trying to use them. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77e5bae7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77e5bae7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77e5bae7

Branch: refs/heads/YARN-2928
Commit: 77e5bae7a19c4aeaf1adbb7034d488f4299f0447
Parents: ddf75e3
Author: Steve Loughran ste...@apache.org
Authored: Mon Jun 8 13:02:26 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:02 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Client.java  | 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e5bae7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index eacc3be..79f3178 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -834,6 +834,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11924. Tolerate JDK-8047340-related exceptions in
 Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
 
+HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
+callers trying to use them. (Brahma Reddy Battula via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e5bae7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index feb811e..6996a51 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1484,7 +1484,13 @@ public class Client {
   }
 });
   } catch (ExecutionException e) {
-throw new IOException(e);
+Throwable cause = e.getCause();
+// the underlying exception should normally be IOException
+if (cause instanceof IOException) {
+  throw (IOException) cause;
+} else {
+  throw new IOException(cause);
+}
   }
   if (connection.addCall(call)) {
 break;



[40/50] hadoop git commit: HADOOP-12059. S3Credentials should support use of CredentialProvider. Contributed by Sean Busbey.

2015-06-08 Thread zjshen
HADOOP-12059. S3Credentials should support use of CredentialProvider. 
Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddd92aa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddd92aa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddd92aa8

Branch: refs/heads/YARN-2928
Commit: ddd92aa8d515742fe214848a9cdaa1517f7f6349
Parents: 9d0d5dc
Author: Andrew Wang w...@apache.org
Authored: Fri Jun 5 13:11:01 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:00 2015 -0700

--
 .gitignore  |   1 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/security/ProviderUtils.java   |  30 ++
 .../alias/AbstractJavaKeyStoreProvider.java |  10 +-
 .../alias/LocalJavaKeyStoreProvider.java|  25 -
 .../alias/TestCredentialProviderFactory.java|  17 ++-
 .../org/apache/hadoop/fs/s3/S3Credentials.java  |  10 +-
 .../apache/hadoop/fs/s3/TestS3Credentials.java  | 107 ++-
 8 files changed, 195 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 779f507..cde198e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,5 +22,6 @@ 
hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 yarnregistry.pdf
+hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5f4bdb8..51579da 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -634,6 +634,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12037. Fix wrong classname in example configuration of hadoop-auth
 documentation. (Masatake Iwasaki via wang)
 
+HADOOP-12059. S3Credentials should support use of CredentialProvider.
+(Sean Busbey via wang)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
index 97d656d..b764506 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
@@ -19,8 +19,11 @@
 package org.apache.hadoop.security;
 
 import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
+import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
 
 public class ProviderUtils {
   /**
@@ -49,4 +52,31 @@ public class ProviderUtils {
 }
 return new Path(result.toString());
   }
+
+  /**
+   * Mangle given local java keystore file URI to allow use as a
+   * LocalJavaKeyStoreProvider.
+   * @param localFile absolute URI with file scheme and no authority component.
+   *  i.e. return of File.toURI,
+   *  e.g. file:///home/larry/creds.jceks
+   * @return URI of the form localjceks://file/home/larry/creds.jceks
+   * @throws IllegalArgumentException if localFile isn't not a file uri or if 
it
+   *  has an authority component.
+   * @throws URISyntaxException if the wrapping process violates RFC 2396
+   */
+  public static URI nestURIForLocalJavaKeyStoreProvider(final URI localFile)
+  throws URISyntaxException {
+if (!("file".equals(localFile.getScheme()))) {
+  throw new IllegalArgumentException("passed URI had a scheme other than " +
+  "file.");
+}
+if (localFile.getAuthority() != null) {
+  throw new IllegalArgumentException("passed URI must not have an " +
+  "authority component. For non-local keystores, please use " +
+  JavaKeyStoreProvider.class.getName());
+}
+}
+return 

[15/50] hadoop git commit: MAPREDUCE-5965. Hadoop streaming throws error if list of input files is high. Error is: error=7, Argument list too long at if number of input file is high (wilfreds via rk

2015-06-08 Thread zjshen
MAPREDUCE-5965. Hadoop streaming throws error if list of input files is high. 
Error is: error=7, Argument list too long at if number of input file is high 
(wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4399269
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4399269
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4399269

Branch: refs/heads/YARN-2928
Commit: c43992691ad19de62d0ceb95346c6f30c9b267b8
Parents: 8d39b34
Author: Robert Kanter rkan...@apache.org
Authored: Wed Jun 3 18:41:45 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:15 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt |  4 
 .../org/apache/hadoop/streaming/PipeMapRed.java  | 19 +++
 .../org/apache/hadoop/streaming/StreamJob.java   |  5 -
 .../src/site/markdown/HadoopStreaming.md.vm  |  9 +
 4 files changed, 28 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5cc08a3..9fa6c5a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -467,6 +467,10 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6374. Distributed Cache File visibility should check permission
 of full path (Chang Li via jlowe)
 
+MAPREDUCE-5965. Hadoop streaming throws error if list of input files is
+high. Error is: error=7, Argument list too long at if number of input
+file is high (wilfreds via rkanter)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
index f47e756..77c7252 100644
--- 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
+++ 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
@@ -19,8 +19,7 @@
 package org.apache.hadoop.streaming;
 
 import java.io.*;
-import java.util.Map;
-import java.util.Iterator;
+import java.util.Map.Entry;
 import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Properties;
@@ -238,13 +237,17 @@ public abstract class PipeMapRed {
   void addJobConfToEnvironment(JobConf jobconf, Properties env) {
 JobConf conf = new JobConf(jobconf);
 conf.setDeprecatedProperties();
-Iterator it = conf.iterator();
-while (it.hasNext()) {
-  Map.Entry en = (Map.Entry) it.next();
-  String name = (String) en.getKey();
-  //String value = (String)en.getValue(); // does not apply variable 
expansion
-  String value = conf.get(name); // does variable expansion 
+int lenLimit = conf.getInt("stream.jobconf.truncate.limit", -1);
+
+for (Entry<String, String> confEntry: conf) {
+  String name = confEntry.getKey();
+  String value = conf.get(name); // does variable expansion
   name = safeEnvVarName(name);
+  if (lenLimit > -1 && value.length() > lenLimit) {
+LOG.warn("Environment variable " + name + " truncated to " + lenLimit
++ " to  fit system limits.");
+value = value.substring(0, lenLimit);
+  }
   envPut(env, name, value);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
index 7ff5641..118e0fb 100644
--- 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
+++ 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
@@ -617,7 +617,10 @@ public class StreamJob implements Tool {
 /path/my-hadoop-streaming.jar);
 System.out.println(For more details about jobconf parameters see:);
 System.out.println(  http://wiki.apache.org/hadoop/JobConfFile;);
-System.out.println("To set an environement variable in a streaming " +
+System.out.println("Truncate the values of the job configuration copied" +
+" to the environment at the given length:");
+System.out.println(   -D 

[45/50] hadoop git commit: HDFS-8432. Introduce a minimum compatible layout version to allow downgrade in more rolling upgrade use cases. Contributed by Chris Nauroth.

2015-06-08 Thread zjshen
HDFS-8432. Introduce a minimum compatible layout version to allow downgrade in 
more rolling upgrade use cases. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcf4319c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcf4319c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcf4319c

Branch: refs/heads/YARN-2928
Commit: bcf4319c4128b3beb554eed9c0950e6f2c70be29
Parents: cd8bd6b
Author: cnauroth cnaur...@apache.org
Authored: Sat Jun 6 09:43:47 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/LayoutVersion.java |  51 +-
 .../hdfs/server/namenode/BackupImage.java   |   2 +-
 .../hdfs/server/namenode/Checkpointer.java  |   4 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  24 ++---
 .../hadoop/hdfs/server/namenode/FSImage.java|  20 ++--
 .../server/namenode/FSImageFormatProtobuf.java  |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 100 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   3 +-
 .../server/namenode/NameNodeLayoutVersion.java  |  46 ++---
 .../hdfs/server/namenode/SecondaryNameNode.java |   4 +-
 .../hadoop/hdfs/protocol/TestLayoutVersion.java | 101 ++-
 .../hdfs/server/namenode/CreateEditsLog.java|   2 +-
 .../hdfs/server/namenode/FSImageTestUtil.java   |   2 +-
 .../hdfs/server/namenode/TestEditLog.java   |  16 +--
 .../server/namenode/TestFSEditLogLoader.java|   2 +-
 16 files changed, 306 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 72ab17b..f7f7f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -603,6 +603,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all
 block replicas. (Eddy Xu via wang)
 
+HDFS-8432. Introduce a minimum compatible layout version to allow downgrade
+in more rolling upgrade use cases. (cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 349f72c..c893744 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -162,14 +162,22 @@ public class LayoutVersion {
   public static class FeatureInfo {
 private final int lv;
 private final int ancestorLV;
+private final Integer minCompatLV;
 private final String description;
 private final boolean reserved;
 private final LayoutFeature[] specialFeatures;
 
 public FeatureInfo(final int lv, final int ancestorLV, final String 
description,
 boolean reserved, LayoutFeature... specialFeatures) {
+  this(lv, ancestorLV, null, description, reserved, specialFeatures);
+}
+
+public FeatureInfo(final int lv, final int ancestorLV, Integer minCompatLV,
+final String description, boolean reserved,
+LayoutFeature... specialFeatures) {
   this.lv = lv;
   this.ancestorLV = ancestorLV;
+  this.minCompatLV = minCompatLV;
   this.description = description;
   this.reserved = reserved;
   this.specialFeatures = specialFeatures;
@@ -191,7 +199,20 @@ public class LayoutVersion {
   return ancestorLV;
 }
 
-/** 
+/**
+ * Accessor method for feature minimum compatible layout version.  If the
+ * feature does not define a minimum compatible layout version, then this
+ * method returns the feature's own layout version.  This would indicate
+ * that the feature cannot provide compatibility with any prior layout
+ * version.
+ *
+ * @return int minimum compatible LV value
+ */
+public int getMinimumCompatibleLayoutVersion() {
+  return minCompatLV != null ? minCompatLV : lv;
+}
+
+/**
  * Accessor method for feature description 
  * @return String feature description 
  */
@@ 

[13/50] hadoop git commit: MAPREDUCE-6374. Distributed Cache File visibility should check permission of full path. Contributed by Chang Li

2015-06-08 Thread zjshen
MAPREDUCE-6374. Distributed Cache File visibility should check permission of 
full path. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95dd42b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95dd42b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95dd42b4

Branch: refs/heads/YARN-2928
Commit: 95dd42b458d026371eb0c4109dff6d8da654f206
Parents: d6e1fd0
Author: Jason Lowe jl...@apache.org
Authored: Wed Jun 3 20:19:27 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:14 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../ClientDistributedCacheManager.java  |  1 +
 .../TestClientDistributedCacheManager.java  | 28 
 3 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c0df835..ba94324 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -461,6 +461,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6204. TestJobCounters should use new properties instead of
 JobConf.MAPRED_TASK_JAVA_OPTS. (Sam Liu via ozawa)
 
+MAPREDUCE-6374. Distributed Cache File visibility should check permission
+of full path (Chang Li via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
index 23f3cfc..c15e647 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
@@ -236,6 +236,7 @@ public class ClientDistributedCacheManager {
  Map&lt;URI, FileStatus&gt; statCache) throws IOException {
 FileSystem fs = FileSystem.get(uri, conf);
 Path current = new Path(uri.getPath());
+current = fs.makeQualified(current);
 //the leaf level file should be readable by others
 if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
   return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
index 4824ba3..902cbfc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
@@ -47,9 +48,13 @@ public class TestClientDistributedCacheManager {
  new File(System.getProperty("test.build.data", "/tmp")).toURI()
  .toString().replace(' ', '+');
   
+  private static final String TEST_VISIBILITY_DIR =
  new File(TEST_ROOT_DIR, "TestCacheVisibility").toURI()
+  .toString().replace(' ', '+');
   private FileSystem fs;
   private Path firstCacheFile;
   private Path secondCacheFile;
+  private 

[18/50] hadoop git commit: MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview. (Siqi Li via gera)

2015-06-08 Thread zjshen
MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview. 
(Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8732f97c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8732f97c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8732f97c

Branch: refs/heads/YARN-2928
Commit: 8732f97cab213d54c5aa1d0e1ecfedbe758c2e33
Parents: df96753
Author: Gera Shegalov g...@apache.org
Authored: Wed Jun 3 23:41:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8732f97c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9fa6c5a..391303e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -471,6 +471,9 @@ Release 2.8.0 - UNRELEASED
 high. Error is: error=7, Argument list too long at if number of input
 file is high (wilfreds via rkanter)
 
+MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview.
+(Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8732f97c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index f3341a6..dbd1dee 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -101,7 +101,7 @@ public class HsJobBlock extends HtmlBlock {
   for(String diag: diagnostics) {
 b.append(addTaskLinks(diag));
   }
-  infoBlock._("Diagnostics:", b.toString());
+  infoBlock._r("Diagnostics:", b.toString());
 }
 
 if(job.getNumMaps()  0) {



[03/50] hadoop git commit: HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (Contributed by surendra singh lilhore)

2015-06-08 Thread zjshen
HDFS-8470. fsimage loading progress should update inode, delegation token and 
cache pool count. (Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8dd3170
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8dd3170
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8dd3170

Branch: refs/heads/YARN-2928
Commit: b8dd3170c89333c8d2fc3b231773769d31473703
Parents: a2b4137
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jun 3 14:24:55 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/FSImageFormatPBINode.java   | 15 --
 .../server/namenode/FSImageFormatProtobuf.java  | 30 ++--
 3 files changed, 36 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 402a547..8cbe0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -840,6 +840,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8256. -storagepolicies , -blockId ,-replicaDetails  options are 
missed
 out in usage and from documentation (J.Andreina via vinayakumarb)
 
+HDFS-8470. fsimage loading progress should update inode, delegation token 
and
+cache pool count. (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 1c14220..e8378e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -58,6 +58,10 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFea
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import 
org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
@@ -251,11 +255,15 @@ public final class FSImageFormatPBINode {
   }
 }
 
-void loadINodeSection(InputStream in) throws IOException {
+void loadINodeSection(InputStream in, StartupProgress prog,
+Step currentStep) throws IOException {
   INodeSection s = INodeSection.parseDelimitedFrom(in);
   fsn.dir.resetLastInodeId(s.getLastInodeId());
-  LOG.info("Loading " + s.getNumInodes() + " INodes.");
-  for (int i = 0; i &lt; s.getNumInodes(); ++i) {
+  long numInodes = s.getNumInodes();
+  LOG.info("Loading " + numInodes + " INodes.");
+  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
+  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+  for (int i = 0; i &lt; numInodes; ++i) {
 INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
 if (p.getId() == INodeId.ROOT_INODE_ID) {
   loadRootINode(p);
@@ -263,6 +271,7 @@ public final class FSImageFormatPBINode {
   INode n = loadINode(p);
   dir.addToInodeMap(n);
 }
+counter.increment();
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
 

[36/50] hadoop git commit: MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at the end. (Arun Suresh via kasha)

2015-06-08 Thread zjshen
MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at 
the end. (Arun Suresh via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b7063f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b7063f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b7063f2

Branch: refs/heads/YARN-2928
Commit: 7b7063f2e8960053c41f12a39b2557953a2ddac3
Parents: a0962cd
Author: Karthik Kambatla ka...@apache.org
Authored: Fri Jun 5 09:14:06 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:59 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/Task.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7063f2/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 12e3a3f..e7c02c0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -520,6 +520,9 @@ Release 2.7.1 - UNRELEASED
 copySucceeded() in one thread and copyFailed() in another thread on the
 same host. (Junping Du via ozawa)
 
+MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field 
at 
+the end. (Arun Suresh via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7063f2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index c07d517..673f183 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -513,8 +513,8 @@ abstract public class Task implements Writable, 
Configurable {
 out.writeBoolean(taskCleanup);
 Text.writeString(out, user);
 out.writeInt(encryptedSpillKey.length);
-out.write(encryptedSpillKey);
 extraData.write(out);
+out.write(encryptedSpillKey);
   }
   
   public void readFields(DataInput in) throws IOException {
@@ -541,8 +541,8 @@ abstract public class Task implements Writable, 
Configurable {
 user = StringInterner.weakIntern(Text.readString(in));
 int len = in.readInt();
 encryptedSpillKey = new byte[len];
-in.readFully(encryptedSpillKey);
 extraData.readFields(in);
+in.readFully(encryptedSpillKey);
   }
 
   @Override



[19/50] hadoop git commit: HADOOP-12019. update BUILDING.txt to include python for 'mvn site' in windows (Contributed by Vinayakumar B)

2015-06-08 Thread zjshen
HADOOP-12019. update BUILDING.txt to include python for 'mvn site' in windows 
(Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df96753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df96753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df96753b

Branch: refs/heads/YARN-2928
Commit: df96753bf14854858476661e1c9b23062983982f
Parents: bb2903c
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Jun 4 10:42:52 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:16 2015 -0700

--
 BUILDING.txt| 1 +
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df96753b/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index de0e0e8..2aeade4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -282,6 +282,7 @@ Requirements:
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 * Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
   tools must be present on your PATH.
+* Python ( for generation of docs using 'mvn site')
 
 Unix command-line tools are also included with the Windows Git package which
 can be downloaded from http://git-scm.com/download/win.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df96753b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 53bb150..cf35cfe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -817,6 +817,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files
 and is created by 'git diff --no-prefix'. (Kengo Seki via aajisaka)
 
+HADOOP-12019. update BUILDING.txt to include python for 'mvn site'
+in windows (vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[16/50] hadoop git commit: YARN-3749. We should make a copy of configuration when init MiniYARNCluster with multiple RMs. Contributed by Chun Chen

2015-06-08 Thread zjshen
YARN-3749. We should make a copy of configuration when init
MiniYARNCluster with multiple RMs. Contributed by Chun Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d39b344
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d39b344
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d39b344

Branch: refs/heads/YARN-2928
Commit: 8d39b344afe459309d73fe647e3459addf0415a4
Parents: 2e58586
Author: Xuan xg...@apache.org
Authored: Wed Jun 3 17:20:15 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  2 +-
 .../hadoop/yarn/client/ProtocolHATestBase.java  | 26 ++
 ...estApplicationMasterServiceProtocolOnHA.java | 10 +++---
 .../hadoop/yarn/client/TestRMFailover.java  | 26 ++
 .../hadoop/yarn/conf/TestYarnConfiguration.java | 22 
 .../ApplicationMasterService.java   | 19 +-
 .../yarn/server/resourcemanager/HATestUtil.java | 38 
 .../resourcemanager/TestRMEmbeddedElector.java  | 24 ++---
 .../hadoop/yarn/server/MiniYARNCluster.java | 22 +++-
 .../hadoop/yarn/server/TestMiniYarnCluster.java | 37 +++
 11 files changed, 138 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 95a2325..61cc501 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -572,6 +572,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)
 
+YARN-3749. We should make a copy of configuration when init MiniYARNCluster
+with multiple RMs. (Chun Chen via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ba07c80..e4ae2b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1991,7 +1991,7 @@ public class YarnConfiguration extends Configuration {
   public InetSocketAddress updateConnectAddr(String name,
  InetSocketAddress addr) {
 String prefix = name;
-if (HAUtil.isHAEnabled(this)) {
+if (HAUtil.isHAEnabled(this) &amp;&amp; 
getServiceAddressConfKeys(this).contains(name)) {
   prefix = HAUtil.addSuffix(prefix, HAUtil.getRMHAId(this));
 }
 return super.updateConnectAddr(prefix, addr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 903dd94..75e6cee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -36,6 +36,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -101,7 +102,6 @@ import 
org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 

[01/50] hadoop git commit: HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to BlockPlacementPolicyRackFaultTolerant. (wang)

2015-06-08 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 d88f30ba5 -&gt; 0a3c14782


HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to 
BlockPlacementPolicyRackFaultTolerant. (wang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f407fc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f407fc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f407fc8

Branch: refs/heads/YARN-2928
Commit: 0f407fc80e228027e5eb01c620d3dd8b47004335
Parents: bd224ca
Author: Andrew Wang w...@apache.org
Authored: Tue Jun 2 15:48:26 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../BlockPlacementPolicyRackFaultTolarent.java  | 154 --
 .../BlockPlacementPolicyRackFaultTolerant.java  | 154 ++
 ...stBlockPlacementPolicyRackFaultTolarent.java | 209 ---
 ...stBlockPlacementPolicyRackFaultTolerant.java | 209 +++
 5 files changed, 366 insertions(+), 363 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9d427ff..2ce54c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -594,6 +594,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8386. Improve synchronization of 'streamer' reference in
 DFSOutputStream. (Rakesh R via wang)
 
+HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
+BlockPlacementPolicyRackFaultTolerant. (wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
deleted file mode 100644
index 4dbf384..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
-
-import java.util.*;
-
-/**
- * The class is responsible for choosing the desired number of targets
- * for placing block replicas.
- * The strategy is that it tries its best to place the replicas to most racks.
- */
-@InterfaceAudience.Private
-public class BlockPlacementPolicyRackFaultTolarent extends 
BlockPlacementPolicyDefault {
-
-  @Override
-  protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {
-int clusterSize = clusterMap.getNumOfLeaves();
-int totalNumOfReplicas = numOfChosen + numOfReplicas;
-if (totalNumOfReplicas &gt; clusterSize) {
-  numOfReplicas -= (totalNumOfReplicas-clusterSize);
-  totalNumOfReplicas = clusterSize;
-}
-// No calculation needed when there is only one rack or picking one node.
-int numOfRacks = clusterMap.getNumOfRacks();
-if (numOfRacks == 1 || totalNumOfReplicas &lt;= 1) {
-  return new int[] {numOfReplicas, totalNumOfReplicas};
-}
-if(totalNumOfReplicas&lt;numOfRacks){
-  return new int[] {numOfReplicas, 1};
-}
-int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
-return new int[] {numOfReplicas, 

[42/50] hadoop git commit: MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections. Contributed by Chang Li

2015-06-08 Thread zjshen
MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections. 
Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8153dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8153dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8153dd6

Branch: refs/heads/YARN-2928
Commit: f8153dd6bf8e72b1c0611a650cc5a55e1c66971b
Parents: 33c0302
Author: Jason Lowe jl...@apache.org
Authored: Fri Jun 5 22:38:31 2015 +
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:00 2015 -0700

--
 .../src/main/conf/log4j.properties  | 21 
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../apache/hadoop/mapred/ShuffleHandler.java| 15 --
 3 files changed, 29 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8153dd6/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 3a0a3ad..dcffead 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -67,7 +67,7 @@ log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p 
%c: %m%n
 
 #
 # console
-# Add console to rootlogger above if you want to use this 
+# Add console to rootlogger above if you want to use this
 #
 
 log4j.appender.console=org.apache.log4j.ConsoleAppender
@@ -110,7 +110,7 @@ hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
 hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
 log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
@@ -120,7 +120,7 @@ 
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
 #
 # Daily Rolling Security appender
 #
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
@@ -184,9 +184,9 @@ log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
 #
-# Job Summary Appender 
+# Job Summary Appender
 #
-# Use following logger to send summary to separate file defined by 
+# Use following logger to send summary to separate file defined by
 # hadoop.mapreduce.jobsummary.log.file :
 # hadoop.mapreduce.jobsummary.logger=INFO,JSA
 # 
@@ -204,7 +204,12 @@ 
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduc
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 #
-# Yarn ResourceManager Application Summary Log 
+# shuffle connection log from shuffleHandler
+# Uncomment the following line to enable logging of shuffle connections
+# log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
+
+#
+# Yarn ResourceManager Application Summary Log
 #
 # Set the ResourceManager summary log filename
 yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
@@ -212,8 +217,8 @@ 
yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
 yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
 #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-# To enable AppSummaryLogging for the RM, 
-# set yarn.server.resourcemanager.appsummary.logger to 
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
 # LEVEL,RMSUMMARY in hadoop-env.sh
 
 # Appender for ResourceManager Application Summary Log

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8153dd6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e7c02c0..4202ae4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -364,6 +364,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the 
 results file if its job fails. (Harsh J via devaraj)
 
+MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections
+ 

[28/50] hadoop git commit: HADOOP-11924. Tolerate JDK-8047340-related exceptions in Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)

2015-06-08 Thread zjshen
HADOOP-11924. Tolerate JDK-8047340-related exceptions in 
Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96a8d01a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96a8d01a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96a8d01a

Branch: refs/heads/YARN-2928
Commit: 96a8d01a380c904c84053c3a106a738f018eb5ff
Parents: b200b88
Author: Gera Shegalov g...@apache.org
Authored: Thu Jun 4 11:38:28 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../src/main/java/org/apache/hadoop/util/Shell.java  | 11 ++-
 .../java/org/apache/hadoop/util/TestStringUtils.java |  3 ---
 3 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 942d9e9..5f4bdb8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -823,6 +823,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible.
 (Kengo Seki via Arpit Agarwal)
 
+HADOOP-11924. Tolerate JDK-8047340-related exceptions in
+Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index f0100d4..c76c921 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -392,7 +392,16 @@ abstract public class Shell {
} catch (IOException ioe) {
  LOG.debug("setsid is not available on this machine. So not using it.");
  setsidSupported = false;
-} finally { // handle the exit code
+}  catch (Error err) {
+  if (err.getMessage().contains("posix_spawn is not " +
+  "a supported process launch mechanism")
+   && (Shell.FREEBSD || Shell.MAC)) {
+// HADOOP-11924: This is a workaround to avoid failure of class init
+// by JDK issue on TR locale(JDK-8047340).
+LOG.info("Avoiding JDK-8047340 on BSD-based systems.", err);
+setsidSupported = false;
+  }
+}  finally { // handle the exit code
   if (LOG.isDebugEnabled()) {
 LOG.debug(setsid exited with exit code 
  + (shexec != null ? shexec.getExitCode() : (null 
executor)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 5b0715f..85ab8c4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -417,9 +417,6 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
 
   @Test
   public void testLowerAndUpperStrings() {
-// Due to java bug 
http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8047340,
-// The test will fail with Turkish locality on Mac OS.
-Assume.assumeTrue(Shell.LINUX);
 Locale defaultLocale = Locale.getDefault();
 try {
   Locale.setDefault(new Locale(tr, TR));



[26/50] hadoop git commit: HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable to private. Contributed by Rakesh R.

2015-06-08 Thread zjshen
HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable to 
private. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9ee232e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9ee232e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9ee232e

Branch: refs/heads/YARN-2928
Commit: d9ee232e895222fef7c1cb6b6e4158246e5ebc6f
Parents: e72a346
Author: Andrew Wang w...@apache.org
Authored: Thu Jun 4 11:09:19 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ee232e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb65105..181f52b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -597,6 +597,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
 BlockPlacementPolicyRackFaultTolerant. (wang)
 
+HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable
+to private. (Rakesh R via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ee232e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 1dc4a9f..695e6da 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -107,7 +107,7 @@ public class DFSOutputStream extends FSOutputSummer
   protected final int bytesPerChecksum;
 
   protected DFSPacket currentPacket = null;
-  protected DataStreamer streamer;
+  private DataStreamer streamer;
   protected int packetSize = 0; // write packet size, not including the header.
   protected int chunksPerPacket = 0;
   protected long lastFlushOffset = 0; // offset when flush was invoked



[31/50] hadoop git commit: HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all block replicas. Contributed by Eddy Xu.

2015-06-08 Thread zjshen
HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all block 
replicas. Contributed by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42ba35bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42ba35bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42ba35bd

Branch: refs/heads/YARN-2928
Commit: 42ba35bdde24d2ec521fccd355fad9e02cddf57c
Parents: e8bed30
Author: Andrew Wang w...@apache.org
Authored: Thu Jun 4 15:35:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:58 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 6 --
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 2 +-
 3 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 181f52b..48d8eb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -600,6 +600,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable
 to private. (Rakesh R via wang)
 
+HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all
+block replicas. (Eddy Xu via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 11f2c32..b978189 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -910,9 +910,11 @@ public class DFSAdmin extends FsShell {
   commonUsageSummary;
 
String report = "-report [-live] [-dead] [-decommissioning]:\n" +
-  "\tReports basic filesystem information and statistics.\n" +
+  "\tReports basic filesystem information and statistics. \n" +
+  "\tThe dfs usage can be different from \"du\" usage, because it\n" +
+  "\tmeasures raw space used by replication, checksums, snapshots\n" +
+  "\tand etc. on all the DNs.\n" +
   "\tOptional flags may be used to filter the list of displayed DNs.\n";
-
 
 String safemode = -safemode enter|leave|get|wait:  Safe mode 
maintenance command.\n + 
   \t\tSafe mode is a Namenode state in which it\n +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 2121958..fab15f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -336,7 +336,7 @@ Usage:
 
 | COMMAND\_OPTION | Description |
 |: |: |
-| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic 
filesystem information and statistics. Optional flags may be used to filter the 
list of displayed DataNodes. |
+| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic 
filesystem information and statistics, The dfs usage can be different from du 
usage, because it measures raw space used by replication, checksums, snapshots 
and etc. on all the DNs. Optional flags may be used to filter the list of 
displayed DataNodes. |
 | `-safemode` enter\|leave\|get\|wait | Safe mode maintenance command. Safe 
mode is a Namenode state in which it br/1. does not accept changes to the 
name space (read-only) br/2. does not replicate or delete blocks. br/Safe 
mode is entered automatically at Namenode startup, and leaves safe mode 
automatically when the configured minimum percentage of blocks satisfies the 
minimum replication condition. Safe mode can also be entered manually, but then 
it can only be turned off manually as well. |
 | `-saveNamespace` | Save current namespace into storage directories and reset 
edits log. Requires safe mode. |
 | `-rollEdits` | Rolls the edit log on the active NameNode. |



[14/50] hadoop git commit: YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)

2015-06-08 Thread zjshen
YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9e8f791
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9e8f791
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9e8f791

Branch: refs/heads/YARN-2928
Commit: b9e8f791333fcebd79e96b3ccb8f998572aecaa1
Parents: 95dd42b
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Jun 3 13:47:24 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../scheduler/fair/FSParentQueue.java   | 219 ++-
 .../scheduler/fair/QueueManager.java|   3 +-
 3 files changed, 164 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e8f791/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1841d80..fb9badc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -573,6 +573,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3751. Fixed AppInfo to check if used resources are null. (Sunil G via
 zjshen)
 
+YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e8f791/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index f74106a..7d2e5b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -23,6 +23,9 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,36 +47,64 @@ public class FSParentQueue extends FSQueue {
   private static final Log LOG = LogFactory.getLog(
   FSParentQueue.class.getName());
 
-  private final List<FSQueue> childQueues = 
-  new ArrayList<FSQueue>();
+  private final List<FSQueue> childQueues = new ArrayList<>();
   private Resource demand = Resources.createResource(0);
   private int runnableApps;
-  
+
+  private ReadWriteLock rwLock = new ReentrantReadWriteLock();
+  private Lock readLock = rwLock.readLock();
+  private Lock writeLock = rwLock.writeLock();
+
   public FSParentQueue(String name, FairScheduler scheduler,
   FSParentQueue parent) {
 super(name, scheduler, parent);
   }
   
   public void addChildQueue(FSQueue child) {
-childQueues.add(child);
+writeLock.lock();
+try {
+  childQueues.add(child);
+} finally {
+  writeLock.unlock();
+}
+  }
+
+  public void removeChildQueue(FSQueue child) {
+writeLock.lock();
+try {
+  childQueues.remove(child);
+} finally {
+  writeLock.unlock();
+}
   }
 
   @Override
   public void recomputeShares() {
-policy.computeShares(childQueues, getFairShare());
-for (FSQueue childQueue : childQueues) {
-  childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-  childQueue.recomputeShares();
+readLock.lock();
+try {
+  policy.computeShares(childQueues, getFairShare());
+  for (FSQueue childQueue : childQueues) {
+childQueue.getMetrics().setFairShare(childQueue.getFairShare());
+childQueue.recomputeShares();
+  }
+} finally {
+  readLock.unlock();
 }
   }
 
   public void recomputeSteadyShares() {
-policy.computeSteadyShares(childQueues, getSteadyFairShare());
-for (FSQueue childQueue : childQueues) {
-  
childQueue.getMetrics().setSteadyFairShare(childQueue.getSteadyFairShare());
-  if (childQueue instanceof 

[29/50] hadoop git commit: YARN-2392. Add more diags about app retry limits on AM failures. Contributed by Steve Loughran

2015-06-08 Thread zjshen
YARN-2392. Add more diags about app retry limits on AM failures. Contributed by 
Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b200b880
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b200b880
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b200b880

Branch: refs/heads/YARN-2928
Commit: b200b88082b28fd375d440e4e9093143a35639c6
Parents: d9ee232
Author: Jian He jia...@apache.org
Authored: Thu Jun 4 11:14:09 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:57 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 16 +---
 .../rmapp/attempt/RMAppAttemptImpl.java |  4 ++--
 3 files changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 972066d..1c36c9b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -380,6 +380,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3467. Expose allocatedMB, allocatedVCores, and runningContainers 
metrics on 
 running Applications in RM Web UI. (Anubhav Dhoot via kasha)
 
+YARN-2392. Add more diags about app retry limits on AM failures. (Steve
+Loughran via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 040ee49..a68fc77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1076,9 +1076,19 @@ public class RMAppImpl implements RMApp, Recoverable {
   +  failed due to  + failedEvent.getDiagnostics()
   + . Failing the application.;
 } else if (this.isNumAttemptsBeyondThreshold) {
-  msg = Application  + this.getApplicationId() +  failed 
-  + this.maxAppAttempts +  times due to 
-  + failedEvent.getDiagnostics() + . Failing the application.;
+  int globalLimit = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+  YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+  msg = String.format(
+"Application %s failed %d times%s%s due to %s. Failing the application.",
+  getApplicationId(),
+  maxAppAttempts,
+  (attemptFailuresValidityInterval <= 0 ? "" 
+   : (" in previous " + attemptFailuresValidityInterval
+  + " milliseconds")),
+  (globalLimit == maxAppAttempts) ? "" 
+  : (" (global limit =" + globalLimit
+ + "; local limit is =" + maxAppAttempts + ")"),
+  failedEvent.getDiagnostics());
 }
 return msg;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 684dde8..5171bba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 

[09/50] hadoop git commit: HDFS-3716. Purger should remove stale fsimage ckpt files (Contributed by J.Andreina)

2015-06-08 Thread zjshen
HDFS-3716. Purger should remove stale fsimage ckpt files (Contributed by 
J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dadcb31e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dadcb31e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dadcb31e

Branch: refs/heads/YARN-2928
Commit: dadcb31eba92de47316bb7b3f0a084caaf8ad906
Parents: 6de6796
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jun 3 15:30:40 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:43:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/FSImage.java|  1 +
 .../hdfs/server/namenode/TestFSImage.java   | 41 
 3 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e25129..d65e513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -843,6 +843,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8470. fsimage loading progress should update inode, delegation token 
and
 cache pool count. (surendra singh lilhore via vinayakumarb)
 
+HDFS-3716. Purger should remove stale fsimage ckpt files
+(J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 45184e7..cd7cf18 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1209,6 +1209,7 @@ public class FSImage implements Closeable {
   // Since we now have a new checkpoint, we can clean up some
   // old edit logs and checkpoints.
   purgeOldStorage(nnf);
+  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_NEW);
 } finally {
   // Notify any threads waiting on the checkpoint to be canceled
   // that it is complete.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 27a1bd3..df20fd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -43,6 +44,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -118,6 +120,45 @@ public class TestFSImage {
 }
   }
 
+   /**
+   * On checkpointing , stale fsimage checkpoint file should be deleted.
+   */
+  @Test
+  public void testRemovalStaleFsimageCkpt() throws IOException {
+MiniDFSCluster cluster = null;
+SecondaryNameNode secondary = null;
+Configuration conf = new HdfsConfiguration();
+try {
+  cluster = new MiniDFSCluster.Builder(conf).
+  numDataNodes(1).format(true).build();
+  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+  "0.0.0.0:0");
+  secondary = new SecondaryNameNode(conf);
+  // Do checkpointing
+  secondary.doCheckpoint();
+  NNStorage storage = secondary.getFSImage().storage;
+

[41/50] hadoop git commit: HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (Contributed by Xiaoyu Yao)

2015-06-08 Thread zjshen
HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. 
(Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33c03026
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33c03026
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33c03026

Branch: refs/heads/YARN-2928
Commit: 33c030260a792dec417c2ae7d57a59b2fa7ec4c3
Parents: 01cd698
Author: Xiaoyu Yao x...@apache.org
Authored: Fri Jun 5 15:09:06 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:57:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/server/namenode/FSNamesystem.java  | 76 +---
 2 files changed, 35 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c03026/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 48d8eb3..72ab17b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -957,6 +957,8 @@ Release 2.7.1 - UNRELEASED
 HDFS-8523. Remove usage information on unsupported operation
 fsck -showprogress from branch-2 (J.Andreina via vinayakumarb)
 
+HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c03026/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5ed069d..dfbf04e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1956,10 +1956,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   long mtime)
   throws IOException, UnresolvedLinkException {
 String src = srcArg;
-if (NameNode.stateChangeLog.isDebugEnabled()) {
-  NameNode.stateChangeLog.debug(DIR* NameSystem.truncate: src=
-  + src +  newLength= + newLength);
-}
+NameNode.stateChangeLog.debug(
+"DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
 if (newLength  0) {
   throw new HadoopIllegalArgumentException(
   Cannot truncate to a negative file size:  + newLength + .);
@@ -2108,10 +2106,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
   getBlockManager().addBlockCollection(truncatedBlockUC, file);
 
-  NameNode.stateChangeLog.info(BLOCK* prepareFileForTruncate: 
-  + Scheduling copy-on-truncate to new size 
-  + truncatedBlockUC.getNumBytes() +  new block  + newBlock
-  +  old block  + truncatedBlockUC.getTruncateBlock());
+  NameNode.stateChangeLog.debug(
+  "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" +
+  " size {}  new block {} old block {}", truncatedBlockUC.getNumBytes(),
+  newBlock, truncatedBlockUC.getTruncateBlock());
 } else {
   // Use new generation stamp for in-place truncate recovery
   blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
@@ -2124,10 +2122,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   truncatedBlockUC.getTruncateBlock().setGenerationStamp(
   newBlock.getGenerationStamp());
 
-  NameNode.stateChangeLog.debug(BLOCK* prepareFileForTruncate: 
-  + Scheduling in-place block truncate to new size 
-  + truncatedBlockUC.getTruncateBlock().getNumBytes()
-  +  block= + truncatedBlockUC);
+  NameNode.stateChangeLog.debug(
+  BLOCK* prepareFileForTruncate: {} Scheduling in-place block  +
+  truncate to new size {},
+  truncatedBlockUC.getTruncateBlock().getNumBytes(), truncatedBlockUC);
 }
 if (shouldRecoverNow) {
   truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
@@ -2774,11 +2772,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   String clientMachine, boolean newBlock, boolean logRetryCache)
   throws IOException {
 String src = srcArg;
-if (NameNode.stateChangeLog.isDebugEnabled()) {
-  

[37/50] hadoop git commit: YARN-1462. AHS API and other AHS changes to handle tags for completed MR jobs. Contributed by Xuan Gong

2015-06-08 Thread zjshen
YARN-1462. AHS API and other AHS changes to handle tags for completed MR jobs. 
Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d0d5dce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d0d5dce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d0d5dce

Branch: refs/heads/YARN-2928
Commit: 9d0d5dcef7a426a4b57d2cc1f37fc2c969036711
Parents: f82a100
Author: Xuan xg...@apache.org
Authored: Fri Jun 5 12:48:52 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Jun 8 09:56:59 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../yarn/api/records/ApplicationReport.java | 19 ++
 ...pplicationHistoryManagerOnTimelineStore.java | 19 --
 ...pplicationHistoryManagerOnTimelineStore.java |  9 +
 .../metrics/ApplicationMetricsConstants.java|  1 +
 .../metrics/ApplicationCreatedEvent.java| 10 +-
 .../metrics/SystemMetricsPublisher.java |  4 ++-
 .../metrics/TestSystemMetricsPublisher.java | 37 
 .../src/site/markdown/TimelineServer.md | 30 ++--
 9 files changed, 124 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d5e8bba..3643d0c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -383,6 +383,8 @@ Release 2.8.0 - UNRELEASED
 YARN-2392. Add more diags about app retry limits on AM failures. (Steve
 Loughran via jianhe)
 
+YARN-1462. AHS API and other AHS changes to handle tags for completed MR 
jobs. (xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index e5d7254..444a202 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -83,6 +83,25 @@ public abstract class ApplicationReport {
 return report;
   }
 
+  @Private
+  @Unstable
+  public static ApplicationReport newInstance(ApplicationId applicationId,
+  ApplicationAttemptId applicationAttemptId, String user, String queue,
+  String name, String host, int rpcPort, Token clientToAMToken,
+  YarnApplicationState state, String diagnostics, String url,
+  long startTime, long finishTime, FinalApplicationStatus finalStatus,
+  ApplicationResourceUsageReport appResources, String origTrackingUrl,
+  float progress, String applicationType, Token amRmToken,
+  Set<String> tags) {
+ApplicationReport report =
+newInstance(applicationId, applicationAttemptId, user, queue, name,
+  host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
+  finishTime, finalStatus, appResources, origTrackingUrl, progress,
+  applicationType, amRmToken);
+report.setApplicationTags(tags);
+return report;
+  }
+
   /**
* Get the codeApplicationId/code of the application.
* @return codeApplicationId/code of the application

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 9bfd2d6..0c7fdc0 100644
--- 

[1/2] hadoop git commit: HDFS-8553. Document hdfs class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 82e772bdb -> cc960734e
  refs/heads/trunk 84ba1a75b -> d2832b3d4


HDFS-8553. Document hdfs class path options. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2832b3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2832b3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2832b3d

Branch: refs/heads/trunk
Commit: d2832b3d4243c6c470c774bc33fd13f70b3e7b72
Parents: 84ba1a7
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:15:38 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:15:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2832b3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 73574b6..01a0285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -609,6 +609,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from
 RollingWindowManager. (Brahma Reddy Battula via xyao)
 
+HDFS-8553. Document hdfs class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2832b3d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index fab15f8..c696477 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -71,9 +71,15 @@ Commands useful for users of a hadoop cluster.
 
 ### `classpath`
 
-Usage: `hdfs classpath`
+Usage: `hdfs classpath [--glob |--jar path |-h |--help]`
 
-Prints the class path needed to get the Hadoop jar and the required libraries
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `dfs`
 



[2/2] hadoop git commit: HDFS-8553. Document hdfs class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
HDFS-8553. Document hdfs class path options. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit d2832b3d4243c6c470c774bc33fd13f70b3e7b72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc960734
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc960734
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc960734

Branch: refs/heads/branch-2
Commit: cc960734e4f23a9e9c3bb20951dc52ddc26f398b
Parents: 82e772b
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:15:38 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:15:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc960734/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ed484ad..0a9b7cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -269,6 +269,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from
 RollingWindowManager. (Brahma Reddy Battula via xyao)
 
+HDFS-8553. Document hdfs class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc960734/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 43614f7..dbb594d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -71,9 +71,15 @@ Commands useful for users of a hadoop cluster.
 
 ### `classpath`
 
-Usage: `hdfs classpath`
+Usage: `hdfs classpath [--glob |--jar path |-h |--help]`
 
-Prints the class path needed to get the Hadoop jar and the required libraries
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `dfs`
 



[2/2] hadoop git commit: MAPREDUCE-6392. Document mapred class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
MAPREDUCE-6392. Document mapred class path options. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit 025a3a8be07ba2b0c27ee39034b506d91aa486fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a002aef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a002aef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a002aef

Branch: refs/heads/branch-2
Commit: 5a002aef12a9fe09c3026bedf803c366a360a799
Parents: d9016b0
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:22:03 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:22:12 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 +++
 .../src/site/markdown/MapredCommands.md   | 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a002aef/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 02c6492..3f9ec14 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -82,6 +82,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections
 (Chang Li via jlowe)
 
+MAPREDUCE-6392. Document mapred class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a002aef/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
index 98ff588..31c2723 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
@@ -55,9 +55,15 @@ Creates a hadoop archive. More information can be found at
 
 ### `classpath`
 
-Prints the class path needed to get the Hadoop jar and the required libraries.
+Usage: `yarn classpath [--glob |--jar path |-h |--help]`
 
-Usage: `mapred classpath`
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `distcp`
 



[1/2] hadoop git commit: MAPREDUCE-6392. Document mapred class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d9016b028 -> 5a002aef1
  refs/heads/trunk a531b058a -> 025a3a8be


MAPREDUCE-6392. Document mapred class path options. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/025a3a8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/025a3a8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/025a3a8b

Branch: refs/heads/trunk
Commit: 025a3a8be07ba2b0c27ee39034b506d91aa486fc
Parents: a531b05
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:22:03 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:22:03 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 +++
 .../src/site/markdown/MapredCommands.md   | 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/025a3a8b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 29191cd..115fc1b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -343,6 +343,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections
 (Chang Li via jlowe)
 
+MAPREDUCE-6392. Document mapred class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/025a3a8b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
index 9ccee60..0a3c491 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
@@ -55,9 +55,15 @@ Creates a hadoop archive. More information can be found at
 
 ### `classpath`
 
-Prints the class path needed to get the Hadoop jar and the required libraries.
+Usage: `yarn classpath [--glob |--jar path |-h |--help]`
 
-Usage: `mapred classpath`
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `distcp`
 



[2/2] hadoop git commit: YARN-3786. Document yarn class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
YARN-3786. Document yarn class path options. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit a531b058aef48c9bf2e5366ed110e1f817316c1a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9016b02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9016b02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9016b02

Branch: refs/heads/branch-2
Commit: d9016b0286290fd893a977e64cf2c94f71b7c6e8
Parents: cc96073
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:18:36 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:18:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../hadoop-yarn-site/src/site/markdown/YarnCommands.md| 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9016b02/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1d1b403..bf6781f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -249,6 +249,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. 
 (Karthik Kambatla via jianhe)
 
+YARN-3786. Document yarn class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9016b02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index c20e9a4..7f0edbd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -85,9 +85,15 @@ prints applicationattempt(s) report
 
 ### `classpath`
 
-Usage: `yarn classpath`
+Usage: `yarn classpath [--glob |--jar path |-h |--help]`
 
-Prints the class path needed to get the Hadoop jar and the required libraries
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `container`
 



[1/2] hadoop git commit: YARN-3786. Document yarn class path options. Contributed by Brahma Reddy Battula.

2015-06-08 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc960734e -> d9016b028
  refs/heads/trunk d2832b3d4 -> a531b058a


YARN-3786. Document yarn class path options. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a531b058
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a531b058
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a531b058

Branch: refs/heads/trunk
Commit: a531b058aef48c9bf2e5366ed110e1f817316c1a
Parents: d2832b3
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 16:18:36 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 16:18:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../hadoop-yarn-site/src/site/markdown/YarnCommands.md| 10 --
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a531b058/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3ee3e77..87b3da3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -297,6 +297,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2716. Refactor ZKRMStateStore retry code with Apache Curator. 
 (Karthik Kambatla via jianhe)
 
+YARN-3786. Document yarn class path options.
+(Brahma Reddy Battula via cnauroth)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a531b058/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 765163e..eb0123e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -89,9 +89,15 @@ prints applicationattempt(s) report
 
 ### `classpath`
 
-Usage: `yarn classpath`
+Usage: `yarn classpath [--glob |--jar path |-h |--help]`
 
-Prints the class path needed to get the Hadoop jar and the required libraries
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `--glob` | expand wildcards |
+| `--jar` *path* | write classpath as manifest in jar named *path* |
+| `-h`, `--help` | print help |
+
+Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
 ### `container`
 



hadoop git commit: HADOOP-11347. RawLocalFileSystem#mkdir and create should honor umask (Varun Saxena via Colin P. McCabe)

2015-06-08 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 025a3a8be -> fc2ed4a1f


HADOOP-11347. RawLocalFileSystem#mkdir and create should honor umask (Varun 
Saxena via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc2ed4a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc2ed4a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc2ed4a1

Branch: refs/heads/trunk
Commit: fc2ed4a1f9a19d61f5e3cb4fd843604f0c7fe95f
Parents: 025a3a8
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Jun 8 17:49:31 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Jun 8 17:49:31 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/fs/RawLocalFileSystem.java|  45 
 .../fs/TestLocalFileSystemPermission.java   | 111 ++-
 3 files changed, 134 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc2ed4a1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index fa6e4b7..ce8baee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -482,6 +482,9 @@ Trunk (Unreleased)
 
 HADOOP-9905. remove dependency of zookeeper for hadoop-client 
(vinayakumarb)
 
+HADOOP-11347. RawLocalFileSystem#mkdir and create should honor umask (Varun
+Saxena via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc2ed4a1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 56dd7ad..b94d9d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -59,6 +59,8 @@ public class RawLocalFileSystem extends FileSystem {
   // Temporary workaround for HADOOP-9652.
   private static boolean useDeprecatedFileStatus = true;
 
+  private FsPermission umask;
+
   @VisibleForTesting
   public static void useStatIfAvailable() {
 useDeprecatedFileStatus = !Stat.isAvailable();
@@ -92,6 +94,7 @@ public class RawLocalFileSystem extends FileSystem {
   public void initialize(URI uri, Configuration conf) throws IOException {
 super.initialize(uri, conf);
 setConf(conf);
+umask = FsPermission.getUMask(conf);
   }
   
   /***
@@ -211,9 +214,13 @@ public class RawLocalFileSystem extends FileSystem {
 private LocalFSFileOutputStream(Path f, boolean append,
 FsPermission permission) throws IOException {
   File file = pathToFile(f);
+  if (!append  permission == null) {
+permission = FsPermission.getFileDefault();
+  }
   if (permission == null) {
 this.fos = new FileOutputStream(file, append);
   } else {
+permission = permission.applyUMask(umask);
 if (Shell.WINDOWS && NativeIO.isAvailable()) {
   this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
   append, permission.toShort());
@@ -484,27 +491,27 @@ public class RawLocalFileSystem extends FileSystem {
   protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
   throws IOException {
 if (permission == null) {
-  return p2f.mkdir();
-} else {
-  if (Shell.WINDOWS && NativeIO.isAvailable()) {
-try {
-  NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
-  return true;
-} catch (IOException e) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug(String.format(
-"NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
-p2f, permission.toShort()), e);
-  }
-  return false;
-}
-  } else {
-boolean b = p2f.mkdir();
-if (b) {
-  setPermission(p, permission);
+  permission = FsPermission.getDirDefault();
+}
+permission = permission.applyUMask(umask);
+if (Shell.WINDOWS && NativeIO.isAvailable()) {
+  try {
+NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
+return true;
+  } catch (IOException e) {
+if 

[1/2] hadoop git commit: HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on EOF. Contributed by Ivan Mitic.

2015-06-08 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 116a72096 -> f5b0cce7f
  refs/heads/trunk 927577c87 -> c45784bc9


HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on EOF. 
Contributed by Ivan Mitic.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c45784bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c45784bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c45784bc

Branch: refs/heads/trunk
Commit: c45784bc9031353b938f4756473937cca759b3dc
Parents: 927577c
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 22:42:14 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 22:42:14 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/azure/AzureNativeFileSystemStore.java|  2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java| 32 +--
 .../hadoop/fs/azure/PageBlobOutputStream.java   | 10 ++-
 .../fs/azure/NativeAzureFileSystemBaseTest.java | 79 -
 ...tiveAzureFileSystemContractPageBlobLive.java | 90 
 6 files changed, 204 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c45784bc/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ce8baee..4208aa1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -843,6 +843,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12054. RPC client should not retry for InvalidToken exceptions.
 (Varun Saxena via Arpit Agarwal)
 
+HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on
+EOF. (Ivan Mitic via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c45784bc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 69bda06..7741f17 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2301,7 +2301,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 throws AzureException {
 if (blob instanceof CloudPageBlobWrapper) {
   try {
-return PageBlobInputStream.getPageBlobSize((CloudPageBlobWrapper) blob,
+return PageBlobInputStream.getPageBlobDataSize((CloudPageBlobWrapper) 
blob,
 getInstrumentedContext(
 isConcurrentOOBAppendAllowed()));
   } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c45784bc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 468ac65..097201b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -80,7 +80,7 @@ final class PageBlobInputStream extends InputStream {
* @throws IOException If the format is corrupt.
* @throws StorageException If anything goes wrong in the requests.
*/
-  public static long getPageBlobSize(CloudPageBlobWrapper blob,
+  public static long getPageBlobDataSize(CloudPageBlobWrapper blob,
   OperationContext opContext) throws IOException, StorageException {
 // Get the page ranges for the blob. There should be one range starting
 // at byte 0, but we tolerate (and ignore) ranges after the first one.
@@ -156,7 +156,7 @@ final class PageBlobInputStream extends InputStream {
 }
 if (pageBlobSize == -1) {
   try {
-pageBlobSize = getPageBlobSize(blob, opContext);
+pageBlobSize = getPageBlobDataSize(blob, opContext);
   } catch (StorageException e) {
 throw new IOException("Unable to get page blob size.", e);
   }
@@ -179,7 +179,13 @@ final class PageBlobInputStream extends InputStream {
 
   /**
* Check our buffer and download more from the server if needed.
-   * @return true if 

[2/2] hadoop git commit: HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on EOF. Contributed by Ivan Mitic.

2015-06-08 Thread cnauroth
HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on EOF. 
Contributed by Ivan Mitic.

(cherry picked from commit c45784bc9031353b938f4756473937cca759b3dc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5b0cce7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5b0cce7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5b0cce7

Branch: refs/heads/branch-2
Commit: f5b0cce7faaee32fbb0f8f2cec233ff178f208ea
Parents: 116a720
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 22:42:14 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 22:42:24 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/azure/AzureNativeFileSystemStore.java|  2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java| 32 +--
 .../hadoop/fs/azure/PageBlobOutputStream.java   | 10 ++-
 .../fs/azure/NativeAzureFileSystemBaseTest.java | 79 -
 ...tiveAzureFileSystemContractPageBlobLive.java | 90 
 6 files changed, 204 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b0cce7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7bfc5fa..d16262a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -356,6 +356,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12054. RPC client should not retry for InvalidToken exceptions.
 (Varun Saxena via Arpit Agarwal)
 
+HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on
+EOF. (Ivan Mitic via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b0cce7/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 69bda06..7741f17 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2301,7 +2301,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 throws AzureException {
 if (blob instanceof CloudPageBlobWrapper) {
   try {
-return PageBlobInputStream.getPageBlobSize((CloudPageBlobWrapper) blob,
+return PageBlobInputStream.getPageBlobDataSize((CloudPageBlobWrapper) 
blob,
 getInstrumentedContext(
 isConcurrentOOBAppendAllowed()));
   } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b0cce7/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 468ac65..097201b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -80,7 +80,7 @@ final class PageBlobInputStream extends InputStream {
* @throws IOException If the format is corrupt.
* @throws StorageException If anything goes wrong in the requests.
*/
-  public static long getPageBlobSize(CloudPageBlobWrapper blob,
+  public static long getPageBlobDataSize(CloudPageBlobWrapper blob,
   OperationContext opContext) throws IOException, StorageException {
 // Get the page ranges for the blob. There should be one range starting
 // at byte 0, but we tolerate (and ignore) ranges after the first one.
@@ -156,7 +156,7 @@ final class PageBlobInputStream extends InputStream {
 }
 if (pageBlobSize == -1) {
   try {
-pageBlobSize = getPageBlobSize(blob, opContext);
+pageBlobSize = getPageBlobDataSize(blob, opContext);
   } catch (StorageException e) {
 throw new IOException("Unable to get page blob size.", e);
   }
@@ -179,7 +179,13 @@ final class PageBlobInputStream extends InputStream {
 
   /**
* Check our buffer and download more from the server if needed.
-   * @return true if there's more data in the buffer, false if we're 

hadoop git commit: HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc. Contributed by Brahma Reddy Battula

2015-06-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5a002aef1 -> 116a72096


HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc. Contributed by 
Brahma Reddy Battula

(cherry picked from commit 927577c87ca19e8b5b75722f78e2def6d9386576)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/116a7209
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/116a7209
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/116a7209

Branch: refs/heads/branch-2
Commit: 116a72096d7cda577d774c228a1e9e594a6b9b23
Parents: 5a002ae
Author: Xiaoyu Yao x...@apache.org
Authored: Mon Jun 8 21:57:26 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Mon Jun 8 22:03:07 2015 -0700

--
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7209/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 788d48e..b1f5920 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -84,7 +84,7 @@ public abstract class ZKFailoverController {
   };
   
   protected static final String USAGE = 
-  "Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
+  "Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
 
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7209/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0a9b7cd..6dc2d86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -272,6 +272,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8553. Document hdfs class path options.
 (Brahma Reddy Battula via cnauroth)
 
+HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc.
+(Brahma Reddy Battula via xyao)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7209/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 29c783c..5d69fac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -245,7 +245,7 @@ public class NameNode implements NameNodeStatusMXBean {
 DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
   
-  private static final String USAGE = "Usage: java NameNode ["
+  private static final String USAGE = "Usage: hdfs namenode ["
   + StartupOption.BACKUP.getName() + "] | \n\t["
   + StartupOption.CHECKPOINT.getName() + "] | \n\t["
   + StartupOption.FORMAT.getName() + " ["



hadoop git commit: HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc. Contributed by Brahma Reddy Battula

2015-06-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fc2ed4a1f -> 927577c87


HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc. Contributed by 
Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/927577c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/927577c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/927577c8

Branch: refs/heads/trunk
Commit: 927577c87ca19e8b5b75722f78e2def6d9386576
Parents: fc2ed4a
Author: Xiaoyu Yao x...@apache.org
Authored: Mon Jun 8 21:57:26 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Mon Jun 8 21:57:26 2015 -0700

--
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/927577c8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 788d48e..b1f5920 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -84,7 +84,7 @@ public abstract class ZKFailoverController {
   };
   
   protected static final String USAGE = 
-  "Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
+  "Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
 
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/927577c8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 01a0285..5147e38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -612,6 +612,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8553. Document hdfs class path options.
 (Brahma Reddy Battula via cnauroth)
 
+HDFS-8552. Fix hdfs CLI usage message for namenode and zkfc.
+(Brahma Reddy Battula via xyao)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/927577c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 268abeb..469352f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -246,7 +246,7 @@ public class NameNode implements NameNodeStatusMXBean {
 DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
   
-  private static final String USAGE = "Usage: java NameNode ["
+  private static final String USAGE = "Usage: hdfs namenode ["
   + StartupOption.BACKUP.getName() + "] | \n\t["
   + StartupOption.CHECKPOINT.getName() + "] | \n\t["
   + StartupOption.FORMAT.getName() + " ["



hadoop git commit: YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.

2015-06-08 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 953c4a049 -> 2a01c01af


YARN-3780. Should use equals when compare Resource in
RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.

(cherry picked from commit c7ee6c151c5771043a6de3b8a951cea13f59dd7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a01c01a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a01c01a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a01c01a

Branch: refs/heads/branch-2
Commit: 2a01c01af7d5d5bc2bf7e0223bffde6c081b4c21
Parents: 953c4a0
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 11:54:55 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Jun 8 11:56:58 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a01c01a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0fb8b04..e0272b5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -453,6 +453,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
 and container reservation. (Zhihai Xu via kasha)
 
+YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
+(zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a01c01a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1263692..8a810cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -622,7 +622,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
 rmNode.httpPort = newNode.getHttpPort();
 rmNode.httpAddress = newNode.getHttpAddress();
 boolean isCapabilityChanged = false;
-if (rmNode.getTotalCapability() != newNode.getTotalCapability()) {
+if (!rmNode.getTotalCapability().equals(
+newNode.getTotalCapability())) {
   rmNode.totalCapability = newNode.getTotalCapability();
   isCapabilityChanged = true;
 }



hadoop git commit: YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.

2015-06-08 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk a6cb4894b -> c7ee6c151


YARN-3780. Should use equals when compare Resource in
RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ee6c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ee6c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ee6c15

Branch: refs/heads/trunk
Commit: c7ee6c151c5771043a6de3b8a951cea13f59dd7b
Parents: a6cb489
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 11:54:55 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Jun 8 11:54:55 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ee6c15/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d90433d..59e3509 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -501,6 +501,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
 and container reservation. (Zhihai Xu via kasha)
 
+YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
+(zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ee6c15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1263692..8a810cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -622,7 +622,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
 rmNode.httpPort = newNode.getHttpPort();
 rmNode.httpAddress = newNode.getHttpAddress();
 boolean isCapabilityChanged = false;
-if (rmNode.getTotalCapability() != newNode.getTotalCapability()) {
+if (!rmNode.getTotalCapability().equals(
+newNode.getTotalCapability())) {
   rmNode.totalCapability = newNode.getTotalCapability();
   isCapabilityChanged = true;
 }



hadoop git commit: YARN-3747. TestLocalDirsHandlerService should delete the created test directory logDir2. Contributed by David Moore.

2015-06-08 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2a01c01af -> 8c643e3bf


YARN-3747. TestLocalDirsHandlerService should delete the created test
directory logDir2. Contributed by David Moore.

(cherry picked from commit 126321eded7dc38c1eef2cfde9365404c924a5cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c643e3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c643e3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c643e3b

Branch: refs/heads/branch-2
Commit: 8c643e3bf9e0a70229aee79fb5244a19b32fef00
Parents: 2a01c01
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 15:32:13 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Jun 8 15:33:38 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c643e3b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e0272b5..05cab3d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -456,6 +456,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
 (zhihai xu via devaraj)
 
+YARN-3747. TestLocalDirsHandlerService should delete the created test 
directory logDir2.
+(David Moore via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c643e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index a045e62..c61d1f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -144,7 +144,7 @@ public class TestLocalDirsHandlerService {
 FileUtils.deleteDirectory(new File(localDir1));
 FileUtils.deleteDirectory(new File(localDir2));
 FileUtils.deleteDirectory(new File(logDir1));
-FileUtils.deleteDirectory(new File(logDir1));
+FileUtils.deleteDirectory(new File(logDir2));
 dirSvc.close();
   }
 }



hadoop git commit: YARN-3747. TestLocalDirsHandlerService should delete the created test directory logDir2. Contributed by David Moore.

2015-06-08 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7ee6c151 -> 126321ede


YARN-3747. TestLocalDirsHandlerService should delete the created test
directory logDir2. Contributed by David Moore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/126321ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/126321ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/126321ed

Branch: refs/heads/trunk
Commit: 126321eded7dc38c1eef2cfde9365404c924a5cb
Parents: c7ee6c1
Author: Devaraj K deva...@apache.org
Authored: Mon Jun 8 15:32:13 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Mon Jun 8 15:32:13 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/126321ed/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 59e3509..f393cad 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -504,6 +504,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3780. Should use equals when compare Resource in 
RMNodeImpl#ReconnectNodeTransition.
 (zhihai xu via devaraj)
 
+YARN-3747. TestLocalDirsHandlerService should delete the created test 
directory logDir2.
+(David Moore via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/126321ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index a045e62..c61d1f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -144,7 +144,7 @@ public class TestLocalDirsHandlerService {
 FileUtils.deleteDirectory(new File(localDir1));
 FileUtils.deleteDirectory(new File(localDir2));
 FileUtils.deleteDirectory(new File(logDir1));
-FileUtils.deleteDirectory(new File(logDir1));
+FileUtils.deleteDirectory(new File(logDir2));
 dirSvc.close();
   }
 }



[1/2] hadoop git commit: HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers trying to use them. (Brahma Reddy Battula via stevel)

2015-06-08 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8c643e3bf -> c3c2b4d31
  refs/heads/trunk 126321ede -> 18f680977


HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers 
trying to use them. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3c2b4d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3c2b4d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3c2b4d3

Branch: refs/heads/branch-2
Commit: c3c2b4d3182bea3b4ee065168844c83f5114edcf
Parents: 8c643e3
Author: Steve Loughran ste...@apache.org
Authored: Mon Jun 8 13:02:26 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jun 8 13:02:26 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Client.java  | 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3c2b4d3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2bdfcdf..6b93e54 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -350,6 +350,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11924. Tolerate JDK-8047340-related exceptions in
 Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
 
+HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
+callers trying to use them. (Brahma Reddy Battula via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3c2b4d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 4b4166a..8062dd6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1484,7 +1484,13 @@ public class Client {
   }
 });
   } catch (ExecutionException e) {
-throw new IOException(e);
+Throwable cause = e.getCause();
+// the underlying exception should normally be IOException
+if (cause instanceof IOException) {
+  throw (IOException) cause;
+} else {
+  throw new IOException(cause);
+}
   }
   if (connection.addCall(call)) {
 break;



[2/2] hadoop git commit: HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers trying to use them. (Brahma Reddy Battula via stevel)

2015-06-08 Thread stevel
HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers 
trying to use them. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f68097
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f68097
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f68097

Branch: refs/heads/trunk
Commit: 18f680977684710037c07bb068383791e8a33a9e
Parents: 126321e
Author: Steve Loughran ste...@apache.org
Authored: Mon Jun 8 13:02:26 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jun 8 13:02:56 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Client.java  | 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f68097/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index eacc3be..79f3178 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -834,6 +834,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11924. Tolerate JDK-8047340-related exceptions in
 Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
 
+HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
+callers trying to use them. (Brahma Reddy Battula via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f68097/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index feb811e..6996a51 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1484,7 +1484,13 @@ public class Client {
   }
 });
   } catch (ExecutionException e) {
-throw new IOException(e);
+Throwable cause = e.getCause();
+// the underlying exception should normally be IOException
+if (cause instanceof IOException) {
+  throw (IOException) cause;
+} else {
+  throw new IOException(cause);
+}
   }
   if (connection.addCall(call)) {
 break;



[1/2] hadoop git commit: HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris Nauroth.

2015-06-08 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c3c2b4d31 -> 8ee50d8ca
  refs/heads/trunk 18f680977 -> 0e80d5198


HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e80d519
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e80d519
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e80d519

Branch: refs/heads/trunk
Commit: 0e80d51983942dca3348c8a8401bad3ecbaab010
Parents: 18f6809
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 08:39:02 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 08:39:02 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java | 6 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 .../org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java| 8 
 3 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e80d519/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 5fd89c4..9b9e213 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -731,6 +731,12 @@ public class FileUtil {
   }
 }
 
+if (entry.isLink()) {
+  File src = new File(outputDir, entry.getLinkName());
+  HardLink.createHardLink(src, outputFile);
+  return;
+}
+
 int count;
 byte data[] = new byte[2048];
 BufferedOutputStream outputStream = new BufferedOutputStream(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e80d519/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 853a022..73574b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -864,6 +864,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8539. Hdfs doesnt have class 'debug' in windows.
 (Anu Engineer via cnauroth)
 
+HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e80d519/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 343320c..224abea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -37,11 +37,11 @@ public class TestDatanodeLayoutUpgrade {
 upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
 Configuration conf = new 
Configuration(TestDFSUpgradeFromImage.upgradeConf);
 conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-System.getProperty(test.build.data) + File.separator +
-dfs + File.separator + data);
+new File(System.getProperty(test.build.data),
+dfs + File.separator + data).toURI().toString());
 conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-System.getProperty(test.build.data) + File.separator +
-dfs + File.separator + name);
+new File(System.getProperty(test.build.data),
+dfs + File.separator + name).toURI().toString());
 upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
 .manageDataDfsDirs(false).manageNameDfsDirs(false), null);
   }



[2/2] hadoop git commit: HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris Nauroth.

2015-06-08 Thread cnauroth
HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris 
Nauroth.

(cherry picked from commit 0e80d51983942dca3348c8a8401bad3ecbaab010)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ee50d8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ee50d8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ee50d8c

Branch: refs/heads/branch-2
Commit: 8ee50d8ca7df7ebde0a12c2ff6312b3b449e40ec
Parents: c3c2b4d
Author: cnauroth cnaur...@apache.org
Authored: Mon Jun 8 08:39:02 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jun 8 08:39:14 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java | 6 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 .../org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java| 8 
 3 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee50d8c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 91f00e1..7a340fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -727,6 +727,12 @@ public class FileUtil {
   }
 }
 
+if (entry.isLink()) {
+  File src = new File(outputDir, entry.getLinkName());
+  HardLink.createHardLink(src, outputFile);
+  return;
+}
+
 int count;
 byte data[] = new byte[2048];
 BufferedOutputStream outputStream = new BufferedOutputStream(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee50d8c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 63a128a..ed484ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -527,6 +527,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8539. Hdfs doesnt have class 'debug' in windows.
 (Anu Engineer via cnauroth)
 
+HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee50d8c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 343320c..224abea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -37,11 +37,11 @@ public class TestDatanodeLayoutUpgrade {
 upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
 Configuration conf = new 
Configuration(TestDFSUpgradeFromImage.upgradeConf);
 conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-System.getProperty(test.build.data) + File.separator +
-dfs + File.separator + data);
+new File(System.getProperty(test.build.data),
+dfs + File.separator + data).toURI().toString());
 conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-System.getProperty(test.build.data) + File.separator +
-dfs + File.separator + name);
+new File(System.getProperty(test.build.data),
+dfs + File.separator + name).toURI().toString());
 upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
 .manageDataDfsDirs(false).manageNameDfsDirs(false), null);
   }