hadoop git commit: HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is full. Contributed by Ming Ma.

2015-02-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6dc8812a9 -> 685af8a3d


HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is full. 
Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/685af8a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/685af8a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/685af8a3

Branch: refs/heads/trunk
Commit: 685af8a3d0504724fe588daf3722519fedc45b01
Parents: 6dc8812
Author: Kihwal Lee kih...@apache.org
Authored: Tue Feb 17 17:14:58 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Tue Feb 17 17:14:58 2015 -0600

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../main/java/org/apache/hadoop/ipc/Server.java |  1 +
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 3 files changed, 72 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/685af8a3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c1caf5f..d8a85f7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -953,6 +953,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of
 the object from S3. (Dan Hecht via stevel).
 
+HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
+full. (Ming Ma via kihwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/685af8a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 475fb11..893e0eb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -666,6 +666,7 @@ public abstract class Server {
 assert !running;
 readSelector.wakeup();
 try {
+  super.interrupt();
   super.join();
 } catch (InterruptedException ie) {
   Thread.currentThread().interrupt();

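The one-line addition above matters because the Reader thread can be parked inside a blocking put() on a full call queue, where readSelector.wakeup() has no effect; only an interrupt unblocks it so the subsequent join() can return. A minimal, self-contained sketch of that behaviour (plain JDK concurrency, not the Hadoop Server classes):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class FullQueueShutdownDemo {
  public static void main(String[] args) throws InterruptedException {
    // A queue of capacity 1 that is already full, like a saturated RPC call queue.
    final BlockingQueue<String> callQueue = new ArrayBlockingQueue<String>(1);
    callQueue.put("pending-call");

    Thread reader = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          callQueue.put("next-call"); // blocks forever: nothing drains the queue during shutdown
        } catch (InterruptedException ie) {
          System.out.println("reader unblocked by interrupt, exiting"); // the only way out
        }
      }
    }, "reader");
    reader.start();

    Thread.sleep(100);  // let the reader block on put()
    reader.interrupt(); // without this, the following join() would hang
    reader.join();
    System.out.println("shutdown completed");
  }
}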
http://git-wip-us.apache.org/repos/asf/hadoop/blob/685af8a3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index c1b1bfb..2db8522 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -38,9 +38,16 @@ import java.lang.reflect.Proxy;
 import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -1013,6 +1020,67 @@ public class TestRPC {
 }
   }
 
+  /**
+   *  Verify the RPC server can shutdown properly when callQueue is full.
+   */
+  @Test (timeout=30000)
+  public void testRPCServerShutdown() throws Exception {
+final int numClients = 3;
+final List<Future<Void>> res = new ArrayList<Future<Void>>();
+final ExecutorService executorService =
+Executors.newFixedThreadPool(numClients);
+final Configuration conf = new Configuration();
+conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+final Server server = new RPC.Builder(conf)
+.setProtocol(TestProtocol.class).setInstance(new TestImpl())
+.setBindAddress(ADDRESS).setPort(0)
+.setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true)
+.build();
+server.start();
+
+final TestProtocol proxy =
+RPC.getProxy(TestProtocol.class, TestProtocol.versionID,
+NetUtils.getConnectAddress(server), conf);
+try {
+  // start a sleep RPC 

hadoop git commit: HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and SnapshotDiffInfo. Contributed by Ray Chiang.

2015-02-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f92a4904b -> 6d6c68456


HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and 
SnapshotDiffInfo. Contributed by Ray Chiang.

(cherry picked from commit 6dc8812a95bf369ec1f2e3d8a9473033172736cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d6c6845
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d6c6845
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d6c6845

Branch: refs/heads/branch-2
Commit: 6d6c684567794d30ac7019ef8a0084db7276c86f
Parents: f92a490
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Feb 17 14:47:53 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Feb 17 14:49:03 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../hadoop/hdfs/server/datanode/DataXceiverServer.java  | 4 ++--
 .../hdfs/server/namenode/snapshot/SnapshotDiffInfo.java | 9 +
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6c6845/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a13014a..436a605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7795. Show warning if not all favored nodes were chosen by namenode
 (kihwal)
 
+HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and
+SnapshotDiffInfo. (Ray Chiang via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6c6845/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 9bf95eb..caf6eaa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -252,9 +252,9 @@ class DataXceiverServer implements Runnable {
   // be set true before calling this method.
   synchronized void restartNotifyPeers() {
 assert (datanode.shouldRun == true && datanode.shutdownForUpgrade);
-for (Peer p : peers.keySet()) {
+for (Thread t : peers.values()) {
   // interrupt each and every DataXceiver thread.
-  peers.get(p).interrupt();
+  t.interrupt();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6c6845/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
index 197b8ae..a576c57 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
@@ -187,12 +187,13 @@ class SnapshotDiffInfo {
*/
   public SnapshotDiffReport generateReport() {
 List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
-for (INode node : diffMap.keySet()) {
-  diffReportList.add(new DiffReportEntry(DiffType.MODIFY, diffMap
-  .get(node), null));
+for (Map.Entry<INode, byte[][]> drEntry : diffMap.entrySet()) {
+  INode node = drEntry.getKey();
+  byte[][] path = drEntry.getValue();
+  diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
   if (node.isDirectory()) {
 List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
-diffMap.get(node), isFromEarlier(), renameMap);
+path, isFromEarlier(), renameMap);
 diffReportList.addAll(subList);
   }
 }

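Both hunks above replace keySet() iteration followed by get() with a single pass over values() or entrySet(), saving one hash lookup per element and making the loop read more directly. A standalone sketch of the idiom using plain JDK collections (not the HDFS classes themselves):

import java.util.HashMap;
import java.util.Map;

public class EntrySetIdiomDemo {
  public static void main(String[] args) {
    Map<String, Integer> sizes = new HashMap<String, Integer>();
    sizes.put("a.txt", 3);
    sizes.put("b.txt", 7);

    // Before: one extra hash lookup per key on top of the iteration itself.
    for (String name : sizes.keySet()) {
      System.out.println(name + " = " + sizes.get(name));
    }

    // After: entrySet() yields key and value together, no extra lookups.
    for (Map.Entry<String, Integer> e : sizes.entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }

    // When only the values are needed (as in DataXceiverServer), values() is enough.
    for (Integer size : sizes.values()) {
      System.out.println(size);
    }
  }
}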


hadoop git commit: HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and SnapshotDiffInfo. Contributed by Ray Chiang.

2015-02-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 409113d8f -> 6dc8812a9


HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and 
SnapshotDiffInfo. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dc8812a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dc8812a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dc8812a

Branch: refs/heads/trunk
Commit: 6dc8812a95bf369ec1f2e3d8a9473033172736cd
Parents: 409113d
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Feb 17 14:47:53 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Feb 17 14:47:53 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../hadoop/hdfs/server/datanode/DataXceiverServer.java  | 4 ++--
 .../hdfs/server/namenode/snapshot/SnapshotDiffInfo.java | 9 +
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc8812a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 391005c..308b61f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7795. Show warning if not all favored nodes were chosen by namenode
 (kihwal)
 
+HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and
+SnapshotDiffInfo. (Ray Chiang via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc8812a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 9bf95eb..caf6eaa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -252,9 +252,9 @@ class DataXceiverServer implements Runnable {
   // be set true before calling this method.
   synchronized void restartNotifyPeers() {
 assert (datanode.shouldRun == true && datanode.shutdownForUpgrade);
-for (Peer p : peers.keySet()) {
+for (Thread t : peers.values()) {
   // interrupt each and every DataXceiver thread.
-  peers.get(p).interrupt();
+  t.interrupt();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc8812a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
index 197b8ae..a576c57 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
@@ -187,12 +187,13 @@ class SnapshotDiffInfo {
*/
   public SnapshotDiffReport generateReport() {
 List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
-for (INode node : diffMap.keySet()) {
-  diffReportList.add(new DiffReportEntry(DiffType.MODIFY, diffMap
-  .get(node), null));
+for (Map.Entry<INode, byte[][]> drEntry : diffMap.entrySet()) {
+  INode node = drEntry.getKey();
+  byte[][] path = drEntry.getValue();
+  diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
   if (node.isDirectory()) {
 List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
-diffMap.get(node), isFromEarlier(), renameMap);
+path, isFromEarlier(), renameMap);
 diffReportList.addAll(subList);
   }
 }



hadoop git commit: YARN-3207. Secondary filter matches entites which do not have the key being filtered for. Contributed by Zhijie Shen

2015-02-17 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 685af8a3d -> 57db50cbe


YARN-3207. Secondary filter matches entites which do not have the key
being filtered for. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57db50cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57db50cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57db50cb

Branch: refs/heads/trunk
Commit: 57db50cbe3ce42618ad6d6869ae337d15b261f4e
Parents: 685af8a
Author: Xuan xg...@apache.org
Authored: Tue Feb 17 18:17:29 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Tue Feb 17 18:17:29 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java  | 2 +-
 .../hadoop/yarn/server/timeline/TimelineStoreTestUtils.java   | 3 +++
 3 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57db50cb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 66543cd..cbba046 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -608,6 +608,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2749. Fix some testcases from TestLogAggregationService fails in 
trunk. 
 (Xuan Gong via junping_du)
 
+YARN-3207. Secondary filter matches entites which do not have the key being
+filtered for. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57db50cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index 5f153bd..9fd2cfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -672,7 +672,7 @@ public class LeveldbTimelineStore extends AbstractService
 if (v == null) {
   Set<Object> vs = entity.getPrimaryFilters()
   .get(filter.getName());
-  if (vs != null && !vs.contains(filter.getValue())) {
+  if (vs == null || !vs.contains(filter.getValue())) {
 filterPassed = false;
 break;
   }

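The corrected condition treats a missing primary-filter key (vs == null) the same as a non-matching value, so an entity that never set the key no longer slips past the secondary filter. A small self-contained sketch of the fixed predicate (illustrative only, not the LevelDB store code):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class SecondaryFilterDemo {
  // True only if the entity carries the key and the value matches.
  static boolean matches(Map<String, Set<Object>> primaryFilters, String name, Object value) {
    Set<Object> vs = primaryFilters.get(name);
    // Old form checked (vs != null && !vs.contains(value)), so a missing key was never rejected.
    return !(vs == null || !vs.contains(value));
  }

  public static void main(String[] args) {
    Map<String, Set<Object>> withKey = new HashMap<String, Set<Object>>();
    withKey.put("user", Collections.<Object>singleton("alice"));
    Map<String, Set<Object>> withoutKey = new HashMap<String, Set<Object>>();

    System.out.println(matches(withKey, "user", "alice"));    // true
    System.out.println(matches(withKey, "user", "bob"));      // false
    System.out.println(matches(withoutKey, "user", "alice")); // false only after the fix
  }
}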
http://git-wip-us.apache.org/repos/asf/hadoop/blob/57db50cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
index 6f15b92..c99786d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
@@ -767,6 +767,9 @@ public class TimelineStoreTestUtils {
 
 entities = getEntitiesWithFilters("type_1", userFilter, badTestingFilters);
 assertEquals(0, entities.size());
+
+entities = getEntitiesWithFilters("type_5", null, badTestingFilters);
+assertEquals(0, entities.size());
   }
 
   public void testGetEvents() throws IOException {



hadoop git commit: YARN-3207. Secondary filter matches entites which do not have the key being filtered for. Contributed by Zhijie Shen

2015-02-17 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d6c68456 -> ba18adbb2


YARN-3207. Secondary filter matches entites which do not have the key
being filtered for. Contributed by Zhijie Shen

(cherry picked from commit 57db50cbe3ce42618ad6d6869ae337d15b261f4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba18adbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba18adbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba18adbb

Branch: refs/heads/branch-2
Commit: ba18adbb27c37a8fa92223a412ce65eaa462d18b
Parents: 6d6c684
Author: Xuan xg...@apache.org
Authored: Tue Feb 17 18:17:29 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Tue Feb 17 18:18:56 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java  | 2 +-
 .../hadoop/yarn/server/timeline/TimelineStoreTestUtils.java   | 3 +++
 3 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba18adbb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b97db6..7ac4e33 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -572,6 +572,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2749. Fix some testcases from TestLogAggregationService fails in 
trunk. 
 (Xuan Gong via junping_du)
 
+YARN-3207. Secondary filter matches entites which do not have the key being
+filtered for. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba18adbb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index 5f153bd..9fd2cfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -672,7 +672,7 @@ public class LeveldbTimelineStore extends AbstractService
 if (v == null) {
   Set<Object> vs = entity.getPrimaryFilters()
   .get(filter.getName());
-  if (vs != null && !vs.contains(filter.getValue())) {
+  if (vs == null || !vs.contains(filter.getValue())) {
 filterPassed = false;
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba18adbb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
index 6f15b92..c99786d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
@@ -767,6 +767,9 @@ public class TimelineStoreTestUtils {
 
 entities = getEntitiesWithFilters("type_1", userFilter, badTestingFilters);
 assertEquals(0, entities.size());
+
+entities = getEntitiesWithFilters("type_5", null, badTestingFilters);
+assertEquals(0, entities.size());
   }
 
   public void testGetEvents() throws IOException {



hadoop git commit: MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure conditions. Contributed by Devaraj K.

2015-02-17 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ba18adbb2 -> 9523b52da


MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure conditions. 
Contributed by Devaraj K.

(cherry picked from commit 7c782047c609b29178945bd566a0d162e64dbfdb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9523b52d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9523b52d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9523b52d

Branch: refs/heads/branch-2
Commit: 9523b52da5942cd047ffe351386f7dc9f058040d
Parents: ba18adb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Feb 18 15:45:52 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Feb 18 15:46:06 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../TestClientProtocolProviderImpls.java| 71 ++--
 2 files changed, 25 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9523b52d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 031d256..1332cd7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -134,6 +134,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6221. Stringifier is left unclosed in 
Chain#getChainElementConf().
 (Ted Yu via ozawa)
 
+MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure 
+conditions. (Devaraj K via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9523b52d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index e71c038..6ad76e9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -18,30 +18,22 @@
 
 package org.apache.hadoop.mapreduce;
 
-import java.io.IOException;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import junit.framework.TestCase;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.LocalJobRunner;
+import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.junit.Test;
 
-public class TestClientProtocolProviderImpls extends TestCase {
+public class TestClientProtocolProviderImpls {
 
   @Test
   public void testClusterWithLocalClientProvider() throws Exception {
-
 Configuration conf = new Configuration();
-
-try {
-  conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
-  new Cluster(conf);
-  fail("Cluster should not be initialized with incorrect framework name");
-} catch (IOException e) {
-
-}
-
 conf.set(MRConfig.FRAMEWORK_NAME, "local");
 Cluster cluster = new Cluster(conf);
 assertTrue(cluster.getClient() instanceof LocalJobRunner);
@@ -50,57 +42,38 @@ public class TestClientProtocolProviderImpls extends 
TestCase {
 
   @Test
   public void testClusterWithJTClientProvider() throws Exception {
-
 Configuration conf = new Configuration();
 try {
-  conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
-  new Cluster(conf);
-  fail("Cluster should not be initialized with incorrect framework name");
-
-} catch (IOException e) {
-
-}
-
-try {
   conf.set(MRConfig.FRAMEWORK_NAME, "classic");
   conf.set(JTConfig.JT_IPC_ADDRESS, "local");
   new Cluster(conf);
-  fail("Cluster with classic Framework name shouldnot use local JT address");
-
+  fail("Cluster with classic Framework name should not use "
+  + "local JT address");
 } catch (IOException e) {
-
+  assertTrue(e.getMessage().contains(
+  "Cannot initialize Cluster. Please check"));
 }
+  }
 
-try {
-  conf = new Configuration();
-  conf.set(MRConfig.FRAMEWORK_NAME, "classic");
-  conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
-  Cluster cluster = new Cluster(conf);
-  cluster.close();
- 

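The rewritten test no longer swallows IOException in an empty catch block: it calls fail() when no exception is thrown and pins the assertion to the expected message. A generic JUnit 4 sketch of that pattern, assuming only JUnit on the classpath (the initialize() method here is a stand-in, not the Hadoop Cluster API):

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;

import org.junit.Test;

public class FailureConditionPatternTest {
  // Stand-in for code under test that is expected to reject bad input.
  private void initialize(String framework) throws IOException {
    throw new IOException("Cannot initialize Cluster. Please check framework: " + framework);
  }

  @Test
  public void rejectsBadFramework() throws Exception {
    try {
      initialize("classic");
      // Without this fail(), the test would silently pass if no exception were thrown.
      fail("initialization should have failed");
    } catch (IOException e) {
      // Pin the assertion to the message so an unrelated IOException still fails the test.
      assertTrue(e.getMessage().contains("Cannot initialize Cluster. Please check"));
    }
  }
}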
hadoop git commit: MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure conditions. Contributed by Devaraj K.

2015-02-17 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 57db50cbe -> 7c782047c


MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure conditions. 
Contributed by Devaraj K.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c782047
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c782047
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c782047

Branch: refs/heads/trunk
Commit: 7c782047c609b29178945bd566a0d162e64dbfdb
Parents: 57db50c
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Feb 18 15:45:52 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Feb 18 15:45:52 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../TestClientProtocolProviderImpls.java| 71 ++--
 2 files changed, 25 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c782047/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 50e067c..e944d82 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -376,6 +376,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6221. Stringifier is left unclosed in 
Chain#getChainElementConf().
 (Ted Yu via ozawa)
 
+MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure 
+conditions. (Devaraj K via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c782047/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index e71c038..6ad76e9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -18,30 +18,22 @@
 
 package org.apache.hadoop.mapreduce;
 
-import java.io.IOException;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import junit.framework.TestCase;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.LocalJobRunner;
+import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.junit.Test;
 
-public class TestClientProtocolProviderImpls extends TestCase {
+public class TestClientProtocolProviderImpls {
 
   @Test
   public void testClusterWithLocalClientProvider() throws Exception {
-
 Configuration conf = new Configuration();
-
-try {
-  conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
-  new Cluster(conf);
-  fail("Cluster should not be initialized with incorrect framework name");
-} catch (IOException e) {
-
-}
-
 conf.set(MRConfig.FRAMEWORK_NAME, "local");
 Cluster cluster = new Cluster(conf);
 assertTrue(cluster.getClient() instanceof LocalJobRunner);
@@ -50,57 +42,38 @@ public class TestClientProtocolProviderImpls extends 
TestCase {
 
   @Test
   public void testClusterWithJTClientProvider() throws Exception {
-
 Configuration conf = new Configuration();
 try {
-  conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
-  new Cluster(conf);
-  fail("Cluster should not be initialized with incorrect framework name");
-
-} catch (IOException e) {
-
-}
-
-try {
   conf.set(MRConfig.FRAMEWORK_NAME, "classic");
   conf.set(JTConfig.JT_IPC_ADDRESS, "local");
   new Cluster(conf);
-  fail("Cluster with classic Framework name shouldnot use local JT address");
-
+  fail("Cluster with classic Framework name should not use "
+  + "local JT address");
 } catch (IOException e) {
-
+  assertTrue(e.getMessage().contains(
+  "Cannot initialize Cluster. Please check"));
 }
+  }
 
-try {
-  conf = new Configuration();
-  conf.set(MRConfig.FRAMEWORK_NAME, "classic");
-  conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
-  Cluster cluster = new Cluster(conf);
-  cluster.close();
-} catch (IOException e) {
-
-}
+  @Test
+  public void 

[2/4] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
--
diff --git a/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm 
b/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
deleted file mode 100644
index a8b408c..000
--- a/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
+++ /dev/null
@@ -1,439 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-
-  ---
-  Yarn Scheduler Load Simulator (SLS)
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Yarn Scheduler Load Simulator (SLS)
-
-%{toc|section=1|fromDepth=0}
-
-* Overview
-
-** Overview
-
-  The Yarn scheduler is a fertile area of interest with different
-  implementations, e.g., Fifo, Capacity and Fair schedulers. Meanwhile, several
-  optimizations are also made to improve scheduler performance for different
-  scenarios and workload. Each scheduler algorithm has its own set of features,
-  and drives scheduling decisions by many factors, such as fairness, capacity
-  guarantee, resource availability, etc. It is very important to evaluate a
-  scheduler algorithm very well before we deploy in a production cluster.
-  Unfortunately, currently it is non-trivial to evaluate a scheduler algorithm.
-  Evaluating in a real cluster is always time and cost consuming, and it is
-  also very hard to find a large-enough cluster. Hence, a simulator which can
-  predict how well a scheduler algorithm for some specific workload would be
-  quite useful.
-
-  The Yarn Scheduler Load Simulator (SLS) is such a tool, which can simulate
-  large-scale Yarn clusters and application loads in a single machine. This
-  simulator would be invaluable in furthering Yarn by providing a tool for
-  researchers and developers to prototype new scheduler features and predict
-  their behavior and performance with reasonable amount of confidence,
-  thereby aiding rapid innovation.
-
-  The simulator will exercise the real Yarn ResourceManager removing the
-  network factor by simulating NodeManagers and ApplicationMasters
-  via handling and dispatching NM/AMs heartbeat events from within
-  the same JVM. To keep tracking of scheduler behavior and performance, a
-  scheduler wrapper will wrap the real scheduler.
-
-  The size of the cluster and the application load can be loaded from
-  configuration files, which are generated from job history files directly by
-  adopting {{{https://hadoop.apache.org/docs/stable/rumen.html}Apache Rumen}}.
-
-  The simulator will produce real time metrics while executing, including:
-
-  * Resource usages for whole cluster and each queue, which can be utilized to
-configure cluster and queue's capacity.
-
-  * The detailed application execution trace (recorded in relation to simulated
-time), which can be analyzed to understand/validate the scheduler behavior
-(individual jobs turn around time, throughput, fairness, capacity 
guarantee,
-etc.).
-
-  * Several key metrics of scheduler algorithm, such as time cost of each
-scheduler operation (allocate, handle, etc.), which can be utilized by 
Hadoop
-developers to find the code spots and scalability limits.
-
-** Goals
-
-  * Exercise the scheduler at scale without a real cluster using real job
-traces.
-
-  * Being able to simulate real workloads.
-
-** Architecture
-
-  The following figure illustrates the implementation architecture of the
-  simulator.
-
-[images/sls_arch.png] The architecture of the simulator
-
-  The simulator takes input of workload traces, and fetches the cluster and
-  applications information. For each NM and AM, the simulator builds a 
simulator
-  to simulate their running. All NM/AM simulators run in a thread pool. The
-  simulator reuses Yarn Resource Manager, and builds a wrapper out of the
-  scheduler. The Scheduler Wrapper can track the scheduler behaviors and
-  generates several logs, which are the outputs of the simulator and can be
-  further analyzed.
-
-** Usecases
-
-  * Engineering
-
-* Verify correctness of scheduler algorithm under load
-
-* Cheap/practical way for finding code hotspots/critical-path.
-
-* Validate the impact of changes and new features.
-
-* Determine what drives the scheduler scalability limits.
-
-  []
-
-  * QA
-
-* Validate scheduler behavior for large clusters and several workload
-

[4/4] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-17 Thread aw
HADOOP-11593. Convert site documentation from apt to markdown (stragglers) 
(Masatake Iwasaki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6fc1f3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6fc1f3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6fc1f3e

Branch: refs/heads/trunk
Commit: b6fc1f3e4355be913b7d4f6ccd48c0c26b66d039
Parents: 7c78204
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 17 21:30:24 2015 -1000
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Feb 17 21:30:24 2015 -1000

--
 .../hadoop-auth/src/site/apt/BuildingIt.apt.vm  |   70 --
 .../src/site/apt/Configuration.apt.vm   |  377 ---
 .../hadoop-auth/src/site/apt/Examples.apt.vm|  133 ---
 .../hadoop-auth/src/site/apt/index.apt.vm   |   59 -
 .../hadoop-auth/src/site/markdown/BuildingIt.md |   56 +
 .../src/site/markdown/Configuration.md  |  341 ++
 .../hadoop-auth/src/site/markdown/Examples.md   |  109 ++
 .../hadoop-auth/src/site/markdown/index.md  |   43 +
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../hadoop-kms/src/site/apt/index.apt.vm| 1020 --
 .../hadoop-kms/src/site/markdown/index.md.vm|  864 +++
 hadoop-project/src/site/apt/index.apt.vm|   73 --
 hadoop-project/src/site/markdown/index.md.vm|   72 ++
 .../hadoop-openstack/src/site/apt/index.apt.vm  |  686 
 .../hadoop-openstack/src/site/markdown/index.md |  544 ++
 .../src/site/resources/css/site.css |   30 +
 .../src/site/apt/SchedulerLoadSimulator.apt.vm  |  439 
 .../src/site/markdown/SchedulerLoadSimulator.md |  357 ++
 .../src/site/apt/HadoopStreaming.apt.vm |  792 --
 .../src/site/markdown/HadoopStreaming.md.vm |  559 ++
 20 files changed, 2978 insertions(+), 3649 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
--
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm 
b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
deleted file mode 100644
index 2ca2f0a..000
--- a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
+++ /dev/null
@@ -1,70 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
-
-* Requirements
-
-  * Java 6+
-
-  * Maven 3+
-
-  * Kerberos KDC (for running Kerberos test cases)
-
-* Building
-
-  Use Maven goals: clean, test, compile, package, install
-
-  Available profiles: docs, testKerberos
-
-* Testing
-
-  By default Kerberos testcases are not run.
-
-  The requirements to run Kerberos testcases are a running KDC, a keytab
-  file with a client principal and a kerberos principal.
-
-  To run Kerberos testcases use the testKerberos Maven profile:
-
-+---+
-$ mvn test -PtestKerberos
-+---+
-
-  The following Maven -D options can be used to change the default
-  values:
-
-  * hadoop-auth.test.kerberos.realm: default value LOCALHOST
-
-  * hadoop-auth.test.kerberos.client.principal: default value client
-
-  * hadoop-auth.test.kerberos.server.principal: default value
-HTTP/localhost (it must start 'HTTP/')
-
-  * hadoop-auth.test.kerberos.keytab.file: default value
-${HOME}/${USER}.keytab
-
-** Generating Documentation
-
-  To create the documentation use the docs Maven profile:
-
-+---+
-$ mvn package -Pdocs
-+---+
-
-  The generated documentation is available at
-  hadoop-auth/target/site/.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm 
b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
deleted file mode 100644
index 88248e5..000
--- a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
+++ /dev/null
@@ 

[3/4] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
new file mode 100644
index 000..44b5bfb
--- /dev/null
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -0,0 +1,864 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+#set ( $H3 = '###' )
+#set ( $H4 = '' )
+#set ( $H5 = '#' )
+
+Hadoop Key Management Server (KMS) - Documentation Sets
+===
+
+Hadoop KMS is a cryptographic key management server based on Hadoop's 
**KeyProvider** API.
+
+It provides client and server components that communicate over HTTP using a REST API.
+
+The client is a KeyProvider implementation that interacts with the KMS using the KMS HTTP REST API.
+
+KMS and its client have built-in security and they support HTTP SPNEGO 
Kerberos authentication and HTTPS secure transport.
+
+KMS is a Java web-application and it runs using a pre-configured Tomcat 
bundled with the Hadoop distribution.
+
+KMS Client Configuration
+
+
+The KMS client `KeyProvider` uses the **kms** scheme, and the embedded URL 
must be the URL of the KMS. For example, for a KMS running on 
`http://localhost:16000/kms`, the KeyProvider URI is 
`kms://http@localhost:16000/kms`. And, for a KMS running on 
`https://localhost:16000/kms`, the KeyProvider URI is 
`kms://https@localhost:16000/kms`
+
+KMS
+---
+
+$H3 KMS Configuration
+
+Configure the KMS backing KeyProvider properties in the 
`etc/hadoop/kms-site.xml` configuration file:
+
+```xml
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
+    <value>kms.keystore.password</value>
+  </property>
+```
+
+The password file is looked up in Hadoop's configuration directory via the classpath.
+
+NOTE: You need to restart the KMS for the configuration changes to take effect.
+
+$H3 KMS Cache
+
+KMS caches keys for a short period of time to avoid excessive hits to the underlying key provider.
+
+The cache is enabled by default (it can be disabled by setting the `hadoop.kms.cache.enable` boolean property to false).
+
+The cache is used with the following 3 methods only, `getCurrentKey()` and 
`getKeyVersion()` and `getMetadata()`.
+
+For the `getCurrentKey()` method, cached entries are kept for a maximum of 30000 milliseconds regardless of the number of times the key is accessed (to avoid stale keys being considered current).
+
+For the `getKeyVersion()` method, cached entries are kept with a default inactivity timeout of 600000 milliseconds (10 mins). This timeout is configurable via the following property in the `etc/hadoop/kms-site.xml` configuration file:
+
+```xml
+   <property>
+     <name>hadoop.kms.cache.enable</name>
+     <value>true</value>
+   </property>
+
+   <property>
+     <name>hadoop.kms.cache.timeout.ms</name>
+     <value>600000</value>
+   </property>
+
+   <property>
+     <name>hadoop.kms.current.key.cache.timeout.ms</name>
+     <value>30000</value>
+   </property>
+```
+
+$H3 KMS Aggregated Audit logs
+
+Audit logs are aggregated for API accesses to the GET\_KEY\_VERSION, 
GET\_CURRENT\_KEY, DECRYPT\_EEK, GENERATE\_EEK operations.
+
+Entries are grouped by the (user,key,operation) combined key for a 
configurable aggregation interval after which the number of accesses to the 
specified end-point by the user for a given key is flushed to the audit log.
+
+The Aggregation interval is configured via the property :
+
+  <property>
+    <name>hadoop.kms.aggregation.delay.ms</name>
+    <value>10000</value>
+  </property>
+
+$H3 Start/Stop the KMS
+
+To start/stop KMS use KMS's bin/kms.sh script. For example:
+
+hadoop-${project.version} $ sbin/kms.sh start
+
+NOTE: Invoking the script without any parameters lists all possible parameters (start, stop, run, etc.). The `kms.sh` script is a wrapper for Tomcat's `catalina.sh` script that sets the environment variables and Java System properties required to run KMS.
+
+$H3 Embedded Tomcat Configuration
+
+To configure the embedded Tomcat go to the `share/hadoop/kms/tomcat/conf`.
+
+KMS pre-configures the 

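The KMS client section above introduces the kms:// KeyProvider URI scheme. A hedged sketch of obtaining a provider for such a URI through Hadoop's KeyProviderFactory, assuming a test KMS is listening on localhost:16000:

import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KmsClientSketch {
  public static void main(String[] args) throws Exception {
    // The kms:// scheme embeds the HTTP(S) URL of the KMS, as described above.
    URI kmsUri = URI.create("kms://http@localhost:16000/kms"); // assumed local test KMS
    Configuration conf = new Configuration();

    KeyProvider provider = KeyProviderFactory.get(kmsUri, conf);
    try {
      List<String> keys = provider.getKeys(); // round-trips to the KMS REST API
      System.out.println("keys visible to this client: " + keys);
    } finally {
      provider.close();
    }
  }
}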
[1/4] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-17 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7c782047c -> b6fc1f3e4


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm 
b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
new file mode 100644
index 000..0b64586
--- /dev/null
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -0,0 +1,559 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+#set ( $H3 = '###' )
+#set ( $H4 = '' )
+#set ( $H5 = '#' )
+
+Hadoop Streaming
+
+
+* [Hadoop Streaming](#Hadoop_Streaming)
+* [Hadoop Streaming](#Hadoop_Streaming)
+* [How Streaming Works](#How_Streaming_Works)
+* [Streaming Command Options](#Streaming_Command_Options)
+* [Specifying a Java Class as the 
Mapper/Reducer](#Specifying_a_Java_Class_as_the_MapperReducer)
+* [Packaging Files With Job 
Submissions](#Packaging_Files_With_Job_Submissions)
+* [Specifying Other Plugins for 
Jobs](#Specifying_Other_Plugins_for_Jobs)
+* [Setting Environment Variables](#Setting_Environment_Variables)
+* [Generic Command Options](#Generic_Command_Options)
+* [Specifying Configuration Variables with the -D 
Option](#Specifying_Configuration_Variables_with_the_-D_Option)
+* [Specifying Directories](#Specifying_Directories)
+* [Specifying Map-Only Jobs](#Specifying_Map-Only_Jobs)
+* [Specifying the Number of 
Reducers](#Specifying_the_Number_of_Reducers)
+* [Customizing How Lines are Split into Key/Value 
Pairs](#Customizing_How_Lines_are_Split_into_KeyValue_Pairs)
+* [Working with Large Files and 
Archives](#Working_with_Large_Files_and_Archives)
+* [Making Files Available to 
Tasks](#Making_Files_Available_to_Tasks)
+* [Making Archives Available to 
Tasks](#Making_Archives_Available_to_Tasks)
+* [More Usage Examples](#More_Usage_Examples)
+* [Hadoop Partitioner Class](#Hadoop_Partitioner_Class)
+* [Hadoop Comparator Class](#Hadoop_Comparator_Class)
+* [Hadoop Aggregate Package](#Hadoop_Aggregate_Package)
+* [Hadoop Field Selection Class](#Hadoop_Field_Selection_Class)
+* [Frequently Asked Questions](#Frequently_Asked_Questions)
+* [How do I use Hadoop Streaming to run an arbitrary set of (semi) 
independent 
tasks?](#How_do_I_use_Hadoop_Streaming_to_run_an_arbitrary_set_of_semi_independent_tasks)
+* [How do I process files, one per 
map?](#How_do_I_process_files_one_per_map)
+* [How many reducers should I use?](#How_many_reducers_should_I_use)
+* [If I set up an alias in my shell script, will that work after 
-mapper?](#If_I_set_up_an_alias_in_my_shell_script_will_that_work_after_-mapper)
+* [Can I use UNIX pipes?](#Can_I_use_UNIX_pipes)
+* [What do I do if I get the No space left on device 
error?](#What_do_I_do_if_I_get_the_No_space_left_on_device_error)
+* [How do I specify multiple input 
directories?](#How_do_I_specify_multiple_input_directories)
+* [How do I generate output files with gzip 
format?](#How_do_I_generate_output_files_with_gzip_format)
+* [How do I provide my own input/output format with 
streaming?](#How_do_I_provide_my_own_inputoutput_format_with_streaming)
+* [How do I parse XML documents using 
streaming?](#How_do_I_parse_XML_documents_using_streaming)
+* [How do I update counters in streaming 
applications?](#How_do_I_update_counters_in_streaming_applications)
+* [How do I update status in streaming 
applications?](#How_do_I_update_status_in_streaming_applications)
+* [How do I get the Job variables in a streaming job's 
mapper/reducer?](#How_do_I_get_the_Job_variables_in_a_streaming_jobs_mapperreducer)
+
+Hadoop Streaming
+
+
+Hadoop streaming is a utility that comes with the Hadoop distribution. The 
utility allows you to create and run Map/Reduce jobs with any executable or 
script as the mapper and/or the reducer. For example:
+
+hadoop jar hadoop-streaming-${project.version}.jar \
+  -input myInputDirs \
+  -output myOutputDir \
+  -mapper /bin/cat \
+  -reducer /usr/bin/wc
+
+How Streaming Works

hadoop git commit: HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)

2015-02-17 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk f24a56787 -> f0412de1c


HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0412de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0412de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0412de1

Branch: refs/heads/trunk
Commit: f0412de1c1d42b3c2a92531f81d97a24df920523
Parents: f24a567
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Tue Feb 17 21:28:49 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Tue Feb 17 21:28:49 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/BookKeeperJournalManager.java |  15 +-
 .../bkjournal/TestBookKeeperJournalManager.java | 153 ++-
 3 files changed, 163 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0412de1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fcf5994..f28e41e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -639,6 +639,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)
 
+HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0412de1/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 51905c0..89fa84c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -152,6 +152,13 @@ public class BookKeeperJournalManager implements 
JournalManager {
= "dfs.namenode.bookkeeperjournal.readEntryTimeoutSec";
   public static final int BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT = 5;
 
+  public static final String BKJM_BOOKKEEPER_ACK_QUORUM_SIZE 
+= "dfs.namenode.bookkeeperjournal.ack.quorum-size";
+
+  public static final String BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC
+= "dfs.namenode.bookkeeperjournal.addEntryTimeoutSec";
+  public static final int BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT = 5;
+
   private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
@@ -162,6 +169,8 @@ public class BookKeeperJournalManager implements 
JournalManager {
   private final MaxTxId maxTxId;
   private final int ensembleSize;
   private final int quorumSize;
+  private final int ackQuorumSize;
+  private final int addEntryTimeout;
   private final String digestpw;
   private final int speculativeReadTimeout;
   private final int readEntryTimeout;
@@ -184,6 +193,9 @@ public class BookKeeperJournalManager implements 
JournalManager {
BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
 quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
  BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
+ackQuorumSize = conf.getInt(BKJM_BOOKKEEPER_ACK_QUORUM_SIZE, quorumSize);
+addEntryTimeout = conf.getInt(BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC,
+ BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT);
 speculativeReadTimeout = conf.getInt(
  BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS,
  BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT);
@@ -216,6 +228,7 @@ public class BookKeeperJournalManager implements 
JournalManager {
   ClientConfiguration clientConf = new ClientConfiguration();
   clientConf.setSpeculativeReadTimeout(speculativeReadTimeout);
   clientConf.setReadEntryTimeout(readEntryTimeout);
+  clientConf.setAddEntryTimeout(addEntryTimeout);
   bkc = new BookKeeper(clientConf, zkc);
 } catch (KeeperException e) {
   throw new IOException("Error initializing zk", e);
@@ -403,7 +416,7 @@ public class BookKeeperJournalManager implements 
JournalManager {
 // bookkeeper errored on last stream, clean up 

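HDFS-4266 introduces two configuration keys, dfs.namenode.bookkeeperjournal.ack.quorum-size (defaulting to the write quorum size) and dfs.namenode.bookkeeperjournal.addEntryTimeoutSec. A sketch of setting them on a Configuration before the journal manager is constructed; the ensemble and write-quorum key names are assumed from the existing code, and all values are illustrative:

import org.apache.hadoop.conf.Configuration;

public class BkjmQuorumConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    conf.setInt("dfs.namenode.bookkeeperjournal.ensemble-size", 3);      // existing key (assumed)
    conf.setInt("dfs.namenode.bookkeeperjournal.quorum-size", 2);        // existing key (assumed)
    conf.setInt("dfs.namenode.bookkeeperjournal.ack.quorum-size", 2);    // new in HDFS-4266
    conf.setInt("dfs.namenode.bookkeeperjournal.addEntryTimeoutSec", 5); // new in HDFS-4266

    System.out.println("ack quorum = "
        + conf.getInt("dfs.namenode.bookkeeperjournal.ack.quorum-size", -1));
  }
}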
[1/2] hadoop git commit: HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of the object from S3. (Dan Hecht via stevel).

2015-02-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2cbac36fd -> 701b96ca8
  refs/heads/trunk f0412de1c -> 826267f78


HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of the 
object from S3. (Dan Hecht via stevel).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/701b96ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/701b96ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/701b96ca

Branch: refs/heads/branch-2
Commit: 701b96ca8e9a89d51ee47a470e524307fea3a035
Parents: 2cbac36
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 16:36:32 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 16:36:32 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 20 
 2 files changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/701b96ca/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4871f45..a8b38ed 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned
 to SERVICE_NOT_RESPONDING (Ming Ma via vinayakumarb)
 
+HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of
+the object from S3. (Dan Hecht via stevel).
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/701b96ca/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 4c56b82..685026e 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -37,14 +37,13 @@ public class S3AInputStream extends FSInputStream {
   private long pos;
   private boolean closed;
   private S3ObjectInputStream wrappedStream;
-  private S3Object wrappedObject;
   private FileSystem.Statistics stats;
   private AmazonS3Client client;
   private String bucket;
   private String key;
   private long contentLength;
   public static final Logger LOG = S3AFileSystem.LOG;
-
+  public static final long CLOSE_THRESHOLD = 4096;
 
   public S3AInputStream(String bucket, String key, long contentLength, 
AmazonS3Client client,
 FileSystem.Statistics stats) {
@@ -55,12 +54,11 @@ public class S3AInputStream extends FSInputStream {
 this.stats = stats;
 this.pos = 0;
 this.closed = false;
-this.wrappedObject = null;
 this.wrappedStream = null;
   }
 
   private void openIfNeeded() throws IOException {
-if (wrappedObject == null) {
+if (wrappedStream == null) {
   reopen(0);
 }
   }
@@ -90,8 +88,7 @@ public class S3AInputStream extends FSInputStream {
 GetObjectRequest request = new GetObjectRequest(bucket, key);
 request.setRange(pos, contentLength-1);
 
-wrappedObject = client.getObject(request);
-wrappedStream = wrappedObject.getObjectContent();
+wrappedStream = client.getObject(request).getObjectContent();
 
 if (wrappedStream == null) {
   throw new IOException("Null IO stream");
@@ -192,8 +189,15 @@ public class S3AInputStream extends FSInputStream {
   public synchronized void close() throws IOException {
 super.close();
 closed = true;
-if (wrappedObject != null) {
-  wrappedObject.close();
+if (wrappedStream != null) {
+      if (contentLength - pos <= CLOSE_THRESHOLD) {
+        // Close, rather than abort, so that the http connection can be reused.
+        wrappedStream.close();
+      } else {
+        // Abort, rather than just close, the underlying stream.  Otherwise, the
+        // remaining object payload is read from S3 while closing the stream.
+        wrappedStream.abort();
+      }
 }
   }
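
To see what the new threshold buys, a hedged sketch of the access pattern this change targets: reading only the head of a large object and then closing the stream. With the patch, close() aborts the HTTP connection when more than CLOSE_THRESHOLD bytes remain instead of draining the rest of the object; the s3a path below is a placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3AHeadReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder object; credentials and other fs.s3a.* settings come from conf.
    Path object = new Path("s3a://example-bucket/large/object.bin");
    FileSystem fs = object.getFileSystem(conf);

    byte[] header = new byte[1024];
    try (FSDataInputStream in = fs.open(object)) {
      // Read just the first kilobyte of a possibly multi-gigabyte object.
      in.readFully(0, header);
      // Before this patch, close() would read the remaining payload from S3;
      // now the underlying stream is aborted because we are far from EOF.
    }
  }
}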
 



[6/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm
deleted file mode 100644
index c33f647..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm
+++ /dev/null
@@ -1,2709 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  MapReduce Application Master REST API's.
-  ---
-  ---
-  ${maven.build.timestamp}
-
-MapReduce Application Master REST API's.
-
-%{toc|section=1|fromDepth=0|toDepth=2}
-
-* Overview
-
-  The MapReduce Application Master REST API's allow the user to get status on 
the running MapReduce application master. Currently this is the equivalent to a 
running MapReduce job. The information includes the jobs the app master is 
running and all the job particulars like tasks, counters, configuration, 
attempts, etc. The application master should be accessed via the proxy.  This 
proxy is configurable to run either on the resource manager or on a separate 
host. The proxy URL usually looks like: http://<proxy http address:port>/proxy/{appid}.
-  
-* Mapreduce Application Master Info API
-
-  The MapReduce application master information resource provides overall 
information about that mapreduce application master. This includes application 
id, time it was started, user, name, etc. 
-
-** URI
-
-  Both of the following URI's give you the MapReduce application master 
information, from an application id identified by the appid value. 
-
---
-  * http://<proxy http address:port>/proxy/{appid}/ws/v1/mapreduce
-  * http://<proxy http address:port>/proxy/{appid}/ws/v1/mapreduce/info
---
-
-** HTTP Operations Supported
-
---
-  * GET
---
-
-** Query Parameters Supported
-
---
-  None
---
-
-** Elements of the info object
-
-  When you make a request for the mapreduce application master information, 
the information will be returned as an info object.
-
-*---+--+---+
-|| Item || Data Type   || Description   |
-*---+--+---+
-| appId| long | The application id |
-*---+--+---+
-| startedOn | long | The time the application started (in ms since 
epoch)|
-*---+--+---+
-| name | string | The name of the application |
-*---+--+---+
-| user | string | The user name of the user who started the application |
-*---+--+---+
-| elapsedTime | long | The time since the application was started (in ms)|
-*---+--+---+
-
-** Response Examples
-
-  JSON response
-
-  HTTP Request:
-
---
-  GET http://<proxy http address:port>/proxy/application_1326232085508_0003/ws/v1/mapreduce/info
---
-
-  Response Header:
-
-+---+
-  HTTP/1.1 200 OK
-  Content-Type: application/json
-  Transfer-Encoding: chunked
-  Server: Jetty(6.1.26)
-+---+
-
-  Response Body:
-
-+---+
-{   
-  "info" : {
-      "appId" : "application_1326232085508_0003",
-      "startedOn" : 1326238244047,
-      "user" : "user1",
-      "name" : "Sleep job",
-      "elapsedTime" : 32374
-   }
-}
-+---+
-
-  XML response
-
-  HTTP Request:
-
--
-  Accept: application/xml
-  GET http://<proxy http address:port>/proxy/application_1326232085508_0003/ws/v1/mapreduce/info
--
-
-  Response Header:
-
-+---+
-  HTTP/1.1 200 OK
-  Content-Type: application/xml
-  Content-Length: 223
-  Server: Jetty(6.1.26)
-+---+
-
-  Response Body:
-
-+---+
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<info>
-  <appId>application_1326232085508_0003</appId>
-  <name>Sleep job</name>
-  <user>user1</user>
-  <startedOn>1326238244047</startedOn>
-  <elapsedTime>32407</elapsedTime>
-</info>
-+---+
-
-* Jobs API
-
-  The jobs resource provides a list of the jobs running on this application 
master.  See also {{Job API}} 

[2/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
new file mode 100644
index 000..3cfa39d
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -0,0 +1,73 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Hadoop: Pluggable Shuffle and Pluggable Sort
+
+
+Introduction
+
+
+The pluggable shuffle and pluggable sort capabilities allow replacing the 
built in shuffle and sort logic with alternate implementations. Example use 
cases for this are: using a different application protocol other than HTTP such 
as RDMA for shuffling data from the Map nodes to the Reducer nodes; or 
replacing the sort logic with custom algorithms that enable Hash aggregation 
and Limit-N query.
+
+**IMPORTANT:** The pluggable shuffle and pluggable sort capabilities are 
experimental and unstable. This means the provided APIs may change and break 
compatibility in future versions of Hadoop.
+
+Implementing a Custom Shuffle and a Custom Sort
+---
+
+A custom shuffle implementation requires a
+`org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices.AuxiliaryService`
+implementation class running in the NodeManagers and a
+`org.apache.hadoop.mapred.ShuffleConsumerPlugin`
+implementation class running in the Reducer tasks.
+
+The default implementations provided by Hadoop can be used as references:
+
+* `org.apache.hadoop.mapred.ShuffleHandler`
+* `org.apache.hadoop.mapreduce.task.reduce.Shuffle`
+
+A custom sort implementation requires a 
`org.apache.hadoop.mapred.MapOutputCollector` implementation class running in 
the Mapper tasks and (optionally, depending on the sort implementation) a 
`org.apache.hadoop.mapred.ShuffleConsumerPlugin` implementation class running 
in the Reducer tasks.
+
+The default implementations provided by Hadoop can be used as references:
+
+* `org.apache.hadoop.mapred.MapTask$MapOutputBuffer`
+* `org.apache.hadoop.mapreduce.task.reduce.Shuffle`
+
+Configuration
+-
+
+Except for the auxiliary service running in the NodeManagers serving the 
shuffle (by default the `ShuffleHandler`), all the pluggable components run in 
the job tasks. This means, they can be configured on per job basis. The 
auxiliary service servicing the Shuffle must be configured in the NodeManagers 
configuration.
+
+### Job Configuration Properties (on per job basis):
+
+| **Property** | **Default Value** | **Explanation** |
+|: |: |: |
+| `mapreduce.job.reduce.shuffle.consumer.plugin.class` | 
`org.apache.hadoop.mapreduce.task.reduce.Shuffle` | The `ShuffleConsumerPlugin` 
implementation to use |
+| `mapreduce.job.map.output.collector.class` | 
`org.apache.hadoop.mapred.MapTask$MapOutputBuffer` | The `MapOutputCollector` 
implementation(s) to use |
+
+These properties can also be set in the `mapred-site.xml` to change the 
default values for all jobs.
+
+The collector class configuration may specify a comma-separated list of 
collector implementations. In this case, the map task will attempt to 
instantiate each in turn until one of the implementations successfully 
initializes. This can be useful if a given collector implementation is only 
compatible with certain types of keys or values, for example.
+
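
As a concrete illustration of the per-job wiring described above, a minimal sketch that sets the two properties on a job; the class names shown are the Hadoop defaults from the table, stand-ins for whatever custom ShuffleConsumerPlugin and MapOutputCollector a job would actually plug in (the server side is covered by the NodeManager section below).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class PluggableShuffleJobSetup {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Per-job plugin selection; replace the defaults with custom classes.
    conf.set("mapreduce.job.reduce.shuffle.consumer.plugin.class",
        "org.apache.hadoop.mapreduce.task.reduce.Shuffle");
    conf.set("mapreduce.job.map.output.collector.class",
        "org.apache.hadoop.mapred.MapTask$MapOutputBuffer");

    Job job = Job.getInstance(conf, "pluggable-shuffle-example");
    // ... configure mapper, reducer, input and output as usual, then submit.
    System.out.println("collector = "
        + job.getConfiguration().get("mapreduce.job.map.output.collector.class"));
  }
}
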
+### NodeManager Configuration properties, `yarn-site.xml` in all nodes:
+
+| **Property** | **Default Value** | **Explanation** |
+|: |: |: |
+| `yarn.nodemanager.aux-services` | `...,mapreduce_shuffle` | The auxiliary 
service name |
+| `yarn.nodemanager.aux-services.mapreduce_shuffle.class` | 
`org.apache.hadoop.mapred.ShuffleHandler` | The auxiliary service class to use |
+
+**IMPORTANT:** If setting an auxiliary service in addition the default
+`mapreduce_shuffle` service, then a new service key should be added to the
+`yarn.nodemanager.aux-services` property, for 

[4/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
new file mode 100644
index 000..ccc9590
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -0,0 +1,1156 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+MapReduce Tutorial
+==
+
+* [MapReduce Tutorial](#MapReduce_Tutorial)
+* [Purpose](#Purpose)
+* [Prerequisites](#Prerequisites)
+* [Overview](#Overview)
+* [Inputs and Outputs](#Inputs_and_Outputs)
+* [Example: WordCount v1.0](#Example:_WordCount_v1.0)
+* [Source Code](#Source_Code)
+* [Usage](#Usage)
+* [Walk-through](#Walk-through)
+* [MapReduce - User Interfaces](#MapReduce_-_User_Interfaces)
+* [Payload](#Payload)
+* [Mapper](#Mapper)
+* [Reducer](#Reducer)
+* [Partitioner](#Partitioner)
+* [Counter](#Counter)
+* [Job Configuration](#Job_Configuration)
+* [Task Execution & Environment](#Task_Execution__Environment)
+* [Memory Management](#Memory_Management)
+* [Map Parameters](#Map_Parameters)
+* [Shuffle/Reduce Parameters](#ShuffleReduce_Parameters)
+* [Configured Parameters](#Configured_Parameters)
+* [Task Logs](#Task_Logs)
+* [Distributing Libraries](#Distributing_Libraries)
+* [Job Submission and Monitoring](#Job_Submission_and_Monitoring)
+* [Job Control](#Job_Control)
+* [Job Input](#Job_Input)
+* [InputSplit](#InputSplit)
+* [RecordReader](#RecordReader)
+* [Job Output](#Job_Output)
+* [OutputCommitter](#OutputCommitter)
+* [Task Side-Effect Files](#Task_Side-Effect_Files)
+* [RecordWriter](#RecordWriter)
+* [Other Useful Features](#Other_Useful_Features)
+* [Submitting Jobs to Queues](#Submitting_Jobs_to_Queues)
+* [Counters](#Counters)
+* [DistributedCache](#DistributedCache)
+* [Profiling](#Profiling)
+* [Debugging](#Debugging)
+* [Data Compression](#Data_Compression)
+* [Skipping Bad Records](#Skipping_Bad_Records)
+* [Example: WordCount v2.0](#Example:_WordCount_v2.0)
+* [Source Code](#Source_Code)
+* [Sample Runs](#Sample_Runs)
+* [Highlights](#Highlights)
+
+Purpose
+---
+
+This document comprehensively describes all user-facing facets of the Hadoop 
MapReduce framework and serves as a tutorial.
+
+Prerequisites
+-
+
+Ensure that Hadoop is installed, configured and is running. More details:
+
+*   [Single Node 
Setup](../../hadoop-project-dist/hadoop-common/SingleCluster.html)
+for first-time users.
+
+*   [Cluster Setup](../../hadoop-project-dist/hadoop-common/ClusterSetup.html)
+for large, distributed clusters.
+
+Overview
+
+
+Hadoop MapReduce is a software framework for easily writing applications which 
process vast amounts of data (multi-terabyte data-sets) in-parallel on large 
clusters (thousands of nodes) of commodity hardware in a reliable, 
fault-tolerant manner.
+
+A MapReduce *job* usually splits the input data-set into independent chunks 
which are processed by the *map tasks* in a completely parallel manner. The 
framework sorts the outputs of the maps, which are then input to the *reduce 
tasks*. Typically both the input and the output of the job are stored in a 
file-system. The framework takes care of scheduling tasks, monitoring them and 
re-executes the failed tasks.
+
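
To make the map and reduce sides above concrete, a minimal sketch of a mapper/reducer pair against the org.apache.hadoop.mapreduce API (a word-count style example; the tutorial's own full WordCount walk-through follows later in the document).

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class MinimalMapReduce {

  // Map phase: emit (word, 1) for every token of the input line.
  public static class TokenMapper
      extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable offset, Text line, Context context)
        throws IOException, InterruptedException {
      StringTokenizer tokens = new StringTokenizer(line.toString());
      while (tokens.hasMoreTokens()) {
        word.set(tokens.nextToken());
        context.write(word, ONE);
      }
    }
  }

  // Reduce phase: the framework has already sorted and grouped the map output.
  public static class SumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text word, Iterable<IntWritable> counts, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable c : counts) {
        sum += c.get();
      }
      context.write(word, new IntWritable(sum));
    }
  }
}
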
+Typically the compute nodes and the storage nodes are the same, that is, the 
MapReduce framework and the Hadoop Distributed File System (see [HDFS 
Architecture Guide](../../hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) are 
running on the same set of nodes. This configuration allows the framework to 
effectively schedule tasks on the nodes where data is already 

[8/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b787e2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b787e2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b787e2f

Branch: refs/heads/trunk
Commit: 8b787e2fdbd0050c0345cf14b26af9d61049068f
Parents: 34b78d5
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 17 06:52:14 2015 -1000
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Feb 17 06:52:14 2015 -1000

--
 hadoop-mapreduce-project/CHANGES.txt|3 +
 .../src/site/apt/DistributedCacheDeploy.apt.vm  |  151 -
 .../src/site/apt/EncryptedShuffle.apt.vm|  320 ---
 .../src/site/apt/MapReduceTutorial.apt.vm   | 1605 ---
 ...pReduce_Compatibility_Hadoop1_Hadoop2.apt.vm |  114 -
 .../src/site/apt/MapredAppMasterRest.apt.vm | 2709 --
 .../src/site/apt/MapredCommands.apt.vm  |  233 --
 .../apt/PluggableShuffleAndPluggableSort.apt.vm |   98 -
 .../site/markdown/DistributedCacheDeploy.md.vm  |  119 +
 .../src/site/markdown/EncryptedShuffle.md   |  255 ++
 .../src/site/markdown/MapReduceTutorial.md  | 1156 
 .../MapReduce_Compatibility_Hadoop1_Hadoop2.md  |   69 +
 .../src/site/markdown/MapredAppMasterRest.md| 2397 
 .../src/site/markdown/MapredCommands.md |  153 +
 .../PluggableShuffleAndPluggableSort.md |   73 +
 .../src/site/apt/HistoryServerRest.apt.vm   | 2672 -
 .../src/site/markdown/HistoryServerRest.md  | 2361 +++
 17 files changed, 6586 insertions(+), 7902 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9ef7a32..aebc71e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -96,6 +96,9 @@ Trunk (Unreleased)
 
 MAPREDUCE-6250. deprecate sbin/mr-jobhistory-daemon.sh (aw)
 
+MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki
+via aw)
+
   BUG FIXES
 
 MAPREDUCE-6191. Improve clearing stale state of Java serialization

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/DistributedCacheDeploy.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/DistributedCacheDeploy.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/DistributedCacheDeploy.apt.vm
deleted file mode 100644
index 2195e10..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/DistributedCacheDeploy.apt.vm
+++ /dev/null
@@ -1,151 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Map Reduce Next Generation-${project.version} - Distributed Cache 
Deploy
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Hadoop MapReduce Next Generation - Distributed Cache Deploy
-
-* Introduction
-
-  The MapReduce application framework has rudimentary support for deploying a
-  new version of the MapReduce framework via the distributed cache. By setting
-  the appropriate configuration properties, users can run a different version
-  of MapReduce than the one initially deployed to the cluster. For example,
-  cluster administrators can place multiple versions of MapReduce in HDFS and
-  configure mapred-site.xml to specify which version jobs will use by
-  default. This allows the administrators to perform a rolling upgrade of the
-  MapReduce framework under certain conditions.
-
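
A hedged sketch of the kind of configuration the paragraph above alludes to. The two property names, mapreduce.application.framework.path and mapreduce.application.classpath, are the conventional ones for this feature and do not appear in this truncated excerpt; the archive location and classpath entries are purely illustrative.

import org.apache.hadoop.conf.Configuration;

public class FrameworkViaDistributedCacheSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Illustrative HDFS location of an uploaded MapReduce tarball; the alias
    // after '#' is how tasks will see the unpacked directory.
    conf.set("mapreduce.application.framework.path",
        "hdfs:///mapred/framework/hadoop-mapreduce.tar.gz#mr-framework");

    // Point task classpaths at the unpacked framework rather than the
    // version installed on the nodes.
    conf.set("mapreduce.application.classpath",
        "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:"
            + "$PWD/mr-framework/hadoop/share/hadoop/common/*");

    System.out.println(conf.get("mapreduce.application.framework.path"));
  }
}
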
-* Preconditions and Limitations
-
-  The support for deploying the MapReduce framework via the distributed cache
-  currently does not address the job client code used to submit and query
-  jobs. It also does not address the ShuffleHandler code that runs as an
-  auxilliary service within each NodeManager. As a result the following
- 

[2/2] hadoop git commit: HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of the object from S3. (Dan Hecht via stevel).

2015-02-17 Thread stevel
HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of the 
object from S3. (Dan Hecht via stevel).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826267f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826267f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826267f7

Branch: refs/heads/trunk
Commit: 826267f789df657c62f7f5909e5a0b1a7b102c34
Parents: f0412de
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 16:36:32 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 16:36:44 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 20 
 2 files changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826267f7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c3aafe8..0d1ef36 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -939,6 +939,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned
 to SERVICE_NOT_RESPONDING (Ming Ma via vinayakumarb)
 
+HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of
+the object from S3. (Dan Hecht via stevel).
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/826267f7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 4c56b82..685026e 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -37,14 +37,13 @@ public class S3AInputStream extends FSInputStream {
   private long pos;
   private boolean closed;
   private S3ObjectInputStream wrappedStream;
-  private S3Object wrappedObject;
   private FileSystem.Statistics stats;
   private AmazonS3Client client;
   private String bucket;
   private String key;
   private long contentLength;
   public static final Logger LOG = S3AFileSystem.LOG;
-
+  public static final long CLOSE_THRESHOLD = 4096;
 
   public S3AInputStream(String bucket, String key, long contentLength, 
AmazonS3Client client,
 FileSystem.Statistics stats) {
@@ -55,12 +54,11 @@ public class S3AInputStream extends FSInputStream {
 this.stats = stats;
 this.pos = 0;
 this.closed = false;
-this.wrappedObject = null;
 this.wrappedStream = null;
   }
 
   private void openIfNeeded() throws IOException {
-if (wrappedObject == null) {
+if (wrappedStream == null) {
   reopen(0);
 }
   }
@@ -90,8 +88,7 @@ public class S3AInputStream extends FSInputStream {
 GetObjectRequest request = new GetObjectRequest(bucket, key);
 request.setRange(pos, contentLength-1);
 
-wrappedObject = client.getObject(request);
-wrappedStream = wrappedObject.getObjectContent();
+wrappedStream = client.getObject(request).getObjectContent();
 
 if (wrappedStream == null) {
   throw new IOException("Null IO stream");
@@ -192,8 +189,15 @@ public class S3AInputStream extends FSInputStream {
   public synchronized void close() throws IOException {
 super.close();
 closed = true;
-if (wrappedObject != null) {
-  wrappedObject.close();
+if (wrappedStream != null) {
+      if (contentLength - pos <= CLOSE_THRESHOLD) {
+        // Close, rather than abort, so that the http connection can be reused.
+        wrappedStream.close();
+      } else {
+        // Abort, rather than just close, the underlying stream.  Otherwise, the
+        // remaining object payload is read from S3 while closing the stream.
+        wrappedStream.abort();
+      }
 }
   }
 



hadoop git commit: HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)

2015-02-17 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 35fecb530 - 2cbac36fd


HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)

(cherry picked from commit f0412de1c1d42b3c2a92531f81d97a24df920523)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cbac36f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cbac36f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cbac36f

Branch: refs/heads/branch-2
Commit: 2cbac36fd3eb1160baf53f643223f96d53d111df
Parents: 35fecb5
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Tue Feb 17 21:28:49 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Tue Feb 17 21:31:43 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/BookKeeperJournalManager.java |  15 +-
 .../bkjournal/TestBookKeeperJournalManager.java | 153 ++-
 3 files changed, 163 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cbac36f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 58561d9..b95eded 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -346,6 +346,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)
 
+HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cbac36f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index aecc464..16ffe52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -152,6 +152,13 @@ public class BookKeeperJournalManager implements 
JournalManager {
    = "dfs.namenode.bookkeeperjournal.readEntryTimeoutSec";
   public static final int BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT = 5;
 
+  public static final String BKJM_BOOKKEEPER_ACK_QUORUM_SIZE
+    = "dfs.namenode.bookkeeperjournal.ack.quorum-size";
+
+  public static final String BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC
+    = "dfs.namenode.bookkeeperjournal.addEntryTimeoutSec";
+  public static final int BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT = 5;
+
   private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
@@ -162,6 +169,8 @@ public class BookKeeperJournalManager implements 
JournalManager {
   private final MaxTxId maxTxId;
   private final int ensembleSize;
   private final int quorumSize;
+  private final int ackQuorumSize;
+  private final int addEntryTimeout;
   private final String digestpw;
   private final int speculativeReadTimeout;
   private final int readEntryTimeout;
@@ -184,6 +193,9 @@ public class BookKeeperJournalManager implements 
JournalManager {
BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
 quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
  BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
+ackQuorumSize = conf.getInt(BKJM_BOOKKEEPER_ACK_QUORUM_SIZE, quorumSize);
+addEntryTimeout = conf.getInt(BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC,
+ BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT);
 speculativeReadTimeout = conf.getInt(
  BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS,
  BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT);
@@ -216,6 +228,7 @@ public class BookKeeperJournalManager implements 
JournalManager {
   ClientConfiguration clientConf = new ClientConfiguration();
   clientConf.setSpeculativeReadTimeout(speculativeReadTimeout);
   clientConf.setReadEntryTimeout(readEntryTimeout);
+  clientConf.setAddEntryTimeout(addEntryTimeout);
   bkc = new BookKeeper(clientConf, zkc);
 } catch (KeeperException e) {
   throw new IOException("Error initializing zk", e);
@@ -403,7 +416,7 @@ public class BookKeeperJournalManager implements 

hadoop git commit: HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM documentation (Arshad Mohammad via aw)

2015-02-17 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 826267f78 - 34b78d51b


HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM documentation 
(Arshad Mohammad via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34b78d51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34b78d51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34b78d51

Branch: refs/heads/trunk
Commit: 34b78d51b5b3dba1988b46c47af1739a4ed7b339
Parents: 826267f
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 17 06:46:37 2015 -1000
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Feb 17 06:46:37 2015 -1000

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b78d51/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f28e41e..5e54731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -301,6 +301,9 @@ Trunk (Unreleased)
 HDFS-7791. dfs count -v should be added to quota documentation (Akira
 AJISAKA via aw)
 
+HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM
+documentation (Arshad Mohammad via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b78d51/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 0c84f75..a285fde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -371,7 +371,7 @@ Once the JournalNodes have been started, one must initially 
synchronize the two
   sufficient edits transactions to be able to start both NameNodes.
 
 * If you are converting a non-HA NameNode to be HA, you should run the
-  command *hdfs -initializeSharedEdits*, which will initialize the
+  command *hdfs namenode -initializeSharedEdits*, which will initialize the
   JournalNodes with the edits data from the local NameNode edits directories.
 
 At this point you may start both of your HA NameNodes as you normally would 
start a NameNode.



[1/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34b78d51b - 8b787e2fd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
new file mode 100644
index 000..8a78754
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
@@ -0,0 +1,2361 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+MapReduce History Server REST API's.
+
+
+* [MapReduce History Server REST API's.](#MapReduce_History_Server_REST_APIs.)
+* [Overview](#Overview)
+* [History Server Information API](#History_Server_Information_API)
+* [URI](#URI)
+* [HTTP Operations Supported](#HTTP_Operations_Supported)
+* [Query Parameters Supported](#Query_Parameters_Supported)
+* [Elements of the historyInfo 
object](#Elements_of_the_historyInfo_object)
+* [Response Examples](#Response_Examples)
+* [MapReduce API's](#MapReduce_APIs)
+* [Jobs API](#Jobs_API)
+* [Job API](#Job_API)
+* [Elements of the acls object](#Elements_of_the_acls_object)
+* [Job Attempts API](#Job_Attempts_API)
+* [Job Counters API](#Job_Counters_API)
+* [Job Conf API](#Job_Conf_API)
+* [Tasks API](#Tasks_API)
+* [Task API](#Task_API)
+* [Task Counters API](#Task_Counters_API)
+* [Task Attempts API](#Task_Attempts_API)
+* [Task Attempt API](#Task_Attempt_API)
+* [Task Attempt Counters API](#Task_Attempt_Counters_API)
+
+Overview
+
+
+The history server REST API's allow the user to get status on finished 
applications.
+
+History Server Information API
+--
+
+The history server information resource provides overall information about the 
history server.
+
+### URI
+
+Both of the following URI's give you the history server information, from an 
application id identified by the appid value.
+
+  * http://<history server http address:port>/ws/v1/history
+  * http://<history server http address:port>/ws/v1/history/info
+
+### HTTP Operations Supported
+
+  * GET
+
+### Query Parameters Supported
+
+  None
+
+### Elements of the *historyInfo* object
+
+| Item | Data Type | Description |
+|: |: |: |
+| startedOn | long | The time the history server was started (in ms since 
epoch) |
+| hadoopVersion | string | Version of hadoop common |
+| hadoopBuildVersion | string | Hadoop common build string with build version, 
user, and checksum |
+| hadoopVersionBuiltOn | string | Timestamp when hadoop common was built |
+
+### Response Examples
+
+**JSON response**
+
+HTTP Request:
+
+  GET http://<history server http address:port>/ws/v1/history/info
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+{   
+   "historyInfo" : {
+      "startedOn":1353512830963,
+      "hadoopVersionBuiltOn" : "Wed Jan 11 21:18:36 UTC 2012",
+      "hadoopBuildVersion" : "0.23.1-SNAPSHOT from 1230253 by user1 source checksum bb6e554c6d50b0397d826081017437a7",
+      "hadoopVersion" : "0.23.1-SNAPSHOT"
+   }
+}
+
+**XML response**
+
+HTTP Request:
+
+  GET http://<history server http address:port>/ws/v1/history/info
+  Accept: application/xml
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/xml
+  Content-Length: 330
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<historyInfo>
+  <startedOn>1353512830963</startedOn>
+  <hadoopVersion>0.23.1-SNAPSHOT</hadoopVersion>
+  <hadoopBuildVersion>0.23.1-SNAPSHOT from 1230253 by user1 source checksum bb6e554c6d50b0397d826081017437a7</hadoopBuildVersion>
+  <hadoopVersionBuiltOn>Wed Jan 11 21:18:36 UTC 2012</hadoopVersionBuiltOn>
+</historyInfo>
+
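
A hedged sketch of fetching the info resource documented above with plain java.net classes; the host and port are placeholders for a real JobHistoryServer web address.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HistoryServerInfoClient {
  public static void main(String[] args) throws IOException {
    // Placeholder address; substitute the history server's web address.
    URL url = new URL("http://historyserver.example.com:19888/ws/v1/history/info");

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/json");   // or application/xml

    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // prints the historyInfo document
      }
    } finally {
      conn.disconnect();
    }
  }
}
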
+MapReduce API's
+---
+
+The following list of resources apply to MapReduce.
+
+### Jobs API
+

[5/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredCommands.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredCommands.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredCommands.apt.vm
deleted file mode 100644
index e011563..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredCommands.apt.vm
+++ /dev/null
@@ -1,233 +0,0 @@
-~~ Licensed to the Apache Software Foundation (ASF) under one or more
-~~ contributor license agreements.  See the NOTICE file distributed with
-~~ this work for additional information regarding copyright ownership.
-~~ The ASF licenses this file to You under the Apache License, Version 2.0
-~~ (the License); you may not use this file except in compliance with
-~~ the License.  You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-
-  ---
-  MapReduce Commands Guide
-  ---
-  ---
-  ${maven.build.timestamp}
-
-MapReduce Commands Guide
-
-%{toc|section=1|fromDepth=2|toDepth=4}
-
-* Overview
-
-  MapReduce commands are invoked by the bin/mapred script. Running the
-  script without any arguments prints the description for all commands.
-
-   Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND
-
-   MapReduce has an option parsing framework that employs parsing generic
-   options as well as running classes.
-
-*-+---+
-|| COMMAND_OPTIONS|| Description  |
-*-+---+
-| --config confdir | Overwrites the default Configuration directory. Default
-|  | is $\{HADOOP_PREFIX\}/conf.
-*-+---+
-| --loglevel loglevel | Overwrites the log level. Valid log levels are FATAL,
-| | ERROR, WARN, INFO, DEBUG, and TRACE. Default is INFO.
-*-+---+
-| COMMAND COMMAND_OPTIONS | Various commands with their options are described
-| | in the following sections. The commands have been
-| | grouped into {{User Commands}} and
-| | {{Administration Commands}}.
-*-+---+
-
-* User Commands
-
-   Commands useful for users of a hadoop cluster.
-
-** pipes
-
-   Runs a pipes job.
-
-   Usage: mapred pipes [-conf path] [-jobconf key=value, key=value,
-   ...] [-input path] [-output path] [-jar jar file] [-inputformat
-   class] [-map class] [-partitioner class] [-reduce class] [-writer
-   class] [-program executable] [-reduces num]
-
-*++
-|| COMMAND_OPTION|| Description
-*++
-| -conf path   | Configuration for job
-*++
-| -jobconf key=value, key=value, ... | Add/override configuration for job
-*++
-| -input path  | Input directory
-*++
-| -output path | Output directory
-*++
-| -jar jar file| Jar filename
-*++
-| -inputformat class   | InputFormat class
-*++
-| -map class   | Java Map class
-*++
-| -partitioner class   | Java Partitioner
-*++
-| -reduce class| Java Reduce class
-*++
-| -writer class| Java RecordWriter

[7/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapReduceTutorial.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapReduceTutorial.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapReduceTutorial.apt.vm
deleted file mode 100644
index 9fb1056..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapReduceTutorial.apt.vm
+++ /dev/null
@@ -1,1605 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  MapReduce Tutorial
-  ---
-  ---
-  ${maven.build.timestamp}
-
-MapReduce Tutorial
-
-%{toc|section=1|fromDepth=0|toDepth=4}
-
-* Purpose
-
-  This document comprehensively describes all user-facing facets of
-  the Hadoop MapReduce framework and serves as a tutorial.
-
-* Prerequisites
-
-  Ensure that Hadoop is installed, configured and is running. More details:
-
-  * {{{../../hadoop-project-dist/hadoop-common/SingleCluster.html}
-Single Node Setup}} for first-time users.
-
-  * {{{../../hadoop-project-dist/hadoop-common/ClusterSetup.html}
-Cluster Setup}} for large, distributed clusters.
-
-* Overview
-
-  Hadoop MapReduce is a software framework for easily writing applications
-  which process vast amounts of data (multi-terabyte data-sets) in-parallel
-  on large clusters (thousands of nodes) of commodity hardware in a reliable,
-  fault-tolerant manner.
-
-  A MapReduce job usually splits the input data-set into independent chunks
-  which are processed by the map tasks in a completely parallel manner. The
-  framework sorts the outputs of the maps, which are then input to the reduce
-  tasks. Typically both the input and the output of the job are stored in
-  a file-system. The framework takes care of scheduling tasks, monitoring them
-  and re-executes the failed tasks.
-
-  Typically the compute nodes and the storage nodes are the same, that is,
-  the MapReduce framework and the Hadoop Distributed File System
-  (see {{{../../hadoop-project-dist/hadoop-hdfs/HdfsDesign.html}
-  HDFS Architecture Guide}}) are running on the same set of nodes. This
-  configuration allows the framework to effectively schedule tasks on the nodes
-  where data is already present, resulting in very high aggregate bandwidth
-  across the cluster.
-
-  The MapReduce framework consists of a single master ResourceManager,
-  one slave NodeManager per cluster-node, and MRAppMaster per
-  application (see {{{../../hadoop-yarn/hadoop-yarn-site/YARN.html}
-  YARN Architecture Guide}}).
-
-  Minimally, applications specify the input/output locations and supply map
-  and reduce functions via implementations of appropriate interfaces and/or
-  abstract-classes. These, and other job parameters, comprise the job
-  configuration.
-
-  The Hadoop job client then submits the job (jar/executable etc.) and
-  configuration to the ResourceManager which then assumes the
-  responsibility of distributing the software/configuration to the slaves,
-  scheduling tasks and monitoring them, providing status and diagnostic
-  information to the job-client.
-
-  Although the Hadoop framework is implemented in Java\u2122, MapReduce
-  applications need not be written in Java.
-
-  * {{{../../api/org/apache/hadoop/streaming/package-summary.html}
-Hadoop Streaming}} is a utility which allows users to create and run jobs
-with any executables (e.g. shell utilities) as the mapper and/or the
-reducer.
-
-  * {{{../../api/org/apache/hadoop/mapred/pipes/package-summary.html}
-Hadoop Pipes}} is a {{{http://www.swig.org/}SWIG}}-compatible C++ API to
-implement MapReduce applications (non JNI\u2122 based).
-
-* Inputs and Outputs
-
-  The MapReduce framework operates exclusively on \<key, value\> pairs,
-  that is, the framework views the input to the job as a set of \<key,
-  value\> pairs and produces a set of \<key, value\> pairs as the
-  output of the job, conceivably of different types.
-
-  The key and value classes have to be serializable by the
-  framework and hence need to implement the
-  {{{../../api/org/apache/hadoop/io/Writable.html}Writable}} interface.
-  Additionally, the key classes have to implement the
-  

[3/8] hadoop git commit: MAPREDUCE-6260. Convert site documentation to markdown (Masatake Iwasaki via aw)

2015-02-17 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b787e2f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
new file mode 100644
index 000..b11b0ae
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
@@ -0,0 +1,2397 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+MapReduce Application Master REST API's.
+
+
+* [MapReduce Application Master REST 
API's.](#MapReduce_Application_Master_REST_APIs.)
+* [Overview](#Overview)
+* [Mapreduce Application Master Info 
API](#Mapreduce_Application_Master_Info_API)
+* [Jobs API](#Jobs_API)
+* [Job API](#Job_API)
+* [Job Attempts API](#Job_Attempts_API)
+* [Job Counters API](#Job_Counters_API)
+* [Job Conf API](#Job_Conf_API)
+* [Tasks API](#Tasks_API)
+* [Task API](#Task_API)
+* [Task Counters API](#Task_Counters_API)
+* [Task Attempts API](#Task_Attempts_API)
+* [Task Attempt API](#Task_Attempt_API)
+* [Task Attempt Counters API](#Task_Attempt_Counters_API)
+
+Overview
+
+
+The MapReduce Application Master REST API's allow the user to get status on 
the running MapReduce application master. Currently this is the equivalent to a 
running MapReduce job. The information includes the jobs the app master is 
running and all the job particulars like tasks, counters, configuration, 
attempts, etc. The application master should be accessed via the proxy. This 
proxy is configurable to run either on the resource manager or on a separate 
host. The proxy URL usually looks like: `http://<proxy http address:port>/proxy/<appid>`.
+
+Mapreduce Application Master Info API
+-
+
+The MapReduce application master information resource provides overall 
information about that mapreduce application master. This includes application 
id, time it was started, user, name, etc.
+
+### URI
+
+Both of the following URI's give you the MapReduce application master 
information, from an application id identified by the appid value.
+
+  * http://<proxy http address:port>/proxy/{appid}/ws/v1/mapreduce
+  * http://<proxy http address:port>/proxy/{appid}/ws/v1/mapreduce/info
+
+### HTTP Operations Supported
+
+  * GET
+
+### Query Parameters Supported
+
+  None
+
+### Elements of the *info* object
+
+When you make a request for the mapreduce application master information, the 
information will be returned as an info object.
+
+| Item | Data Type | Description |
+|: |: |: |
+| appId | long | The application id |
+| startedOn | long | The time the application started (in ms since epoch) |
+| name | string | The name of the application |
+| user | string | The user name of the user who started the application |
+| elapsedTime | long | The time since the application was started (in ms) |
+
+### Response Examples
+
+**JSON response**
+
+HTTP Request:
+
+  GET http://<proxy http address:port>/proxy/application_1326232085508_0003/ws/v1/mapreduce/info
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+{   
+  "info" : {
+      "appId" : "application_1326232085508_0003",
+      "startedOn" : 1326238244047,
+      "user" : "user1",
+      "name" : "Sleep job",
+      "elapsedTime" : 32374
+   }
+}
+
+**XML response**
+
+HTTP Request:
+
+  Accept: application/xml
+  GET http://<proxy http address:port>/proxy/application_1326232085508_0003/ws/v1/mapreduce/info
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/xml
+  Content-Length: 223
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<info>
+  <appId>application_1326232085508_0003</appId>
+  <name>Sleep job</name>
+  <user>user1</user>
+  <startedOn>1326238244047</startedOn>
+  <elapsedTime>32407</elapsedTime>
+</info>
+
+Jobs API
+
+
+The jobs resource 

hadoop git commit: HADOOP-11575. Daemon log documentation is misleading (Naganarasimha G R via aw)

2015-02-17 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8b787e2fd - 72389c78d


HADOOP-11575. Daemon log documentation is misleading (Naganarasimha G R via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72389c78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72389c78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72389c78

Branch: refs/heads/trunk
Commit: 72389c78dba1e2f7727245838180fb51e5241075
Parents: 8b787e2
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 17 07:00:00 2015 -1000
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Feb 17 07:00:00 2015 -1000

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/log/LogLevel.java| 10 +-
 .../src/site/markdown/CommandsManual.md | 10 ++
 .../src/site/apt/YarnCommands.apt.vm| 21 +++-
 4 files changed, 26 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72389c78/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0d1ef36..1256ae5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -175,6 +175,9 @@ Trunk (Unreleased)
 HADOOP-11495. Convert site documentation from apt to markdown
 (Masatake Iwasaki via aw)
 
+HADOOP-11575. Daemon log documentation is misleading
+(Naganarasimha G R via aw)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72389c78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4749ce1..baf71b6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.util.ServletUtil;
 @InterfaceStability.Evolving
 public class LogLevel {
   public static final String USAGES = "\nUsage: General options are:\n"
-      + "\t[-getlevel <host:httpPort> <name>]\n"
-      + "\t[-setlevel <host:httpPort> <name> <level>]\n";
+      + "\t[-getlevel <host:httpPort> <classname>]\n"
+      + "\t[-setlevel <host:httpPort> <classname> <level>]\n";
 
   /**
* A command line implementation
@@ -106,7 +106,7 @@ public class LogLevel {
   if (logName != null) {
         out.println("<br /><hr /><h3>Results</h3>");
         out.println(MARKER
-            + "Submitted Log Name: <b>" + logName + "</b><br />");
+            + "Submitted Class Name: <b>" + logName + "</b><br />");
 
 Log log = LogFactory.getLog(logName);
 out.println(MARKER
@@ -131,10 +131,10 @@ public class LogLevel {
 }
 
     static final String FORMS = "\n<br /><hr /><h3>Get / Set</h3>"
-        + "\n<form>Log: <input type='text' size='50' name='log' /> "
+        + "\n<form>Class Name: <input type='text' size='50' name='log' /> "
         + "<input type='submit' value='Get Log Level' />"
         + "</form>"
-        + "\n<form>Log: <input type='text' size='50' name='log' /> "
+        + "\n<form>Class Name: <input type='text' size='50' name='log' /> "
         + "Level: <input type='text' name='level' /> "
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
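
Both the daemonlog command and the HTML form above talk to the daemon's /logLevel servlet. A hedged sketch of driving that endpoint directly; the daemon address and class name are placeholders, and the log/level query parameters are the ones used by the form fields in FORMS.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class DaemonLogLevelClient {
  public static void main(String[] args) throws IOException {
    // Placeholder daemon HTTP address and target logger class.
    String daemon = "http://namenode.example.com:50070";
    String classname = "org.apache.hadoop.hdfs.server.namenode.NameNode";

    // Same servlet that 'hadoop daemonlog -setlevel' connects to internally.
    URL url = new URL(daemon + "/logLevel?log="
        + URLEncoder.encode(classname, "UTF-8") + "&level=DEBUG");

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // HTML page echoing the submitted class and level
      }
    } finally {
      conn.disconnect();
    }
  }
}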

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72389c78/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index f0c0367..62235c5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -212,14 +212,16 @@ Commands useful for administrators of a hadoop cluster.
 
 ### `daemonlog`
 
-Usage: `hadoop daemonlog -getlevel <host:port> <name> ` Usage: `hadoop daemonlog -setlevel <host:port> <name> <level> `
+Usage: `hadoop daemonlog -getlevel <host:httpport> <classname> ` Usage: `hadoop daemonlog -setlevel <host:httpport> <classname> <level> `
 
 | COMMAND\_OPTION | Description |
 |: |: |
-| `-getlevel` *host:port* *name* | Prints the log level of the daemon running 
at *host:port*. This command internally connects to 
http://host:port/logLevel?log=name |
-| 

hadoop git commit: HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)

2015-02-17 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2f0f756b2 - f24a56787


HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24a5678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24a5678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24a5678

Branch: refs/heads/trunk
Commit: f24a56787a15e89a7c1e777b8043ab9ae8792505
Parents: 2f0f756
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Tue Feb 17 20:11:11 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Tue Feb 17 20:11:11 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 7 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24a5678/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8b234fe..fcf5994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -637,6 +637,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7604. Track and display failed DataNode storage locations in NameNode.
 (cnauroth)
 
+HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24a5678/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 84ab179..06d7bd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3814,14 +3814,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 writeLock();
+    boolean success = false;
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + src);
       FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
+      success = true;
     } finally {
       writeUnlock();
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, "setQuota", src);
     }
-    getEditLog().logSync();
   }
 
   /** Persist all metadata about this file.
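For context, a minimal client-side sketch of an operation that exercises this new audit path. The cluster URI, path, and quota values are illustrative assumptions, not part of the patch; the commit itself only changes the NameNode-side setQuota handling shown above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; replace with a real cluster URI.
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode.example.com:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Limit /user/alice to 100000 names and 1 TB of space. With this patch the
    // NameNode records an audit event (cmd=setQuota) on both success and failure.
    dfs.setQuota(new Path("/user/alice"), 100000L, 1024L * 1024 * 1024 * 1024);
    dfs.close();
  }
}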



hadoop git commit: HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)

2015-02-17 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 00fb0710b - 35fecb530


HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)

(cherry picked from commit f24a56787a15e89a7c1e777b8043ab9ae8792505)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35fecb53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35fecb53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35fecb53

Branch: refs/heads/branch-2
Commit: 35fecb530650fb7f37fa162f369bcdc5969549a7
Parents: 00fb071
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Tue Feb 17 20:11:11 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Tue Feb 17 20:16:34 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 7 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35fecb53/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a1b2053..58561d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -344,6 +344,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7604. Track and display failed DataNode storage locations in NameNode.
 (cnauroth)
 
+HDFS-7797. Add audit log for setQuota operation (Rakesh R via umamahesh)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35fecb53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 15dec21..40f0bf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3813,14 +3813,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 writeLock();
+    boolean success = false;
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + src);
       FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
+      success = true;
     } finally {
       writeUnlock();
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, "setQuota", src);
     }
-    getEditLog().logSync();
   }
 
   /** Persist all metadata about this file.



hadoop git commit: HADOOP-11596 moving entry in CHANGES.TXT up to improvements there, as it wasn't in 2.7 it was confusing diffs

2015-02-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13d1ba996 - 6be567094


HADOOP-11596 moving entry in CHANGES.TXT up to improvements there, as it wasn't in 2.7 & it was confusing diffs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be56709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be56709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be56709

Branch: refs/heads/trunk
Commit: 6be567094983057fa3532f301f7a37667eeae25a
Parents: 13d1ba9
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 20:06:05 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 20:06:05 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be56709/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0de835a..405e821 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -178,6 +178,9 @@ Trunk (Unreleased)
 HADOOP-11575. Daemon log documentation is misleading
 (Naganarasimha G R via aw)
 
+HADOOP-11596. Allow smart-apply-patch.sh to add new files in binary git
+patches (raviprak)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1
@@ -612,9 +615,6 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
 
-HADOOP-11596. Allow smart-apply-patch.sh to add new files in binary git
-patches (raviprak)
-
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



hadoop git commit: HDFS-7795. Show warning if not all favored nodes were chosen by namenode. Contributed by Kihwal Lee. (cherry picked from commit db6606223ca2e17aa7e1b2e2be13c1a19d8e7465)

2015-02-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f52fcdc2e - 8b37b4a78


HDFS-7795. Show warning if not all favored nodes were chosen by namenode. 
Contributed by Kihwal Lee.
(cherry picked from commit db6606223ca2e17aa7e1b2e2be13c1a19d8e7465)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b37b4a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b37b4a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b37b4a7

Branch: refs/heads/branch-2
Commit: 8b37b4a78b3eec139e19c8f0ae1c0311c53afe26
Parents: f52fcdc
Author: Kihwal Lee kih...@apache.org
Authored: Tue Feb 17 13:05:43 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Tue Feb 17 13:06:41 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 26 ++--
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b37b4a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b95eded..9bd4fa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -348,6 +348,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)
 
+HDFS-7795. Show warning if not all favored nodes were chosen by namenode
+(kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b37b4a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 3ed957b..fc9562f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -35,6 +35,7 @@ import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -1443,7 +1444,7 @@ public class DFSOutputStream extends FSOutputSummer
   ExtendedBlock blockCopy = new ExtendedBlock(block);
   blockCopy.setNumBytes(blockSize);
 
-  boolean[] targetPinnings = getPinnings(nodes);
+  boolean[] targetPinnings = getPinnings(nodes, true);
   // send the request
   new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], 
accessToken,
   dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
@@ -1537,20 +1538,29 @@ public class DFSOutputStream extends FSOutputSummer
   }
 }
 
-    private boolean[] getPinnings(DatanodeInfo[] nodes) {
+    private boolean[] getPinnings(DatanodeInfo[] nodes, boolean shouldLog) {
       if (favoredNodes == null) {
         return null;
       } else {
         boolean[] pinnings = new boolean[nodes.length];
+        HashSet<String> favoredSet =
+            new HashSet<String>(Arrays.asList(favoredNodes));
         for (int i = 0; i < nodes.length; i++) {
-          pinnings[i] = false;
-          for (int j = 0; j < favoredNodes.length; j++) {
-            if (nodes[i].getXferAddrWithHostname().equals(favoredNodes[j])) {
-              pinnings[i] = true;
-              break;
-            }
+          pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
+          if (DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug(nodes[i].getXferAddrWithHostname() +
+                " was chosen by name node (favored=" + pinnings[i] +
+                ").");
           }
         }
+        if (shouldLog && !favoredSet.isEmpty()) {
+          // There is one or more favored nodes that were not allocated.
+          DFSClient.LOG.warn(
+              "These favored nodes were specified but not chosen: " +
+              favoredSet +
+              " Specified favored nodes: " + Arrays.toString(favoredNodes));
+
+        }
         return pinnings;
       }
 }
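For reference, a minimal sketch of how a client supplies favored nodes in the first place; that list is what ends up in the favoredNodes array checked above. The host names, ports, and file parameters are illustrative assumptions, and the sketch uses the DistributedFileSystem create overload that accepts an InetSocketAddress[] of favored nodes.

import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://namenode.example.com:8020"), conf);
    // Ask the NameNode to place replicas on these datanodes if it can.
    InetSocketAddress[] favored = {
        new InetSocketAddress("dn1.example.com", 50010),
        new InetSocketAddress("dn2.example.com", 50010)
    };
    FSDataOutputStream out = dfs.create(new Path("/tmp/favored.txt"),
        FsPermission.getFileDefault(), true, 4096, (short) 2,
        128 * 1024 * 1024L, null, favored);
    out.writeBytes("hello");
    // If the NameNode could not honor some favored nodes, the client now logs
    // the warning added by this patch when the block is allocated.
    out.close();
  }
}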



hadoop git commit: HDFS-7795. Show warning if not all favored nodes were chosen by namenode. Contributed by Kihwal Lee.

2015-02-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 78a7e8d3a - db6606223


HDFS-7795. Show warning if not all favored nodes were chosen by namenode. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db660622
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db660622
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db660622

Branch: refs/heads/trunk
Commit: db6606223ca2e17aa7e1b2e2be13c1a19d8e7465
Parents: 78a7e8d
Author: Kihwal Lee kih...@apache.org
Authored: Tue Feb 17 13:05:43 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Tue Feb 17 13:05:43 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 26 ++--
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db660622/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5e54731..48eb61c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -644,6 +644,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-4266. BKJM: Separate write and ack quorum (Rakesh R via umamahesh)
 
+HDFS-7795. Show warning if not all favored nodes were chosen by namenode
+(kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db660622/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 7d29b3d..85d3410 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -35,6 +35,7 @@ import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -1443,7 +1444,7 @@ public class DFSOutputStream extends FSOutputSummer
   ExtendedBlock blockCopy = new ExtendedBlock(block);
   blockCopy.setNumBytes(blockSize);
 
-  boolean[] targetPinnings = getPinnings(nodes);
+  boolean[] targetPinnings = getPinnings(nodes, true);
   // send the request
   new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], 
accessToken,
   dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
@@ -1537,20 +1538,29 @@ public class DFSOutputStream extends FSOutputSummer
   }
 }
 
-    private boolean[] getPinnings(DatanodeInfo[] nodes) {
+    private boolean[] getPinnings(DatanodeInfo[] nodes, boolean shouldLog) {
       if (favoredNodes == null) {
         return null;
       } else {
         boolean[] pinnings = new boolean[nodes.length];
+        HashSet<String> favoredSet =
+            new HashSet<String>(Arrays.asList(favoredNodes));
         for (int i = 0; i < nodes.length; i++) {
-          pinnings[i] = false;
-          for (int j = 0; j < favoredNodes.length; j++) {
-            if (nodes[i].getXferAddrWithHostname().equals(favoredNodes[j])) {
-              pinnings[i] = true;
-              break;
-            }
+          pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
+          if (DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug(nodes[i].getXferAddrWithHostname() +
+                " was chosen by name node (favored=" + pinnings[i] +
+                ").");
           }
         }
+        if (shouldLog && !favoredSet.isEmpty()) {
+          // There is one or more favored nodes that were not allocated.
+          DFSClient.LOG.warn(
+              "These favored nodes were specified but not chosen: " +
+              favoredSet +
+              " Specified favored nodes: " + Arrays.toString(favoredNodes));
+
+        }
         return pinnings;
       }
 }



[2/2] hadoop git commit: HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)

2015-02-17 Thread stevel
HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78a7e8d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78a7e8d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78a7e8d3

Branch: refs/heads/trunk
Commit: 78a7e8d3a639a740ed8f2f6340b3921cd16ed16b
Parents: 72389c7
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 18:14:31 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 18:15:02 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../src/main/resources/core-default.xml | 71 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 77 +++-
 3 files changed, 146 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a7e8d3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1256ae5..6e43872 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -610,6 +610,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)
 
+HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a7e8d3/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7135ab8..5eb301a 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -690,12 +690,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
-  <name>fs.s3a.access.key</name>
+  <name>fs.s3a.awsAccessKeyId</name>
   <description>AWS access key ID. Omit for Role-based authentication.</description>
 </property>
 
 <property>
-  <name>fs.s3a.secret.key</name>
+  <name>fs.s3a.awsSecretAccessKey</name>
   <description>AWS secret key. Omit for Role-based authentication.</description>
 </property>
 
@@ -712,6 +712,46 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.endpoint</name>
+  <description>AWS S3 endpoint to connect to. An up-to-date list is
+    provided in the AWS Documentation: regions and endpoints. Without this
+    property, the standard region (s3.amazonaws.com) is assumed.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.host</name>
+  <description>Hostname of the (optional) proxy server for S3 connections.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.port</name>
+  <description>Proxy server port. If this property is not set
+    but fs.s3a.proxy.host is, port 80 or 443 is assumed (consistent with
+    the value of fs.s3a.connection.ssl.enabled).</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.username</name>
+  <description>Username for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.password</name>
+  <description>Password for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.domain</name>
+  <description>Domain for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.workstation</name>
+  <description>Workstation for authenticating with proxy server.</description>
+</property>
+
+<property>
   <name>fs.s3a.attempts.maximum</name>
   <value>10</value>
   <description>How many times we should retry commands on transient errors.</description>
@@ -731,6 +771,33 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.threads.max</name>
+  <value>256</value>
+  <description> Maximum number of concurrent active (part)uploads,
+    which each use a thread from the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.core</name>
+  <value>15</value>
+  <description>Number of core threads in the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.keepalivetime</name>
+  <value>60</value>
+  <description>Number of seconds a thread can be idle before being
+    terminated.</description>
+</property>
+
+<property>
+  <name>fs.s3a.max.total.tasks</name>
+  <value>1000</value>
+  <description>Number of (part)uploads allowed to the queue before
+    blocking additional uploads.</description>
+</property>
+
+<property>
   <name>fs.s3a.multipart.size</name>
   <value>104857600</value>

[1/2] hadoop git commit: HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)

2015-02-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 701b96ca8 - f52fcdc2e
  refs/heads/trunk 72389c78d - 78a7e8d3a


HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f52fcdc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f52fcdc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f52fcdc2

Branch: refs/heads/branch-2
Commit: f52fcdc2e023a3e1537bbb4ab65a91f2a28ab972
Parents: 701b96c
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 18:14:31 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 18:14:31 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../src/main/resources/core-default.xml | 71 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 77 +++-
 3 files changed, 146 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f52fcdc2/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a8b38ed..5fce31f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -202,6 +202,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)
 
+HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f52fcdc2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 000c764..46196ae 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -682,12 +682,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
-  <name>fs.s3a.access.key</name>
+  <name>fs.s3a.awsAccessKeyId</name>
   <description>AWS access key ID. Omit for Role-based authentication.</description>
 </property>
 
 <property>
-  <name>fs.s3a.secret.key</name>
+  <name>fs.s3a.awsSecretAccessKey</name>
   <description>AWS secret key. Omit for Role-based authentication.</description>
 </property>
 
@@ -704,6 +704,46 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.endpoint</name>
+  <description>AWS S3 endpoint to connect to. An up-to-date list is
+    provided in the AWS Documentation: regions and endpoints. Without this
+    property, the standard region (s3.amazonaws.com) is assumed.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.host</name>
+  <description>Hostname of the (optional) proxy server for S3 connections.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.port</name>
+  <description>Proxy server port. If this property is not set
+    but fs.s3a.proxy.host is, port 80 or 443 is assumed (consistent with
+    the value of fs.s3a.connection.ssl.enabled).</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.username</name>
+  <description>Username for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.password</name>
+  <description>Password for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.domain</name>
+  <description>Domain for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.workstation</name>
+  <description>Workstation for authenticating with proxy server.</description>
+</property>
+
+<property>
   <name>fs.s3a.attempts.maximum</name>
   <value>10</value>
   <description>How many times we should retry commands on transient errors.</description>
@@ -723,6 +763,33 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.threads.max</name>
+  <value>256</value>
+  <description> Maximum number of concurrent active (part)uploads,
+    which each use a thread from the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.core</name>
+  <value>15</value>
+  <description>Number of core threads in the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.keepalivetime</name>
+  <value>60</value>
+  <description>Number of seconds a thread can be idle before being
+    terminated.</description>
+</property>
+
+<property>
+  <name>fs.s3a.max.total.tasks</name>
+  <value>1000</value>
+  <description>Number of (part)uploads allowed to the queue before
+    blocking 

hadoop git commit: HADOOP-11596. Allow smart-apply-patch.sh to add new files in binary git patches (raviprak)

2015-02-17 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk db6606223 - 13d1ba996


HADOOP-11596. Allow smart-apply-patch.sh to add new files in binary git patches 
(raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d1ba99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d1ba99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d1ba99

Branch: refs/heads/trunk
Commit: 13d1ba9965236381e9014fce22120b999c36189a
Parents: db66062
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue Feb 17 11:13:47 2015 -0800
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue Feb 17 11:13:47 2015 -0800

--
 dev-support/smart-apply-patch.sh| 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d1ba99/dev-support/smart-apply-patch.sh
--
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index 3542fb4..03bc4f8 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -38,8 +38,8 @@ is_git_diff_with_prefix() {
 fi
     if [[ "$line" =~ ^\+\+\+\  ]] ||
        [[ "$line" =~ ^\-\-\-\  ]]; then
-      if ! [[ "$line" =~ ^[ab]/ ]]; then
-        return 1 # All +++ and --- lines must start with a/ or b/.
+      if ! [[ "$line" =~ ^[ab]/ || "$line" =~ ^/dev/null ]]; then
+        return 1 # All +++ and --- lines must start with a/ or b/ or be /dev/null.
       fi
     fi
   done < $1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d1ba99/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6e43872..0de835a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -612,6 +612,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
 
+HADOOP-11596. Allow smart-apply-patch.sh to add new files in binary git
+patches (raviprak)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



[2/2] hadoop git commit: HADOOP-11521. Make connection timeout configurable in s3a. (Thomas Demoor via stevel)

2015-02-17 Thread stevel
HADOOP-11521. Make connection timeout configurable in s3a. (Thomas Demoor via 
stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00b80958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00b80958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00b80958

Branch: refs/heads/trunk
Commit: 00b80958d862dbcc08d6f186c09963d3351ba0fd
Parents: 6be5670
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 20:00:00 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 20:06:27 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../hadoop-common/src/main/resources/core-default.xml  | 6 ++
 .../src/main/java/org/apache/hadoop/fs/s3a/Constants.java  | 4 
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java  | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 6 ++
 5 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b80958/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 405e821..c1caf5f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -615,6 +615,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
 
+HADOOP-11521. Make connection timeout configurable in s3a.
+(Thomas Demoor via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b80958/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 5eb301a..41121f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -758,6 +758,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.connection.establish.timeout</name>
+  <value>5000</value>
+  <description>Socket connection setup timeout in seconds.</description>
+</property>
+
+<property>
   <name>fs.s3a.connection.timeout</name>
   <value>5</value>
   <description>Socket connection timeout in seconds.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b80958/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index b6863bb..1d4f67b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -42,6 +42,10 @@ public class Constants {
   // number of times we should retry errors
   public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
   public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
+
+  // seconds until we give up trying to establish a connection to s3
+  public static final String ESTABLISH_TIMEOUT =
+      "fs.s3a.connection.establish.timeout";
+  public static final int DEFAULT_ESTABLISH_TIMEOUT = 5;
   
   // seconds until we give up on a connection to s3
   public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b80958/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 22350bc..4de5c13 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -174,6 +174,8 @@ public class S3AFileSystem extends FileSystem {
 awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
 awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
   DEFAULT_MAX_ERROR_RETRIES));
+awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
+DEFAULT_ESTABLISH_TIMEOUT));
 awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
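A small sketch of overriding the new property from client code; the chosen value is an illustrative assumption, expressed in the same units as the core-default.xml entry above, and S3AFileSystem.initialize() forwards it to the AWS client exactly as the hunk above shows.

import org.apache.hadoop.conf.Configuration;

public class S3AEstablishTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Tighten or relax how long s3a waits for a TCP connection to S3 to be
    // established, independently of the existing fs.s3a.connection.timeout.
    conf.setInt("fs.s3a.connection.establish.timeout", 10000);
    System.out.println(
        conf.getInt("fs.s3a.connection.establish.timeout", 5000));
  }
}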

[1/2] hadoop git commit: HADOOP-11521. Make connection timeout configurable in s3a. (Thomas Demoor via stevel)

2015-02-17 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8b37b4a78 - 411c8a7cd
  refs/heads/trunk 6be567094 - 00b80958d


HADOOP-11521. Make connection timeout configurable in s3a. (Thomas Demoor via 
stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/411c8a7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/411c8a7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/411c8a7c

Branch: refs/heads/branch-2
Commit: 411c8a7cda0d72f7e0a34fa1e273a065bb620293
Parents: 8b37b4a
Author: Steve Loughran ste...@apache.org
Authored: Tue Feb 17 20:00:00 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Tue Feb 17 20:00:00 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../hadoop-common/src/main/resources/core-default.xml  | 6 ++
 .../src/main/java/org/apache/hadoop/fs/s3a/Constants.java  | 4 
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java  | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 6 ++
 5 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/411c8a7c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5fce31f..ae6e78b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -204,6 +204,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
 
+HADOOP-11521. Make connection timeout configurable in s3a.
+(Thomas Demoor via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/411c8a7c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 46196ae..4d93bd9 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -750,6 +750,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.connection.establish.timeout</name>
+  <value>5000</value>
+  <description>Socket connection setup timeout in seconds.</description>
+</property>
+
+<property>
   <name>fs.s3a.connection.timeout</name>
   <value>5</value>
   <description>Socket connection timeout in seconds.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/411c8a7c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index b6863bb..1d4f67b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -42,6 +42,10 @@ public class Constants {
   // number of times we should retry errors
   public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
   public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
+
+  // seconds until we give up trying to establish a connection to s3
+  public static final String ESTABLISH_TIMEOUT =
+      "fs.s3a.connection.establish.timeout";
+  public static final int DEFAULT_ESTABLISH_TIMEOUT = 5;
   
   // seconds until we give up on a connection to s3
   public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/411c8a7c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 22350bc..4de5c13 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -174,6 +174,8 @@ public class S3AFileSystem extends FileSystem {
 awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
 awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
   DEFAULT_MAX_ERROR_RETRIES));
+

hadoop git commit: HDFS-6662. WebHDFS cannot open a file if its path contains %. Contributed by Gerson Carlos.

2015-02-17 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 411c8a7cd - f92a4904b


HDFS-6662. WebHDFS cannot open a file if its path contains %. Contributed by 
Gerson Carlos.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f92a4904
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f92a4904
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f92a4904

Branch: refs/heads/branch-2
Commit: f92a4904bf4822eb6e73e6d225dd2d40c7975315
Parents: 411c8a7
Author: Haohui Mai whe...@apache.org
Authored: Tue Feb 17 13:04:38 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Tue Feb 17 13:05:00 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../server/datanode/web/webhdfs/ParameterParser.java   |  2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  |  7 +++
 .../datanode/web/webhdfs/TestParameterParser.java  | 13 +
 4 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f92a4904/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9bd4fa8..a13014a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -673,6 +673,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
 (Chengbing Liu via yliu)
 
+HDFS-6662. WebHDFS cannot open a file if its path contains %.
+(Gerson Carlos via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f92a4904/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index e1930b0..5749504 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -50,7 +50,7 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = decoder.path().substring(WEBHDFS_PREFIX_LENGTH);
+this.path = 
QueryStringDecoder.decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH));
 this.params = decoder.parameters();
 this.conf = conf;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f92a4904/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index ca73506..87d47fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -102,6 +102,13 @@
   menus.change();
 }
 
+function encode_path(abs_path) {
+  abs_path = encodeURIComponent(abs_path);
+  var re = /%2F/g;
+  return abs_path.replace(re, '/');
+}
+
+abs_path = encode_path(abs_path);
 var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS';
 $.get(url).done(function(data) {
   var d = get_response(data, LocatedBlocks);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f92a4904/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 8b4235b..6a6c5d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -52,4 +52,17 @@ public class TestParameterParser {
     final Token<DelegationTokenIdentifier> tok2 = 

hadoop git commit: HDFS-6662. WebHDFS cannot open a file if its path contains %. Contributed by Gerson Carlos.

2015-02-17 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 00b80958d - 043e44bc3


HDFS-6662. WebHDFS cannot open a file if its path contains %. Contributed by 
Gerson Carlos.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/043e44bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/043e44bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/043e44bc

Branch: refs/heads/trunk
Commit: 043e44bc36fc7f7c59406d3722b0a93607b6fa49
Parents: 00b8095
Author: Haohui Mai whe...@apache.org
Authored: Tue Feb 17 13:04:38 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Tue Feb 17 13:04:38 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../server/datanode/web/webhdfs/ParameterParser.java   |  2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  |  7 +++
 .../datanode/web/webhdfs/TestParameterParser.java  | 13 +
 4 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/043e44bc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 48eb61c..391005c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -970,6 +970,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
 (Chengbing Liu via yliu)
 
+HDFS-6662. WebHDFS cannot open a file if its path contains %.
+(Gerson Carlos via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/043e44bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index e1930b0..5749504 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -50,7 +50,7 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = decoder.path().substring(WEBHDFS_PREFIX_LENGTH);
+this.path = 
QueryStringDecoder.decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH));
 this.params = decoder.parameters();
 this.conf = conf;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/043e44bc/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index ca73506..87d47fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -102,6 +102,13 @@
   menus.change();
 }
 
+function encode_path(abs_path) {
+  abs_path = encodeURIComponent(abs_path);
+  var re = /%2F/g;
+  return abs_path.replace(re, '/');
+}
+
+abs_path = encode_path(abs_path);
 var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS';
 $.get(url).done(function(data) {
   var d = get_response(data, LocatedBlocks);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/043e44bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 8b4235b..6a6c5d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -52,4 +52,17 @@ public class TestParameterParser {
 final TokenDelegationTokenIdentifier tok2 = testParser.delegationToken();
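A small standalone sketch of the decoding behavior the fix relies on, using Netty's QueryStringDecoder the same way the patched ParameterParser does; the example path is an assumption.

import io.netty.handler.codec.http.QueryStringDecoder;

public class PercentPathExample {
  public static void main(String[] args) {
    // A WebHDFS request for a file literally named "a%b" arrives percent-encoded.
    String rawUri = "/webhdfs/v1/user/alice/a%25b?op=OPEN";
    QueryStringDecoder decoder = new QueryStringDecoder(rawUri);
    // With the Netty version in use here, path() still carries the %25 escape,
    // so ParameterParser now decodes it before handing the path to HDFS.
    String encoded = decoder.path().substring("/webhdfs/v1".length());
    String decoded = QueryStringDecoder.decodeComponent(encoded);
    System.out.println(encoded); // /user/alice/a%25b
    System.out.println(decoded); // /user/alice/a%b
  }
}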

hadoop git commit: HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. (Chengbing Liu via yliu)

2015-02-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 441dfa486 - 54e33baaf


HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. 
(Chengbing Liu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54e33baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54e33baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54e33baa

Branch: refs/heads/branch-2
Commit: 54e33baaf68ed48865d1d082a79cbcfc52494e1f
Parents: 441dfa4
Author: yliu y...@apache.org
Authored: Tue Feb 17 07:50:14 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 17 07:50:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e33baa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6391b34..6bfa34c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -660,6 +660,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
 via aajisaka)
 
+HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
+(Chengbing Liu via yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e33baa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 00e9e98..8a743b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -49,9 +49,6 @@ import com.google.common.annotations.VisibleForTesting;
 public class URLConnectionFactory {
   private static final Log LOG = LogFactory.getLog(URLConnectionFactory.class);
 
-  /** SPNEGO authenticator */
-  private static final KerberosUgiAuthenticator AUTH = new 
KerberosUgiAuthenticator();
-
   /**
* Timeout for socket connects and reads
*/
@@ -161,8 +158,8 @@ public class URLConnectionFactory {
   }
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
   final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  return new AuthenticatedURL(AUTH, connConfigurator).openConnection(url,
-  authToken);
+  return new AuthenticatedURL(new KerberosUgiAuthenticator(),
+  connConfigurator).openConnection(url, authToken);
 } else {
   if (LOG.isDebugEnabled()) {
        LOG.debug("open URL connection");



hadoop git commit: HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. (Chengbing Liu via yliu)

2015-02-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9729b244d - 500e6a0f4


HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. 
(Chengbing Liu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e6a0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e6a0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e6a0f

Branch: refs/heads/trunk
Commit: 500e6a0f46d14a591d0ec082b6d26ee59bdfdf76
Parents: 9729b24
Author: yliu y...@apache.org
Authored: Tue Feb 17 07:46:33 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 17 07:46:33 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e6a0f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cc24dc4..8b234fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -957,6 +957,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
 via aajisaka)
 
+HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
+(Chengbing Liu via yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e6a0f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 00e9e98..8a743b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -49,9 +49,6 @@ import com.google.common.annotations.VisibleForTesting;
 public class URLConnectionFactory {
   private static final Log LOG = LogFactory.getLog(URLConnectionFactory.class);
 
-  /** SPNEGO authenticator */
-  private static final KerberosUgiAuthenticator AUTH = new 
KerberosUgiAuthenticator();
-
   /**
* Timeout for socket connects and reads
*/
@@ -161,8 +158,8 @@ public class URLConnectionFactory {
   }
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
   final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  return new AuthenticatedURL(AUTH, connConfigurator).openConnection(url,
-  authToken);
+  return new AuthenticatedURL(new KerberosUgiAuthenticator(),
+  connConfigurator).openConnection(url, authToken);
 } else {
   if (LOG.isDebugEnabled()) {
        LOG.debug("open URL connection");



hadoop git commit: MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. (Masatake Iwasaki via kasha)

2015-02-17 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 043e44bc3 - 409113d8f


MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. 
(Masatake Iwasaki via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/409113d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/409113d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/409113d8

Branch: refs/heads/trunk
Commit: 409113d8f97fcfdb96cb028dbb6a20c9a1df81b0
Parents: 043e44b
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Feb 17 14:38:00 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue Feb 17 14:38:00 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt |  3 +++
 .../org/apache/hadoop/mapred/gridmix/TestHighRamJob.java | 11 +--
 2 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/409113d8/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index aebc71e..50e067c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -175,6 +175,9 @@ Trunk (Unreleased)
 
 MAPREDUCE-4413. MR lib dir contains jdiff (which is gpl) (Nemon Lou via aw)
 
+MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. 
+(Masatake Iwasaki via kasha)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
 MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by

http://git-wip-us.apache.org/repos/asf/hadoop/blob/409113d8/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
--
diff --git 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
index 9cc84ea..179c941 100644
--- 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
+++ 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
@@ -28,6 +28,7 @@ import 
org.apache.hadoop.mapred.gridmix.DebugJobProducer.MockJob;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.rumen.JobStory;
@@ -92,15 +93,13 @@ public class TestHighRamJob {
 
 GridmixJob job = new DummyGridmixJob(simulatedJobConf, story);
 Job simulatedJob = job.getJob();
-Configuration simulatedConf = simulatedJob.getConfiguration();
+JobConf simulatedConf = (JobConf)simulatedJob.getConfiguration();
 
 // check if the high ram properties are not set
 assertEquals(expectedMapMB, 
- simulatedConf.getLong(MRJobConfig.MAP_MEMORY_MB,
-   MRJobConfig.DEFAULT_MAP_MEMORY_MB));
+ simulatedConf.getMemoryRequired(TaskType.MAP));
 assertEquals(expectedReduceMB, 
- simulatedConf.getLong(MRJobConfig.REDUCE_MEMORY_MB, 
-   MRJobConfig.DEFAULT_MAP_MEMORY_MB));
+ simulatedConf.getMemoryRequired(TaskType.REDUCE));
   }
   
   /**
@@ -192,4 +191,4 @@ public class TestHighRamJob {
 assertNotNull(failed);
     assertTrue("Exception expected for exceeding reduce memory limit!", failed);
   }
-}
\ No newline at end of file
+}



hadoop git commit: HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned to SERVICE_NOT_RESPONDING (Contributed by Ming Ma)

2015-02-17 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 500e6a0f4 - cf4b7f506


HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned to 
SERVICE_NOT_RESPONDING (Contributed by Ming Ma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf4b7f50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf4b7f50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf4b7f50

Branch: refs/heads/trunk
Commit: cf4b7f506dd338ecf2ed4c643b6a6a334e070fca
Parents: 500e6a0
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Feb 17 14:55:56 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Feb 17 14:55:56 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/ha/HealthMonitor.java | 35 ++
 .../org/apache/hadoop/ha/DummyHAService.java| 73 +---
 .../org/apache/hadoop/ha/TestHealthMonitor.java |  4 +-
 4 files changed, 94 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf4b7f50/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 522ec47..51305bb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -934,6 +934,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11467. KerberosAuthenticator can connect to a non-secure cluster.
 (yzhangal via rkanter)
 
+HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned
+to SERVICE_NOT_RESPONDING (Ming Ma via vinayakumarb)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf4b7f50/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index 0d1..8c87629 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -30,6 +30,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HealthCheckFailedException;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.util.Daemon;
 
@@ -201,18 +202,20 @@ public class HealthMonitor {
 status = proxy.getServiceStatus();
 proxy.monitorHealth();
 healthy = true;
-  } catch (HealthCheckFailedException e) {
-LOG.warn("Service health check failed for " + targetToMonitor
-+ ": " + e.getMessage());
-enterState(State.SERVICE_UNHEALTHY);
   } catch (Throwable t) {
-LOG.warn("Transport-level exception trying to monitor health of " +
-targetToMonitor + ": " + t.getLocalizedMessage());
-RPC.stopProxy(proxy);
-proxy = null;
-enterState(State.SERVICE_NOT_RESPONDING);
-Thread.sleep(sleepAfterDisconnectMillis);
-return;
+if (isHealthCheckFailedException(t)) {
+  LOG.warn("Service health check failed for " + targetToMonitor
+  + ": " + t.getMessage());
+  enterState(State.SERVICE_UNHEALTHY);
+} else {
+  LOG.warn("Transport-level exception trying to monitor health of " +
+  targetToMonitor + ": " + t.getCause() + " " + 
t.getLocalizedMessage());
+  RPC.stopProxy(proxy);
+  proxy = null;
+  enterState(State.SERVICE_NOT_RESPONDING);
+  Thread.sleep(sleepAfterDisconnectMillis);
+  return;
+}
   }
   
   if (status != null) {
@@ -225,7 +228,15 @@ public class HealthMonitor {
   Thread.sleep(checkIntervalMillis);
 }
   }
-  
+
+  private boolean isHealthCheckFailedException(Throwable t) {
+return ((t instanceof HealthCheckFailedException) ||
+(t instanceof RemoteException &&
+((RemoteException)t).unwrapRemoteException(
+HealthCheckFailedException.class) instanceof
+HealthCheckFailedException));
+  }
+
   private synchronized void setLastServiceStatus(HAServiceStatus status) {
 this.lastServiceState = status;
 for (ServiceStateCallback cb : serviceStateCallbacks) {
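
The key point of the fix is that a HealthCheckFailedException thrown inside the monitored service comes back to the monitor wrapped in an ipc RemoteException, so the old catch clause never matched it and the monitor dropped into SERVICE_NOT_RESPONDING. A small stand-alone sketch (not the committed code) of the unwrap check that the new isHealthCheckFailedException() performs:

// Hedged sketch: how a server-side HealthCheckFailedException is recognised
// even when it arrives wrapped in a RemoteException.
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapSketch {
  static boolean isHealthCheckFailure(Throwable t) {
    if (t instanceof HealthCheckFailedException) {
      return true;  // thrown locally by the proxy
    }
    if (t instanceof RemoteException) {
      // Exceptions raised in the remote HAServiceProtocol implementation
      // travel back as RemoteException; unwrap to recover the original type.
      return ((RemoteException) t).unwrapRemoteException(
          HealthCheckFailedException.class) instanceof HealthCheckFailedException;
    }
    return false;
  }

  public static void main(String[] args) {
    RemoteException wrapped = new RemoteException(
        HealthCheckFailedException.class.getName(), "service is unhealthy");
    System.out.println(isHealthCheckFailure(wrapped));                 // expected: true
    System.out.println(isHealthCheckFailure(new RuntimeException()));  // false
  }
}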


[1/2] hadoop git commit: HDFS-7703. Support favouredNodes for the append for new blocks ( Contributed by Vinayakumar B)

2015-02-17 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 54e33baaf -> 005e1df54


HDFS-7703. Support favouredNodes for the append for new blocks ( Contributed by 
Vinayakumar B)

(cherry picked from commit 89a544928083501625bc69f96b530040228f0a5f)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91a5d929
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91a5d929
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91a5d929

Branch: refs/heads/branch-2
Commit: 91a5d92916c6c1b0475d5794c3855b53b020d4ec
Parents: 54e33ba
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Feb 12 12:38:44 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Feb 17 15:19:03 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 53 +++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  7 ++-
 .../hadoop/hdfs/DistributedFileSystem.java  | 43 
 .../namenode/TestFavoredNodesEndToEnd.java  | 29 +++
 5 files changed, 121 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a5d929/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6bfa34c..a1b2053 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -324,6 +324,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)
 
+HDFS-7703. Support favouredNodes for the append for new blocks
+(vinayakumarb)
+
 HDFS-7694. FSDataInputStream should support unbuffer (cmccabe)
 
 HDFS-7684. The host:port settings of the daemons should be trimmed before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a5d929/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index d27197f..3c0ec99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1693,6 +1693,15 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if(LOG.isDebugEnabled()) {
   LOG.debug(src + ": masked=" + masked);
 }
+final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
+src, masked, flag, createParent, replication, blockSize, progress,
+buffersize, dfsClientConf.createChecksum(checksumOpt),
+getFavoredNodesStr(favoredNodes));
+beginFileLease(result.getFileId(), result);
+return result;
+  }
+
+  private String[] getFavoredNodesStr(InetSocketAddress[] favoredNodes) {
 String[] favoredNodeStrs = null;
 if (favoredNodes != null) {
   favoredNodeStrs = new String[favoredNodes.length];
@@ -1702,12 +1711,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  + favoredNodes[i].getPort();
   }
 }
-final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
-src, masked, flag, createParent, replication, blockSize, progress,
-buffersize, dfsClientConf.createChecksum(checksumOpt),
-favoredNodeStrs);
-beginFileLease(result.getFileId(), result);
-return result;
+return favoredNodeStrs;
   }
   
   /**
@@ -1725,7 +1729,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 return null;
   }
-  return callAppend(src, buffersize, flag, progress);
+  return callAppend(src, buffersize, flag, progress, null);
 }
 return null;
   }
@@ -1804,7 +1808,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 
   /** Method to get stream returned by append call */
   private DFSOutputStream callAppend(String src, int buffersize,
-  EnumSet<CreateFlag> flag, Progressable progress) throws IOException {
+  EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
+  throws IOException {
 CreateFlag.validateForAppend(flag);
 try {
   LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
@@ -1812,7 +1817,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   return DFSOutputStream.newStreamForAppend(this, src,
   
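
Most of the DFSClient change is a refactor: the favored-node address formatting that create() already performed moves into a private getFavoredNodesStr() helper so the append path (callAppend() now carries a favoredNodes parameter through to newStreamForAppend) can reuse it. A rough stand-alone sketch of that conversion; the exact "hostname:port" formatting here is an assumption, since only the getPort() half of the original line is visible in the hunk above:

// Hedged sketch (illustrative only): turning the caller's InetSocketAddress
// hints into the string form the output stream expects.
import java.net.InetSocketAddress;

public class FavoredNodesSketch {
  static String[] toFavoredNodeStrings(InetSocketAddress[] favoredNodes) {
    if (favoredNodes == null) {
      return null;  // caller expressed no preference
    }
    String[] favoredNodeStrs = new String[favoredNodes.length];
    for (int i = 0; i < favoredNodes.length; i++) {
      // Assumed formatting: "hostname:port".
      favoredNodeStrs[i] = favoredNodes[i].getHostName() + ":"
          + favoredNodes[i].getPort();
    }
    return favoredNodeStrs;
  }

  public static void main(String[] args) {
    InetSocketAddress[] nodes = {
        InetSocketAddress.createUnresolved("dn1.example.com", 50010),
        InetSocketAddress.createUnresolved("dn2.example.com", 50010) };
    for (String s : toFavoredNodeStrings(nodes)) {
      System.out.println(s);  // e.g. dn1.example.com:50010
    }
  }
}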

[2/2] hadoop git commit: HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned to SERVICE_NOT_RESPONDING (Contributed by Ming Ma)

2015-02-17 Thread vinayakumarb
HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned to 
SERVICE_NOT_RESPONDING (Contributed by Ming Ma)

(cherry picked from commit cf4b7f506dd338ecf2ed4c643b6a6a334e070fca)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/005e1df5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/005e1df5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/005e1df5

Branch: refs/heads/branch-2
Commit: 005e1df540aeccbeb371f3bab834b140d85f0ec5
Parents: 91a5d92
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Feb 17 14:55:56 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Feb 17 15:19:44 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/ha/HealthMonitor.java | 35 ++
 .../org/apache/hadoop/ha/DummyHAService.java| 73 +---
 .../org/apache/hadoop/ha/TestHealthMonitor.java |  4 +-
 4 files changed, 94 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/005e1df5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ff71162..f3ab6f7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -537,6 +537,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11467. KerberosAuthenticator can connect to a non-secure cluster.
 (yzhangal via rkanter)
 
+HADOOP-11000. HAServiceProtocol's health state is incorrectly transitioned
+to SERVICE_NOT_RESPONDING (Ming Ma via vinayakumarb)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/005e1df5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index 0d1..8c87629 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -30,6 +30,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HealthCheckFailedException;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.util.Daemon;
 
@@ -201,18 +202,20 @@ public class HealthMonitor {
 status = proxy.getServiceStatus();
 proxy.monitorHealth();
 healthy = true;
-  } catch (HealthCheckFailedException e) {
-LOG.warn("Service health check failed for " + targetToMonitor
-+ ": " + e.getMessage());
-enterState(State.SERVICE_UNHEALTHY);
   } catch (Throwable t) {
-LOG.warn("Transport-level exception trying to monitor health of " +
-targetToMonitor + ": " + t.getLocalizedMessage());
-RPC.stopProxy(proxy);
-proxy = null;
-enterState(State.SERVICE_NOT_RESPONDING);
-Thread.sleep(sleepAfterDisconnectMillis);
-return;
+if (isHealthCheckFailedException(t)) {
+  LOG.warn("Service health check failed for " + targetToMonitor
+  + ": " + t.getMessage());
+  enterState(State.SERVICE_UNHEALTHY);
+} else {
+  LOG.warn("Transport-level exception trying to monitor health of " +
+  targetToMonitor + ": " + t.getCause() + " " + 
t.getLocalizedMessage());
+  RPC.stopProxy(proxy);
+  proxy = null;
+  enterState(State.SERVICE_NOT_RESPONDING);
+  Thread.sleep(sleepAfterDisconnectMillis);
+  return;
+}
   }
   
   if (status != null) {
@@ -225,7 +228,15 @@ public class HealthMonitor {
   Thread.sleep(checkIntervalMillis);
 }
   }
-  
+
+  private boolean isHealthCheckFailedException(Throwable t) {
+return ((t instanceof HealthCheckFailedException) ||
+(t instanceof RemoteException &&
+((RemoteException)t).unwrapRemoteException(
+HealthCheckFailedException.class) instanceof
+HealthCheckFailedException));
+  }
+
   private synchronized void setLastServiceStatus(HAServiceStatus status) {
 this.lastServiceState = status;
 for (ServiceStateCallback cb : serviceStateCallbacks) {


hadoop git commit: HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)

2015-02-17 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf4b7f506 -> 2f0f756b2


HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f0f756b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f0f756b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f0f756b

Branch: refs/heads/trunk
Commit: 2f0f756b26ea83e142a5b9379fa75862c2fc6ad5
Parents: cf4b7f5
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 17 21:56:20 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Feb 17 21:56:20 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  2 ++
 .../java/org/apache/hadoop/fs/shell/XAttrCommands.java| 10 --
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java  |  2 +-
 3 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f0f756b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 51305bb..c3aafe8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -605,6 +605,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11589. NetUtils.createSocketAddr should trim the input URI.
 (Rakesh R via ozawa)
 
+HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f0f756b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
index 44e970b..4efda87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.shell;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 
@@ -77,9 +78,14 @@ class XAttrCommands extends FsCommand {
   name = StringUtils.popOptionWithArgument("-n", args);
   String en = StringUtils.popOptionWithArgument("-e", args);
   if (en != null) {
-encoding = enValueOfFunc.apply(en.toUpperCase());
+try {
+  encoding = enValueOfFunc.apply(en.toUpperCase(Locale.ENGLISH));
+} catch (IllegalArgumentException e) {
+  throw new IllegalArgumentException(
+  "Invalid/unsupported encoding option specified: " + en);
+}
 Preconditions.checkArgument(encoding != null,
-  "Invalid/unsupported encoding option specified: " + en);
+"Invalid/unsupported encoding option specified: " + en);
   }
 
   boolean r = StringUtils.popOption("-R", args);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f0f756b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index 0da263d..d228920 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -33,6 +32,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.LimitInputStream;
 import 
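
For callers the interesting hunk is XAttrCommands: the upper-casing of the -e option is pinned to Locale.ENGLISH so parsing no longer depends on the default locale, and an IllegalArgumentException from the enum lookup (presumably what the newer Guava function throws for unknown names, where the old one returned null) is rethrown with the same user-facing message. A self-contained sketch of that pattern using a hypothetical Codec enum:

// Hedged sketch, not the committed class: locale-safe, case-insensitive enum
// parsing that converts IllegalArgumentException into a clearer message.
import java.util.Locale;

public class EncodingOptionSketch {
  enum Codec { TEXT, HEX, BASE64 }

  static Codec parseCodec(String en) {
    try {
      // Locale.ENGLISH avoids surprises such as the Turkish dotless-i mapping.
      return Codec.valueOf(en.toUpperCase(Locale.ENGLISH));
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(
          "Invalid/unsupported encoding option specified: " + en);
    }
  }

  public static void main(String[] args) {
    System.out.println(parseCodec("hex"));    // HEX
    System.out.println(parseCodec("utf-9"));  // throws with a clear message
  }
}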

hadoop git commit: HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)

2015-02-17 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 005e1df54 -> 00fb0710b


HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)

(cherry picked from commit 2f0f756b26ea83e142a5b9379fa75862c2fc6ad5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00fb0710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00fb0710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00fb0710

Branch: refs/heads/branch-2
Commit: 00fb0710b6e0dce149c41eb9227ff94494c41b92
Parents: 005e1df
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 17 21:56:20 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Feb 17 21:57:58 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  2 ++
 .../java/org/apache/hadoop/fs/shell/XAttrCommands.java| 10 --
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java  |  2 +-
 3 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fb0710/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f3ab6f7..4871f45 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -200,6 +200,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11589. NetUtils.createSocketAddr should trim the input URI.
 (Rakesh R via ozawa)
 
+HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fb0710/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
index 44e970b..4efda87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.shell;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 
@@ -77,9 +78,14 @@ class XAttrCommands extends FsCommand {
   name = StringUtils.popOptionWithArgument("-n", args);
   String en = StringUtils.popOptionWithArgument("-e", args);
   if (en != null) {
-encoding = enValueOfFunc.apply(en.toUpperCase());
+try {
+  encoding = enValueOfFunc.apply(en.toUpperCase(Locale.ENGLISH));
+} catch (IllegalArgumentException e) {
+  throw new IllegalArgumentException(
+  "Invalid/unsupported encoding option specified: " + en);
+}
 Preconditions.checkArgument(encoding != null,
-  "Invalid/unsupported encoding option specified: " + en);
+"Invalid/unsupported encoding option specified: " + en);
   }
 
   boolean r = StringUtils.popOption("-R", args);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fb0710/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index 0da263d..d228920 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -33,6 +32,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.io.IOUtils;
+import