hadoop git commit: YARN-5262. Optimize sending RMNodeFinishedContainersPulledByAMEvent for every AM heartbeat.

2016-06-28 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1e3476318 -> d3fc1bdde


YARN-5262. Optimize sending RMNodeFinishedContainersPulledByAMEvent for every 
AM heartbeat.

(cherry picked from commit 26b5e6116f392b6be91dca57968259f87554ce33)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fc1bdd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fc1bdd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fc1bdd

Branch: refs/heads/branch-2
Commit: d3fc1bddea2b6ffa94659cb80c3a144ecce69cd5
Parents: 1e34763
Author: Rohith Sharma K S 
Authored: Wed Jun 29 10:08:30 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jun 29 10:13:17 2016 +0530

--
 .../resourcemanager/rmapp/attempt/RMAppAttemptImpl.java | 9 ++---
 .../rmapp/attempt/TestRMAppAttemptTransitions.java  | 7 +++
 2 files changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fc1bdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d210b53..9ec0f82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -826,9 +826,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   }
 }
 
-finishedContainersSentToAM.putIfAbsent(nodeId, new ArrayList
-  <ContainerStatus>());
-finishedContainersSentToAM.get(nodeId).addAll(finishedContainers);
+if (!finishedContainers.isEmpty()) {
+  finishedContainersSentToAM.putIfAbsent(nodeId,
+  new ArrayList<ContainerStatus>());
+  finishedContainersSentToAM.get(nodeId).addAll(finishedContainers);
+}
   }
 
   return returnList;
@@ -1871,6 +1873,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   eventHandler.handle(new RMNodeFinishedContainersPulledByAMEvent(nodeId,
 containerIdList));
 }
+this.finishedContainersSentToAM.clear();
   }
 
   // Add am container to the list so that am container instance will be

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fc1bdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 3143b94..497c6d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -1444,6 +1444,13 @@ public class TestRMAppAttemptTransitions {
 
Assert.assertTrue(applicationAttempt.getJustFinishedContainers().isEmpty());
 Assert.assertEquals(0, getFinishedContainersSentToAM(applicationAttempt)
 .size());
+
+// verify if no containers to acknowledge to NM then event should not be
+// triggered. Number of times event invoked is 1 i.e on second pull
+containerStatuses = applicationAttempt.pullJustFinishedContainers();
+Assert.assertEquals(0, containerStatuses.size());
+Mockito.verify(rmnodeEventHandler, times(1))
+.handle(Mockito.any(RMNodeEvent.class));
   }
 
  private static List<ContainerStatus> getFinishedContainersSentToAM(



hadoop git commit: YARN-5262. Optimize sending RMNodeFinishedContainersPulledByAMEvent for every AM heartbeat.

2016-06-28 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk b3649adf6 -> 26b5e6116


YARN-5262. Optimize sending RMNodeFinishedContainersPulledByAMEvent for every 
AM heartbeat.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26b5e611
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26b5e611
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26b5e611

Branch: refs/heads/trunk
Commit: 26b5e6116f392b6be91dca57968259f87554ce33
Parents: b3649ad
Author: Rohith Sharma K S 
Authored: Wed Jun 29 10:08:30 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jun 29 10:08:30 2016 +0530

--
 .../resourcemanager/rmapp/attempt/RMAppAttemptImpl.java | 9 ++---
 .../rmapp/attempt/TestRMAppAttemptTransitions.java  | 7 +++
 2 files changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26b5e611/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d210b53..9ec0f82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -826,9 +826,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   }
 }
 
-finishedContainersSentToAM.putIfAbsent(nodeId, new ArrayList
-  <ContainerStatus>());
-finishedContainersSentToAM.get(nodeId).addAll(finishedContainers);
+if (!finishedContainers.isEmpty()) {
+  finishedContainersSentToAM.putIfAbsent(nodeId,
+  new ArrayList<ContainerStatus>());
+  finishedContainersSentToAM.get(nodeId).addAll(finishedContainers);
+}
   }
 
   return returnList;
@@ -1871,6 +1873,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   eventHandler.handle(new RMNodeFinishedContainersPulledByAMEvent(nodeId,
 containerIdList));
 }
+this.finishedContainersSentToAM.clear();
   }
 
   // Add am container to the list so that am container instance will be

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26b5e611/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 3143b94..497c6d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -1444,6 +1444,13 @@ public class TestRMAppAttemptTransitions {
 
Assert.assertTrue(applicationAttempt.getJustFinishedContainers().isEmpty());
 Assert.assertEquals(0, getFinishedContainersSentToAM(applicationAttempt)
 .size());
+
+// verify if no containers to acknowledge to NM then event should not be
+// triggered. Number of times event invoked is 1 i.e on second pull
+containerStatuses = applicationAttempt.pullJustFinishedContainers();
+Assert.assertEquals(0, containerStatuses.size());
+Mockito.verify(rmnodeEventHandler, times(1))
+.handle(Mockito.any(RMNodeEvent.class));
   }
 
  private static List<ContainerStatus> getFinishedContainersSentToAM(
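
For context, the change is two-sided: pullJustFinishedContainers() now records a
node in finishedContainersSentToAM only when that node actually reported
finished containers, and the acknowledgement path clears the map once the
RMNodeFinishedContainersPulledByAMEvents have been fired, so an AM heartbeat
with nothing to report no longer triggers per-node events. A minimal standalone
sketch of this record-only-when-non-empty / send-then-clear pattern, with plain
strings standing in for the YARN NodeId and ContainerStatus types (an
illustration, not the RM code):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class AckBatcher {
  private final Map<String, List<String>> pendingAcks =
      new HashMap<String, List<String>>();

  // Record finished containers only when there is something to record, so an
  // idle heartbeat never creates an (empty) per-node entry.
  void recordFinished(String nodeId, List<String> finished) {
    if (finished.isEmpty()) {
      return;
    }
    List<String> list = pendingAcks.get(nodeId);
    if (list == null) {
      list = new ArrayList<String>();
      pendingAcks.put(nodeId, list);
    }
    list.addAll(finished);
  }

  // Send one acknowledgement per node that has entries, then clear, so the
  // same containers are not re-acknowledged on the next pull.
  void flush() {
    for (Map.Entry<String, List<String>> e : pendingAcks.entrySet()) {
      sendAck(e.getKey(), e.getValue());
    }
    pendingAcks.clear();
  }

  private void sendAck(String nodeId, List<String> containers) {
    System.out.println("ack " + containers.size() + " containers to " + nodeId);
  }
}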



hadoop git commit: HADOOP-9330. Add custom JUnit4 test runner with configurable timeout (Steve Loughran via aw)

2016-06-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1faaa6907 -> 610363559


HADOOP-9330. Add custom JUnit4 test runner with configurable timeout (Steve 
Loughran via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61036355
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61036355
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61036355

Branch: refs/heads/trunk
Commit: 610363559135a725499cf46e256424d16bec98a3
Parents: 1faaa69
Author: Allen Wittenauer 
Authored: Tue Jun 28 20:40:41 2016 -0700
Committer: Allen Wittenauer 
Committed: Tue Jun 28 20:40:41 2016 -0700

--
 .../org/apache/hadoop/test/HadoopTestBase.java  | 67 
 1 file changed, 67 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61036355/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
new file mode 100644
index 000..43d5be8
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import org.junit.Rule;
+import org.junit.rules.Timeout;
+
+/**
+ * A base class for JUnit4 tests that sets a default timeout for all tests
+ * that subclass this test
+ */
+public abstract class HadoopTestBase {
+  /**
+   * System property name to set the test timeout: {@value}
+   */
+  public static final String PROPERTY_TEST_DEFAULT_TIMEOUT =
+"test.default.timeout";
+
+  /**
+   * The default timeout (in milliseconds) if the system property
+   * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}
+   * is not set: {@value}
+   */
+  public static final int TEST_DEFAULT_TIMEOUT_VALUE = 10;
+
+  /**
+   * The JUnit rule that sets the default timeout for tests
+   */
+  @Rule
+  public Timeout defaultTimeout = retrieveTestTimeout();
+
+  /**
+   * Retrieve the test timeout from the system property
+   * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}, falling back to
+   * the value in {@link #TEST_DEFAULT_TIMEOUT_VALUE} if the
+   * property is not defined.
+   * @return the recommended timeout for tests
+   */
+  public static Timeout retrieveTestTimeout() {
+String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
+ Integer.toString(
+   TEST_DEFAULT_TIMEOUT_VALUE));
+int millis;
+try {
+  millis = Integer.parseInt(propval);
+} catch (NumberFormatException e) {
+  //fall back to the default value, as the property cannot be parsed
+  millis = TEST_DEFAULT_TIMEOUT_VALUE;
+}
+return new Timeout(millis);
+  }
+}
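
A minimal usage sketch (TestSleepExample is hypothetical, not part of this
commit): any JUnit4 test that extends HadoopTestBase inherits the Timeout rule,
and the limit can be changed per run by setting the test.default.timeout system
property on the test JVM.

package org.apache.hadoop.test;

import org.junit.Test;

// Hypothetical example test, not part of the commit.
public class TestSleepExample extends HadoopTestBase {
  @Test
  public void testFinishesWithinDefaultTimeout() throws Exception {
    // The inherited Timeout rule fails this test if it runs past the limit.
    Thread.sleep(5);
  }
}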





hadoop git commit: HADOOP-9321. fix coverage org.apache.hadoop.net (Ivan A. Veselovsky via aw)

2016-06-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 77031a9c3 -> 1faaa6907


HADOOP-9321. fix coverage org.apache.hadoop.net (Ivan A. Veselovsky via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1faaa690
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1faaa690
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1faaa690

Branch: refs/heads/trunk
Commit: 1faaa6907852b193cc5ac34f25d6ae41a1f10e61
Parents: 77031a9
Author: Allen Wittenauer 
Authored: Tue Jun 28 20:37:42 2016 -0700
Committer: Allen Wittenauer 
Committed: Tue Jun 28 20:37:42 2016 -0700

--
 .../apache/hadoop/ipc/TestSocketFactory.java| 201 ++-
 1 file changed, 198 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1faaa690/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
index f2d312e..ce481dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java
@@ -17,8 +17,17 @@
  */
 package org.apache.hadoop.ipc;
 
-import static org.junit.Assert.assertSame;
-
+import java.io.BufferedReader;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Proxy;
+import java.net.Proxy.Type;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -29,11 +38,61 @@ import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.SocksSocketFactory;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
+/**
+ * test StandardSocketFactory and SocksSocketFactory NetUtils
+ *
+ */
 public class TestSocketFactory {
 
+  private static final int START_STOP_TIMEOUT_SEC = 30;
+
+  private ServerRunnable serverRunnable;
+  private Thread serverThread;
+  private int port;
+
+  private void startTestServer() throws Exception {
+// start simple tcp server.
+serverRunnable = new ServerRunnable();
+serverThread = new Thread(serverRunnable);
+serverThread.start();
+final long timeout = System.currentTimeMillis() + START_STOP_TIMEOUT_SEC * 1000;
+while (!serverRunnable.isReady()) {
+  assertNull(serverRunnable.getThrowable());
+  Thread.sleep(10);
+  if (System.currentTimeMillis() > timeout) {
+fail("Server thread did not start properly in allowed time of "
++ START_STOP_TIMEOUT_SEC + " sec.");
+  }
+}
+port = serverRunnable.getPort();
+  }
+
+  @After
+  public void stopTestServer() throws InterruptedException {
+final Thread t = serverThread;
+if (t != null) {
+  serverThread = null;
+  port = -1;
+  // stop server
+  serverRunnable.stop();
+  t.join(START_STOP_TIMEOUT_SEC * 1000);
+  assertFalse(t.isAlive());
+  assertNull(serverRunnable.getThrowable());
+}
+  }
+
   @Test
   public void testSocketFactoryAsKeyInMap() {
 Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
@@ -64,9 +123,145 @@ public class TestSocketFactory {
   }
 
   /**
-   * A dummy socket factory class that extends the StandardSocketFactory. 
+   * A dummy socket factory class that extends the StandardSocketFactory.
*/
   static class DummySocketFactory extends StandardSocketFactory {
+
+  }
+
+  /**
+   * Test SocksSocketFactory.
+   */
+  @Test (timeout=5000)
+  public void testSocksSocketFactory() throws Exception {
+startTestServer();
+testSocketFactory(new SocksSocketFactory());
+  }
+
+  /**
+   * Test StandardSocketFactory.
+   */
+  @Test (timeout=5000)
+  public void testStandardSocketFactory() throws Exception {
+startTestServer();
+testSocketFactory(new StandardSocketFactory());
+  }
+
+  /*
+   * Common test implementation.
+   */
+  private void testSocketFactory(SocketFactory 
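
(The archived message is truncated above, part-way through the shared
testSocketFactory helper.) Presumably that helper opens a client socket through
the supplied factory and exchanges data with the echo server started by
startTestServer(); a generic sketch of such a round-trip, written under that
assumption and not taken from the commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.net.Socket;
import javax.net.SocketFactory;

class SocketFactoryRoundTrip {
  // Connect via the given factory and read back one echoed line.
  static String roundTrip(SocketFactory factory, int port, String payload)
      throws Exception {
    Socket socket = factory.createSocket();
    try {
      socket.connect(new InetSocketAddress("localhost", port), 5000);
      PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
      out.println(payload);
      BufferedReader in = new BufferedReader(
          new InputStreamReader(socket.getInputStream()));
      return in.readLine(); // an echo server returns the payload unchanged
    } finally {
      socket.close();
    }
  }
}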

[Hadoop Wiki] Update of "LibHDFS" by AkiraAjisaka

2016-06-28 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "LibHDFS" page has been changed by AkiraAjisaka:
https://wiki.apache.org/hadoop/LibHDFS?action=diff&rev1=12&rev2=13

Comment:
Fix broken link to libhdfs test cases

  <>
  = Examples =
  
-   The [[http://svn.apache.org/viewvc/hadoop/core/trunk/src/c++/libhdfs/hdfs_test.c|test cases]] for libhdfs provide some good examples on how to use libhdfs.
+   The [[https://git-wip-us.apache.org/repos/asf?p=hadoop.git;a=tree;f=hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests|test cases]] for libhdfs provide some good examples on how to use libhdfs.
  
  <>
  <>




hadoop git commit: HADOOP-12864. Remove bin/rcc script. Contributed by Allen Wittenauer.

2016-06-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk c0829f449 -> 77031a9c3


HADOOP-12864. Remove bin/rcc script. Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77031a9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77031a9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77031a9c

Branch: refs/heads/trunk
Commit: 77031a9c37e7e72f8825b9e22aa35b238e924576
Parents: c0829f4
Author: Andrew Wang 
Authored: Tue Jun 28 14:25:02 2016 -0700
Committer: Andrew Wang 
Committed: Tue Jun 28 14:25:02 2016 -0700

--
 .../hadoop-common/src/main/bin/rcc  | 41 
 1 file changed, 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77031a9c/hadoop-common-project/hadoop-common/src/main/bin/rcc
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/rcc 
b/hadoop-common-project/hadoop-common/src/main/bin/rcc
deleted file mode 100755
index c2ec942..000
--- a/hadoop-common-project/hadoop-common/src/main/bin/rcc
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script runs the hadoop core commands.
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "$this")" >/dev/null && pwd -P)
-script="$(basename -- "$this")"
-this="$bin/$script"
-
-HADOOP_DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
-
-if [ $# = 0 ]; then
-  hadoop_exit_with_usage 1
-fi
-
-CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
-
-# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-
-hadoop_finalize
-hadoop_java_exec rcc "${CLASS}" "$@"





hadoop git commit: HDFS-6434. Default permission for creating file should be 644 for WebHdfs/HttpFS. Contributed by Wellington Chevreuil.

2016-06-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk be38e530b -> c0829f449


HDFS-6434. Default permission for creating file should be 644 for 
WebHdfs/HttpFS. Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0829f44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0829f44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0829f44

Branch: refs/heads/trunk
Commit: c0829f449337b78ac0b995e216f7324843e74dd2
Parents: be38e53
Author: Andrew Wang 
Authored: Tue Jun 28 13:55:26 2016 -0700
Committer: Andrew Wang 
Committed: Tue Jun 28 13:55:26 2016 -0700

--
 .../hdfs/web/resources/PermissionParam.java |  36 +-
 .../datanode/web/webhdfs/ParameterParser.java   |   3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |   8 +-
 .../resources/TestWebHdfsCreatePermissions.java | 124 +++
 .../hadoop/hdfs/web/resources/TestParam.java|   3 +-
 5 files changed, 163 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0829f44/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
index 530fd3f..42ff69d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
@@ -28,11 +28,25 @@ public class PermissionParam extends ShortParam {
 
   private static final Domain DOMAIN = new Domain(NAME, 8);
 
-  private static final short DEFAULT_PERMISSION = 0755;
+  private static final short DEFAULT_DIR_PERMISSION = 0755;
 
-  /** @return the default FsPermission. */
-  public static FsPermission getDefaultFsPermission() {
-return new FsPermission(DEFAULT_PERMISSION);
+  private static final short DEFAULT_FILE_PERMISSION = 0644;
+
+  private static final short DEFAULT_SYMLINK_PERMISSION = 0777;
+
+  /** @return the default FsPermission for directory. */
+  public static FsPermission getDefaultDirFsPermission() {
+return new FsPermission(DEFAULT_DIR_PERMISSION);
+  }
+
+  /** @return the default FsPermission for file. */
+  public static FsPermission getDefaultFileFsPermission() {
+return new FsPermission(DEFAULT_FILE_PERMISSION);
+  }
+
+  /** @return the default FsPermission for symlink. */
+  public static FsPermission getDefaultSymLinkFsPermission() {
+return new FsPermission(DEFAULT_SYMLINK_PERMISSION);
   }
 
   /**
@@ -57,8 +71,18 @@ public class PermissionParam extends ShortParam {
   }
 
   /** @return the represented FsPermission. */
-  public FsPermission getFsPermission() {
+  public FsPermission getFileFsPermission() {
+return this.getFsPermission(DEFAULT_FILE_PERMISSION);
+  }
+
+  /** @return the represented FsPermission. */
+  public FsPermission getDirFsPermission() {
+return this.getFsPermission(DEFAULT_DIR_PERMISSION);
+  }
+
+  private FsPermission getFsPermission(short defaultPermission){
 final Short v = getValue();
-return new FsPermission(v != null? v: DEFAULT_PERMISSION);
+return new FsPermission(v != null? v: defaultPermission);
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0829f44/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index 5d85dc4..440a532 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -104,7 +104,8 @@ class ParameterParser {
   }
 
   FsPermission permission() {
-return new PermissionParam(param(PermissionParam.NAME)).getFsPermission();
+return new PermissionParam(param(PermissionParam.NAME)).
+getFileFsPermission();
   }
 
   boolean overwrite() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0829f44/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
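
(The NamenodeWebHdfsMethods portion of the diff is truncated here.) The heart
of the change is replacing the single 0755 default with per-type defaults (0644
for files, 0755 for directories, 0777 for symlinks) and funneling callers
through getFsPermission(short), which falls back only when the request omitted
the permission parameter. A standalone sketch of that
parse-with-type-specific-default pattern, where OctalParam is a simplified
stand-in rather than HDFS code:

class OctalParam {
  private final Short value; // null when the request omitted the parameter

  OctalParam(String str) {
    this.value = (str == null || str.isEmpty())
        ? null : Short.valueOf(str, 8); // WebHDFS permissions are octal
  }

  short get(short defaultPermission) {
    return value != null ? value : defaultPermission;
  }

  public static void main(String[] args) {
    OctalParam absent = new OctalParam(null);
    OctalParam present = new OctalParam("700");
    // A file create falls back to 0644, a directory create to 0755.
    System.out.println(Integer.toOctalString(absent.get((short) 0644)));  // 644
    System.out.println(Integer.toOctalString(absent.get((short) 0755)));  // 755
    System.out.println(Integer.toOctalString(present.get((short) 0644))); // 700
  }
}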

hadoop git commit: HDFS-10578. libhdfs++: Silence compile warnings from URI parser. Contributed by James Clampffer

2016-06-28 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 a903f780e -> d643d8c4f


HDFS-10578. libhdfs++: Silence compile warnings from URI parser. Contributed by 
James Clampffer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d643d8c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d643d8c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d643d8c4

Branch: refs/heads/HDFS-8707
Commit: d643d8c4f90248a4f9869f429ae9afc1593a900b
Parents: a903f78
Author: James 
Authored: Tue Jun 28 14:41:44 2016 -0400
Committer: James 
Committed: Tue Jun 28 14:41:44 2016 -0400

--
 .../main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d643d8c4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt
index d752be5..30e9245 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/CMakeLists.txt
@@ -18,8 +18,9 @@
 
 
 #uripaser lib likes to use always_inline and gcc complains
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-attributes")
+#also frees const qualified malloced buffers
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
 
 add_library(uriparser2_obj OBJECT uriparser2/uriparser2.c 
uriparser2/uriparser/UriParse.c uriparser2/uriparser/UriParseBase.c 
 uriparser2/uriparser/UriCommon.c uriparser2/uriparser/UriIp4Base.c 
uriparser2/uriparser/UriIp4.c uriparser2/uriparser/UriEscape.c 
uriparser2/uriparser/UriQuery.c)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-9888. KerberosName static initialization gets default realm, which is unneeded in non-secure deployment. (Dmytro Kabakchei via aw)

2016-06-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 422c73a86 -> be38e530b


HADOOP-9888. KerberosName static initialization gets default realm, which is 
unneeded in non-secure deployment. (Dmytro Kabakchei via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be38e530
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be38e530
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be38e530

Branch: refs/heads/trunk
Commit: be38e530bb23b134758e29c9101f98cf4e1d2c38
Parents: 422c73a
Author: Allen Wittenauer 
Authored: Tue Jun 28 07:22:51 2016 -0700
Committer: Allen Wittenauer 
Committed: Tue Jun 28 07:22:51 2016 -0700

--
 .../authentication/util/KerberosName.java   | 24 ++--
 1 file changed, 12 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be38e530/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 645fbc6..0b668f1 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -80,16 +80,7 @@ public class KerberosName {
*/
  private static List<Rule> rules;
 
-  private static String defaultRealm;
-
-  static {
-try {
-  defaultRealm = KerberosUtil.getDefaultRealm();
-} catch (Exception ke) {
-LOG.debug("Kerberos krb5 configuration not found, setting default 
realm to empty");
-defaultRealm="";
-}
-  }
+  private static String defaultRealm = null;
 
   @VisibleForTesting
   public static void resetDefaultRealm() {
@@ -124,9 +115,18 @@ public class KerberosName {
 
   /**
* Get the configured default realm.
+   * Used syncronized method here, because double-check locking is overhead.
* @return the default realm from the krb5.conf
*/
-  public String getDefaultRealm() {
+  public static synchronized String getDefaultRealm() {
+if (defaultRealm == null) {
+  try {
+defaultRealm = KerberosUtil.getDefaultRealm();
+  } catch (Exception ke) {
+LOG.debug("Kerberos krb5 configuration not found, setting default 
realm to empty");
+defaultRealm = "";
+  }
+}
 return defaultRealm;
   }
 
@@ -309,7 +309,7 @@ public class KerberosName {
 String apply(String[] params) throws IOException {
   String result = null;
   if (isDefault) {
-if (defaultRealm.equals(params[0])) {
+if (getDefaultRealm().equals(params[0])) {
   result = params[1];
 }
   } else if (params.length - 1 == numOfComponents) {
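
The shape of the change: the krb5 realm lookup moves out of a static
initializer (which ran at class load even in non-secure deployments) into a
lazily-initialized synchronized static getter; per the comment in the diff,
plain synchronized was preferred over double-checked locking. A generic sketch
of the pattern, where lookupRealm() stands in for KerberosUtil.getDefaultRealm():

class LazyRealm {
  private static String realm = null;

  // Synchronizing the whole getter keeps it simple; after the first call it
  // only guards a null check, which is cheap for this usage.
  static synchronized String get() {
    if (realm == null) {
      try {
        realm = lookupRealm(); // runs on first use, never at class load
      } catch (Exception e) {
        realm = ""; // no krb5 configuration: fall back to the empty realm
      }
    }
    return realm;
  }

  private static String lookupRealm() throws Exception {
    throw new Exception("no krb5.conf in this sketch");
  }
}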





hadoop git commit: HADOOP-13034. Log message about input options in distcp lacks some items (Takashi Ohnishi via aw)

2016-06-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 23c3ff85a -> 422c73a86


HADOOP-13034. Log message about input options in distcp lacks some items 
(Takashi Ohnishi via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/422c73a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/422c73a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/422c73a8

Branch: refs/heads/trunk
Commit: 422c73a8657d8699920f7db13d4be200e16c4272
Parents: 23c3ff8
Author: Allen Wittenauer 
Authored: Tue Jun 28 07:21:04 2016 -0700
Committer: Allen Wittenauer 
Committed: Tue Jun 28 07:21:04 2016 -0700

--
 .../src/main/java/org/apache/hadoop/tools/DistCpOptions.java | 4 
 .../src/test/java/org/apache/hadoop/tools/TestOptionsParser.java | 1 +
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/422c73a8/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index 1bc65e0..e6f53f5 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -632,6 +632,10 @@ public class DistCpOptions {
 ", deleteMissing=" + deleteMissing +
 ", ignoreFailures=" + ignoreFailures +
 ", overwrite=" + overwrite +
+", append=" + append +
+", useDiff=" + useDiff +
+", fromSnapshot=" + fromSnapshot +
+", toSnapshot=" + toSnapshot +
 ", skipCRC=" + skipCRC +
 ", blocking=" + blocking +
 ", numListstatusThreads=" + numListstatusThreads +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/422c73a8/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index 35109cc..c46bcd9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -387,6 +387,7 @@ public class TestOptionsParser {
 DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
 String val = "DistCpOptions{atomicCommit=false, syncFolder=false, "
 + "deleteMissing=false, ignoreFailures=false, overwrite=false, "
++ "append=false, useDiff=false, fromSnapshot=null, toSnapshot=null, "
 + "skipCRC=false, blocking=true, numListstatusThreads=0, maxMaps=20, "
 + "mapBandwidth=100.0, "
 + "copyStrategy='uniformsize', preserveStatus=[], "





[2/2] hadoop git commit: HADOOP-13209. replace slaves with workers (John Smith via aw)

2016-06-28 Thread aw
HADOOP-13209. replace slaves with workers (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23c3ff85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23c3ff85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23c3ff85

Branch: refs/heads/trunk
Commit: 23c3ff85a9e73d8f0755e14f12cc7c89b72acddd
Parents: 2a0082c
Author: Allen Wittenauer 
Authored: Tue Jun 28 05:53:03 2016 -0700
Committer: Allen Wittenauer 
Committed: Tue Jun 28 05:53:03 2016 -0700

--
 .../hadoop-common/src/main/bin/hadoop   |   6 +-
 .../src/main/bin/hadoop-config.cmd  |   4 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |   4 +
 .../src/main/bin/hadoop-daemons.sh  |   6 +-
 .../src/main/bin/hadoop-functions.sh|  75 ++--
 .../hadoop-common/src/main/bin/slaves.sh|  60 --
 .../hadoop-common/src/main/bin/workers.sh   |  60 ++
 .../hadoop-common/src/main/conf/hadoop-env.sh   |   4 +-
 .../main/conf/hadoop-user-functions.sh.example  |   4 +-
 .../hadoop-common/src/main/conf/workers |   1 +
 .../hadoop-common/src/main/java/overview.html   |  46 +++
 .../src/site/markdown/ClusterSetup.md   |  18 +--
 .../src/site/markdown/CommandsManual.md |   6 +-
 .../src/site/markdown/RackAwareness.md  |   4 +-
 .../src/test/scripts/hadoop_slaves.bats |  37 --
 .../src/test/scripts/hadoop_ssh.bats|  18 +--
 .../src/test/scripts/hadoop_workers.bats|  37 ++
 .../hadoop-hdfs/src/main/bin/hdfs   |  10 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |  12 +-
 .../src/main/bin/start-secure-dns.sh|   2 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh|  12 +-
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh |   2 +-
 .../token/block/BlockTokenSecretManager.java|  47 
 .../hadoop-hdfs/src/main/java/overview.html |  46 +++
 .../hadoop-hdfs/src/site/markdown/Federation.md |   2 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md |   2 +-
 .../hadoop/filecache/DistributedCache.java  |  70 +--
 .../mapreduce/filecache/DistributedCache.java   | 108 -
 .../src/site/markdown/MapReduceTutorial.md  |  22 ++--
 .../apache/hadoop/mapred/ReliabilityTest.java   | 119 ++-
 .../apache/hadoop/mapred/TestLazyOutput.java|  31 ++---
 .../apache/hadoop/mapred/pipes/TestPipes.java   | 105 
 .../mapreduce/TestMapReduceLazyOutput.java  |  44 ---
 .../mapreduce/security/TestBinaryTokenFile.java |  68 ++-
 .../mapreduce/security/TestMRCredentials.java   |   6 +-
 .../hadoop-yarn/bin/start-yarn.sh   |   6 +-
 .../hadoop-yarn/bin/stop-yarn.sh|   6 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  12 +-
 .../hadoop-yarn/bin/yarn-config.cmd |   2 +-
 .../hadoop-yarn/bin/yarn-config.sh  |  20 ++--
 .../hadoop-yarn/bin/yarn-daemons.sh |   6 +-
 hadoop-yarn-project/hadoop-yarn/conf/slaves |   1 -
 hadoop-yarn-project/hadoop-yarn/pom.xml |   2 +-
 43 files changed, 588 insertions(+), 565 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23c3ff85/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 6cf872c..b57a4c1 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -28,7 +28,7 @@ function hadoop_usage
   hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave 
mode"
   hadoop_add_option "loglevel level" "set the log4j level for this command"
   hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
-  hadoop_add_option "slaves" "turn on slave mode"
+  hadoop_add_option "workers" "turn on worker mode"
 
   hadoop_add_subcommand "checknative" "check native Hadoop and compression 
libraries availability"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the 
Hadoop jar and the required libraries"
@@ -205,8 +205,8 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
-if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
-  hadoop_common_slave_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
+if [[ ${HADOOP_WORKER_MODE} = true ]]; then
+  hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23c3ff85/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

[1/2] hadoop git commit: HADOOP-13209. replace slaves with workers (John Smith via aw)

2016-06-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2a0082c51 -> 23c3ff85a


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23c3ff85/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index 16f3afb..1d5b7f2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -85,11 +85,11 @@ A MapReduce *job* usually splits the input data-set into 
independent chunks whic
 
 Typically the compute nodes and the storage nodes are the same, that is, the 
MapReduce framework and the Hadoop Distributed File System (see [HDFS 
Architecture Guide](../../hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) are 
running on the same set of nodes. This configuration allows the framework to 
effectively schedule tasks on the nodes where data is already present, 
resulting in very high aggregate bandwidth across the cluster.
 
-The MapReduce framework consists of a single master `ResourceManager`, one 
slave `NodeManager` per cluster-node, and `MRAppMaster` per application (see 
[YARN Architecture Guide](../../hadoop-yarn/hadoop-yarn-site/YARN.html)).
+The MapReduce framework consists of a single master `ResourceManager`, one 
worker `NodeManager` per cluster-node, and `MRAppMaster` per application (see 
[YARN Architecture Guide](../../hadoop-yarn/hadoop-yarn-site/YARN.html)).
 
 Minimally, applications specify the input/output locations and supply *map* 
and *reduce* functions via implementations of appropriate interfaces and/or 
abstract-classes. These, and other job parameters, comprise the *job 
configuration*.
 
-The Hadoop *job client* then submits the job (jar/executable etc.) and 
configuration to the `ResourceManager` which then assumes the responsibility of 
distributing the software/configuration to the slaves, scheduling tasks and 
monitoring them, providing status and diagnostic information to the job-client.
+The Hadoop *job client* then submits the job (jar/executable etc.) and 
configuration to the `ResourceManager` which then assumes the responsibility of 
distributing the software/configuration to the workers, scheduling tasks and 
monitoring them, providing status and diagnostic information to the job-client.
 
 Although the Hadoop framework is implemented in Java™, MapReduce 
applications need not be written in Java.
 
@@ -213,10 +213,10 @@ Sample text-files as input:
 $ bin/hadoop fs -ls /user/joe/wordcount/input/
 /user/joe/wordcount/input/file01
 /user/joe/wordcount/input/file02
-
+
 $ bin/hadoop fs -cat /user/joe/wordcount/input/file01
 Hello World Bye World
-
+
 $ bin/hadoop fs -cat /user/joe/wordcount/input/file02
 Hello Hadoop Goodbye Hadoop
 
@@ -787,11 +787,11 @@ or Counters.incrCounter(String, String, long) in the 
`map` and/or `reduce` metho
 
 Applications specify the files to be cached via urls (hdfs://) in the `Job`. 
The `DistributedCache` assumes that the files specified via hdfs:// urls are 
already present on the `FileSystem`.
 
-The framework will copy the necessary files to the slave node before any tasks 
for the job are executed on that node. Its efficiency stems from the fact that 
the files are only copied once per job and the ability to cache archives which 
are un-archived on the slaves.
+The framework will copy the necessary files to the worker node before any 
tasks for the job are executed on that node. Its efficiency stems from the fact 
that the files are only copied once per job and the ability to cache archives 
which are un-archived on the workers.
 
 `DistributedCache` tracks the modification timestamps of the cached files. 
Clearly the cache files should not be modified by the application or externally 
while the job is executing.
 
-`DistributedCache` can be used to distribute simple, read-only data/text files 
and more complex types such as archives and jars. Archives (zip, tar, tgz and 
tar.gz files) are *un-archived* at the slave nodes. Files have *execution 
permissions* set.
+`DistributedCache` can be used to distribute simple, read-only data/text files 
and more complex types such as archives and jars. Archives (zip, tar, tgz and 
tar.gz files) are *un-archived* at the worker nodes. Files have *execution 
permissions* set.
 
 The files/archives can be distributed by setting the property 
`mapreduce.job.cache.{files |archives}`. If more than one file/archive has to 
be distributed, they can be added as comma separated paths. The properties can 

hadoop git commit: HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

2016-06-28 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 47c13c4f7 -> f44d7782f


HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

(cherry picked from commit 2a0082c51da7cbe2770eddb5f72cd7f8d72fa5f6)
(cherry picked from commit 1e347631817d882353bfb91d68f109cb8232e8c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f44d7782
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f44d7782
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f44d7782

Branch: refs/heads/branch-2.8
Commit: f44d7782f605cfc1ee4ddae6d3a4ce877c9f2fd6
Parents: 47c13c4
Author: Vinayakumar B 
Authored: Tue Jun 28 16:49:39 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Jun 28 16:55:57 2016 +0530

--
 .../hdfs/server/datanode/BPServiceActor.java|  46 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  26 
 .../hdfs/server/datanode/DataNodeMXBean.java|  20 ++-
 .../src/main/webapps/datanode/datanode.html | 129 +++
 .../hadoop-hdfs/src/main/webapps/datanode/dn.js |  70 ++
 .../src/main/webapps/datanode/index.html|  48 +--
 .../server/datanode/TestDataNodeMXBean.java |   4 +
 7 files changed, 297 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44d7782/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 99874dd..70004e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
@@ -138,6 +140,10 @@ class BPServiceActor implements Runnable {
 || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
+  String getRunningState() {
+return runningState.toString();
+  }
+
   @Override
   public String toString() {
 return bpos.toString() + " service to " + nnAddr;
@@ -147,6 +153,22 @@ class BPServiceActor implements Runnable {
 return nnAddr;
   }
 
+  private String getNameNodeAddress() {
+return NetUtils.getHostPortString(getNNSocketAddress());
+  }
+
+  Map<String, String> getActorInfoMap() {
+final Map<String, String> info = new HashMap<String, String>();
+info.put("NamenodeAddress", getNameNodeAddress());
+info.put("BlockPoolID", bpos.getBlockPoolId());
+info.put("ActorState", getRunningState());
+info.put("LastHeartbeat",
+String.valueOf(getScheduler().getLastHearbeatTime()));
+info.put("LastBlockReport",
+String.valueOf(getScheduler().getLastBlockReportTime()));
+return info;
+  }
+
   private final CountDownLatch initialRegistrationComplete;
   private final LifelineSender lifelineSender;
 
@@ -379,6 +401,7 @@ class BPServiceActor implements Runnable {
   (nCmds + " commands: " + Joiner.on("; ").join(cmds +
   ".");
 }
+scheduler.updateLastBlockReportTime(monotonicNow());
 scheduler.scheduleNextBlockReport();
 return cmds.size() == 0 ? null : cmds;
   }
@@ -425,6 +448,7 @@ class BPServiceActor implements Runnable {
 " storage reports from service actor: " + this);
 }
 
+scheduler.updateLastHeartbeatTime(monotonicNow());
 VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
 .getVolumeFailureSummary();
 int numFailedVolumes = volumeFailureSummary != null ?
@@ -996,6 +1020,12 @@ class BPServiceActor implements Runnable {
 volatile long nextLifelineTime = monotonicNow();
 
 @VisibleForTesting
+volatile long lastBlockReportTime = monotonicNow();
+
+@VisibleForTesting
+volatile long lastHeartbeatTime = monotonicNow();
+
+@VisibleForTesting
 boolean resetBlockReportTime = true;
 
 private 
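
(The archived message is truncated above.) getActorInfoMap() exports each
BPServiceActor's state as a flat string map (namenode address, block pool ID,
actor state, and the two new timestamps), which the new datanode.html/dn.js
page reads via the DataNode MXBean. A hypothetical consumer-side sketch, not
the actual UI code, of turning such timestamps into "seconds ago" values for
display:

import java.util.HashMap;
import java.util.Map;

class ActorInfoDemo {
  // Convert an exported millisecond timestamp into an age in seconds.
  static long secondsSince(Map<String, String> info, String key, long nowMs) {
    return (nowMs - Long.parseLong(info.get(key))) / 1000;
  }

  public static void main(String[] args) {
    Map<String, String> info = new HashMap<String, String>();
    long now = 90000L; // pretend monotonic clock values, in milliseconds
    info.put("LastHeartbeat", String.valueOf(now - 3000L));
    info.put("LastBlockReport", String.valueOf(now - 60000L));
    System.out.println("Heartbeat: "
        + secondsSince(info, "LastHeartbeat", now) + "s ago");
    System.out.println("Block report: "
        + secondsSince(info, "LastBlockReport", now) + "s ago");
  }
}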

[2/2] hadoop git commit: HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

2016-06-28 Thread vinayakumarb
HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

(cherry picked from commit 2a0082c51da7cbe2770eddb5f72cd7f8d72fa5f6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e347631
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e347631
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e347631

Branch: refs/heads/branch-2
Commit: 1e347631817d882353bfb91d68f109cb8232e8c4
Parents: 0a1a2ce
Author: Vinayakumar B 
Authored: Tue Jun 28 16:49:39 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Jun 28 16:51:00 2016 +0530

--
 .../hdfs/server/datanode/BPServiceActor.java|  46 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  26 
 .../hdfs/server/datanode/DataNodeMXBean.java|  20 ++-
 .../src/main/webapps/datanode/datanode.html | 129 +++
 .../hadoop-hdfs/src/main/webapps/datanode/dn.js |  70 ++
 .../src/main/webapps/datanode/index.html|  48 +--
 .../server/datanode/TestDataNodeMXBean.java |   4 +
 7 files changed, 297 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e347631/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 99874dd..70004e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
@@ -138,6 +140,10 @@ class BPServiceActor implements Runnable {
 || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
+  String getRunningState() {
+return runningState.toString();
+  }
+
   @Override
   public String toString() {
 return bpos.toString() + " service to " + nnAddr;
@@ -147,6 +153,22 @@ class BPServiceActor implements Runnable {
 return nnAddr;
   }
 
+  private String getNameNodeAddress() {
+return NetUtils.getHostPortString(getNNSocketAddress());
+  }
+
+  Map<String, String> getActorInfoMap() {
+final Map<String, String> info = new HashMap<String, String>();
+info.put("NamenodeAddress", getNameNodeAddress());
+info.put("BlockPoolID", bpos.getBlockPoolId());
+info.put("ActorState", getRunningState());
+info.put("LastHeartbeat",
+String.valueOf(getScheduler().getLastHearbeatTime()));
+info.put("LastBlockReport",
+String.valueOf(getScheduler().getLastBlockReportTime()));
+return info;
+  }
+
   private final CountDownLatch initialRegistrationComplete;
   private final LifelineSender lifelineSender;
 
@@ -379,6 +401,7 @@ class BPServiceActor implements Runnable {
   (nCmds + " commands: " + Joiner.on("; ").join(cmds +
   ".");
 }
+scheduler.updateLastBlockReportTime(monotonicNow());
 scheduler.scheduleNextBlockReport();
 return cmds.size() == 0 ? null : cmds;
   }
@@ -425,6 +448,7 @@ class BPServiceActor implements Runnable {
 " storage reports from service actor: " + this);
 }
 
+scheduler.updateLastHeartbeatTime(monotonicNow());
 VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
 .getVolumeFailureSummary();
 int numFailedVolumes = volumeFailureSummary != null ?
@@ -996,6 +1020,12 @@ class BPServiceActor implements Runnable {
 volatile long nextLifelineTime = monotonicNow();
 
 @VisibleForTesting
+volatile long lastBlockReportTime = monotonicNow();
+
+@VisibleForTesting
+volatile long lastHeartbeatTime = monotonicNow();
+
+@VisibleForTesting
 boolean resetBlockReportTime = true;
 
 private final AtomicBoolean forceFullBlockReport =
@@ -1033,6 +1063,22 @@ class BPServiceActor implements Runnable {
   return nextHeartbeatTime;
 }
 
+

[1/2] hadoop git commit: HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

2016-06-28 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0a1a2ce07 -> 1e3476318
  refs/heads/trunk 4fd37eed9 -> 2a0082c51


HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a0082c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a0082c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a0082c5

Branch: refs/heads/trunk
Commit: 2a0082c51da7cbe2770eddb5f72cd7f8d72fa5f6
Parents: 4fd37ee
Author: Vinayakumar B 
Authored: Tue Jun 28 16:49:39 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Jun 28 16:49:39 2016 +0530

--
 .../hdfs/server/datanode/BPServiceActor.java|  46 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  26 
 .../hdfs/server/datanode/DataNodeMXBean.java|  20 ++-
 .../src/main/webapps/datanode/datanode.html | 129 +++
 .../hadoop-hdfs/src/main/webapps/datanode/dn.js |  70 ++
 .../src/main/webapps/datanode/index.html|  48 +--
 .../server/datanode/TestDataNodeMXBean.java |   4 +
 7 files changed, 297 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a0082c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 39f8219..1b61b4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
@@ -138,6 +140,10 @@ class BPServiceActor implements Runnable {
 || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
+  String getRunningState() {
+return runningState.toString();
+  }
+
   @Override
   public String toString() {
 return bpos.toString() + " service to " + nnAddr;
@@ -147,6 +153,22 @@ class BPServiceActor implements Runnable {
 return nnAddr;
   }
 
+  private String getNameNodeAddress() {
+return NetUtils.getHostPortString(getNNSocketAddress());
+  }
+
+  Map<String, String> getActorInfoMap() {
+final Map<String, String> info = new HashMap<String, String>();
+info.put("NamenodeAddress", getNameNodeAddress());
+info.put("BlockPoolID", bpos.getBlockPoolId());
+info.put("ActorState", getRunningState());
+info.put("LastHeartbeat",
+String.valueOf(getScheduler().getLastHearbeatTime()));
+info.put("LastBlockReport",
+String.valueOf(getScheduler().getLastBlockReportTime()));
+return info;
+  }
+
   private final CountDownLatch initialRegistrationComplete;
   private final LifelineSender lifelineSender;
 
@@ -379,6 +401,7 @@ class BPServiceActor implements Runnable {
   (nCmds + " commands: " + Joiner.on("; ").join(cmds +
   ".");
 }
+scheduler.updateLastBlockReportTime(monotonicNow());
 scheduler.scheduleNextBlockReport();
 return cmds.size() == 0 ? null : cmds;
   }
@@ -425,6 +448,7 @@ class BPServiceActor implements Runnable {
 " storage reports from service actor: " + this);
 }
 
+scheduler.updateLastHeartbeatTime(monotonicNow());
 VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
 .getVolumeFailureSummary();
 int numFailedVolumes = volumeFailureSummary != null ?
@@ -996,6 +1020,12 @@ class BPServiceActor implements Runnable {
 volatile long nextLifelineTime = monotonicNow();
 
 @VisibleForTesting
+volatile long lastBlockReportTime = monotonicNow();
+
+@VisibleForTesting
+volatile long lastHeartbeatTime = monotonicNow();
+
+@VisibleForTesting
 boolean resetBlockReportTime = true;
 
 private final AtomicBoolean forceFullBlockReport =
@@ -1033,6 +1063,22 @@ class BPServiceActor implements