hadoop git commit: YARN-3131. YarnClientImpl should check FAILED and KILLED state in submitApplication. Contributed by Chang Li

2015-03-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk f717dc51b -> 03cc22945


YARN-3131. YarnClientImpl should check FAILED and KILLED state in 
submitApplication. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03cc2294
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03cc2294
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03cc2294

Branch: refs/heads/trunk
Commit: 03cc22945e5d4e953c06a313b8158389554a6aa7
Parents: f717dc5
Author: Jason Lowe jl...@apache.org
Authored: Wed Mar 4 18:04:22 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Wed Mar 4 18:04:22 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/YarnClientImpl.java| 19 +--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestYarnClient.java| 55 ++--
 4 files changed, 68 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03cc2294/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5eaf4f4..03bb20b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -692,6 +692,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
 available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+YARN-3131. YarnClientImpl should check FAILED and KILLED state in
+submitApplication (Chang Li via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03cc2294/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 6acf7d8..d6b36bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -254,13 +254,22 @@ public class YarnClientImpl extends YarnClient {
 
 int pollCount = 0;
 long startTime = System.currentTimeMillis();
-
+EnumSet<YarnApplicationState> waitingStates = 
+ EnumSet.of(YarnApplicationState.NEW,
+ YarnApplicationState.NEW_SAVING,
+ YarnApplicationState.SUBMITTED);
+EnumSet<YarnApplicationState> failToSubmitStates = 
+  EnumSet.of(YarnApplicationState.FAILED,
+  YarnApplicationState.KILLED);
 while (true) {
   try {
-YarnApplicationState state =
-getApplicationReport(applicationId).getYarnApplicationState();
-if (!state.equals(YarnApplicationState.NEW) &&
-!state.equals(YarnApplicationState.NEW_SAVING)) {
+ApplicationReport appReport = getApplicationReport(applicationId);
+YarnApplicationState state = appReport.getYarnApplicationState();
+if (!waitingStates.contains(state)) {
+  if(failToSubmitStates.contains(state)) {
+throw new YarnException("Failed to submit " + applicationId +
+ " to YARN : " + appReport.getDiagnostics());
+  }
   LOG.info("Submitted application " + applicationId);
   break;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03cc2294/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index da7d505..782bc43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -646,7 +646,7 @@ public abstract class ProtocolHATestBase extends 
ClientBaseWithFixes {
   ApplicationReport report =
   

hadoop git commit: HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. Contributed by Chris Nauroth.

2015-03-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c24448c27 -> d020544ef


HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. 
Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d020544e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d020544e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d020544e

Branch: refs/heads/branch-2
Commit: d020544ef668591c6fdb178749a20b1954b8a75b
Parents: c24448c
Author: Haohui Mai whe...@apache.org
Authored: Wed Mar 4 09:17:21 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Wed Mar 4 09:17:29 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop-hdfs/src/CMakeLists.txt  | 23 +++--
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 92 +++-
 3 files changed, 111 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d020544e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6506451..523c7a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -780,6 +780,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7869. Inconsistency in the return information while performing rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
+(Chris Nauroth via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d020544e/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index aceeac1..563727b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -27,7 +27,15 @@ include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLICY_SCOPE)
 function(add_dual_library LIBNAME)
 add_library(${LIBNAME} SHARED ${ARGN})
 add_library(${LIBNAME}_static STATIC ${ARGN})
-set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+# Linux builds traditionally ship a libhdfs.a (static linking) and 
libhdfs.so
+# (dynamic linking).  On Windows, we cannot use the same base name for both
+# static and dynamic, because Windows does not use distinct file extensions
+# for a statically linked library vs. a DLL import library.  Both use the
+# .lib extension.  On Windows, we'll build the static library as
+# hdfs_static.lib.
+if (NOT WIN32)
+set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME 
${LIBNAME})
+endif (NOT WIN32)
 endfunction(add_dual_library)
 
 # Link both a static and a dynamic target against some libraries
@@ -105,11 +113,14 @@ else (WIN32)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
 set(OS_DIR main/native/libhdfs/os/posix)
 set(OS_LINK_LIBRARIES pthread)
 set(OUT_DIR target/usr/local/lib)
 endif (WIN32)
 
+add_definitions(-DLIBHDFS_DLL_EXPORT)
+
 include_directories(
 ${GENERATED_JAVAH}
 ${CMAKE_CURRENT_SOURCE_DIR}
@@ -150,7 +161,7 @@ add_executable(test_libhdfs_ops
 main/native/libhdfs/test/test_libhdfs_ops.c
 )
 target_link_libraries(test_libhdfs_ops
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -158,7 +169,7 @@ add_executable(test_libhdfs_read
 main/native/libhdfs/test/test_libhdfs_read.c
 )
 target_link_libraries(test_libhdfs_read
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -166,7 +177,7 @@ add_executable(test_libhdfs_write
 main/native/libhdfs/test/test_libhdfs_write.c
 )
 target_link_libraries(test_libhdfs_write
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -196,7 +207,7 @@ add_executable(test_libhdfs_threaded
 ${OS_DIR}/thread.c
 )
 target_link_libraries(test_libhdfs_threaded
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )
@@ -206,7 +217,7 @@ add_executable(test_libhdfs_zerocopy
 main/native/libhdfs/test/test_libhdfs_zerocopy.c
 )
 target_link_libraries(test_libhdfs_zerocopy
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )


hadoop git commit: HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. Contributed by Chris Nauroth.

2015-03-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3560180b6 -> f717dc51b


HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. 
Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f717dc51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f717dc51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f717dc51

Branch: refs/heads/trunk
Commit: f717dc51b27d72ad02732a8da397e4a1cc270514
Parents: 3560180
Author: Haohui Mai whe...@apache.org
Authored: Wed Mar 4 09:17:21 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Wed Mar 4 09:17:21 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop-hdfs/src/CMakeLists.txt  | 23 +++--
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 92 +++-
 3 files changed, 111 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f717dc51/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2037973..62006d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1086,6 +1086,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7869. Inconsistency in the return information while performing rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
+(Chris Nauroth via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f717dc51/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index aceeac1..563727b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -27,7 +27,15 @@ include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLICY_SCOPE)
 function(add_dual_library LIBNAME)
 add_library(${LIBNAME} SHARED ${ARGN})
 add_library(${LIBNAME}_static STATIC ${ARGN})
-set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+# Linux builds traditionally ship a libhdfs.a (static linking) and 
libhdfs.so
+# (dynamic linking).  On Windows, we cannot use the same base name for both
+# static and dynamic, because Windows does not use distinct file extensions
+# for a statically linked library vs. a DLL import library.  Both use the
+# .lib extension.  On Windows, we'll build the static library as
+# hdfs_static.lib.
+if (NOT WIN32)
+set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME 
${LIBNAME})
+endif (NOT WIN32)
 endfunction(add_dual_library)
 
 # Link both a static and a dynamic target against some libraries
@@ -105,11 +113,14 @@ else (WIN32)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
 set(OS_DIR main/native/libhdfs/os/posix)
 set(OS_LINK_LIBRARIES pthread)
 set(OUT_DIR target/usr/local/lib)
 endif (WIN32)
 
+add_definitions(-DLIBHDFS_DLL_EXPORT)
+
 include_directories(
 ${GENERATED_JAVAH}
 ${CMAKE_CURRENT_SOURCE_DIR}
@@ -150,7 +161,7 @@ add_executable(test_libhdfs_ops
 main/native/libhdfs/test/test_libhdfs_ops.c
 )
 target_link_libraries(test_libhdfs_ops
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -158,7 +169,7 @@ add_executable(test_libhdfs_read
 main/native/libhdfs/test/test_libhdfs_read.c
 )
 target_link_libraries(test_libhdfs_read
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -166,7 +177,7 @@ add_executable(test_libhdfs_write
 main/native/libhdfs/test/test_libhdfs_write.c
 )
 target_link_libraries(test_libhdfs_write
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -196,7 +207,7 @@ add_executable(test_libhdfs_threaded
 ${OS_DIR}/thread.c
 )
 target_link_libraries(test_libhdfs_threaded
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )
@@ -206,7 +217,7 @@ add_executable(test_libhdfs_zerocopy
 main/native/libhdfs/test/test_libhdfs_zerocopy.c
 )
 target_link_libraries(test_libhdfs_zerocopy
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )


hadoop git commit: YARN-3131. YarnClientImpl should check FAILED and KILLED state in submitApplication. Contributed by Chang Li (cherry picked from commit 03cc22945e5d4e953c06a313b8158389554a6aa7)

2015-03-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d020544ef -> ab397194c


YARN-3131. YarnClientImpl should check FAILED and KILLED state in 
submitApplication. Contributed by Chang Li
(cherry picked from commit 03cc22945e5d4e953c06a313b8158389554a6aa7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab397194
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab397194
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab397194

Branch: refs/heads/branch-2
Commit: ab397194c384959d7afd53d1a548933a4d5df487
Parents: d020544
Author: Jason Lowe jl...@apache.org
Authored: Wed Mar 4 18:04:22 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Wed Mar 4 18:05:20 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/YarnClientImpl.java| 19 +--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestYarnClient.java| 55 ++--
 4 files changed, 68 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab397194/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 03a4a82..fe2af4b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -650,6 +650,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
 available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+YARN-3131. YarnClientImpl should check FAILED and KILLED state in
+submitApplication (Chang Li via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab397194/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 6acf7d8..d6b36bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -254,13 +254,22 @@ public class YarnClientImpl extends YarnClient {
 
 int pollCount = 0;
 long startTime = System.currentTimeMillis();
-
+EnumSet<YarnApplicationState> waitingStates = 
+ EnumSet.of(YarnApplicationState.NEW,
+ YarnApplicationState.NEW_SAVING,
+ YarnApplicationState.SUBMITTED);
+EnumSet<YarnApplicationState> failToSubmitStates = 
+  EnumSet.of(YarnApplicationState.FAILED,
+  YarnApplicationState.KILLED);
 while (true) {
   try {
-YarnApplicationState state =
-getApplicationReport(applicationId).getYarnApplicationState();
-if (!state.equals(YarnApplicationState.NEW) &&
-!state.equals(YarnApplicationState.NEW_SAVING)) {
+ApplicationReport appReport = getApplicationReport(applicationId);
+YarnApplicationState state = appReport.getYarnApplicationState();
+if (!waitingStates.contains(state)) {
+  if(failToSubmitStates.contains(state)) {
+throw new YarnException("Failed to submit " + applicationId +
+ " to YARN : " + appReport.getDiagnostics());
+  }
   LOG.info("Submitted application " + applicationId);
   break;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab397194/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index da7d505..782bc43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -646,7 +646,7 @@ public abstract class ProtocolHATestBase extends 

hadoop git commit: HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.

2015-03-04 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03cc22945 -> ed70fa142


HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed70fa14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed70fa14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed70fa14

Branch: refs/heads/trunk
Commit: ed70fa142cabdbc1065e4dbbc95e99c8850c4751
Parents: 03cc229
Author: Jing Zhao ji...@apache.org
Authored: Wed Mar 4 10:30:53 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Mar 4 10:30:53 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/tools/CopyListing.java|   4 +-
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  90 +
 .../java/org/apache/hadoop/tools/DistCp.java|  16 +-
 .../apache/hadoop/tools/DistCpConstants.java|   3 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  12 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  34 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 192 ++
 .../org/apache/hadoop/tools/OptionsParser.java  |  24 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |   3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 349 +++
 .../apache/hadoop/tools/TestOptionsParser.java  |  75 +++-
 12 files changed, 790 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed70fa14/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62006d3..3c6d447 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -704,6 +704,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
 (gera)
 
+HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed70fa14/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index a7b68a9..e3c58e9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -224,7 +224,9 @@ public abstract class CopyListing extends Configured {
Credentials credentials,
DistCpOptions options)
   throws IOException {
-
+if (options.shouldUseDiff()) {
+  return new GlobbedCopyListing(configuration, credentials);
+}
 String copyListingClassName = configuration.get(DistCpConstants.
 CONF_LABEL_COPY_LISTING_CLASS, "");
 Class<? extends CopyListing> copyListingClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed70fa14/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
new file mode 100644
index 000..b617de7
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
+import 

hadoop git commit: HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.

2015-03-04 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ab397194c -> b2ccf54c1


HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.

(cherry picked from commit ed70fa142cabdbc1065e4dbbc95e99c8850c4751)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2ccf54c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2ccf54c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2ccf54c

Branch: refs/heads/branch-2
Commit: b2ccf54c14be2d83fe06cb9cbd8706c8989af3cb
Parents: ab39719
Author: Jing Zhao ji...@apache.org
Authored: Wed Mar 4 10:30:53 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Mar 4 10:32:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/tools/CopyListing.java|   4 +-
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  90 +
 .../java/org/apache/hadoop/tools/DistCp.java|  16 +-
 .../apache/hadoop/tools/DistCpConstants.java|   3 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  12 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  34 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 192 ++
 .../org/apache/hadoop/tools/OptionsParser.java  |  24 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |   3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 349 +++
 .../apache/hadoop/tools/TestOptionsParser.java  |  75 +++-
 12 files changed, 790 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ccf54c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 523c7a3..42f7c8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -399,6 +399,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
 (gera)
 
+HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ccf54c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index ab5b802..c3cf98e 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -224,7 +224,9 @@ public abstract class CopyListing extends Configured {
Credentials credentials,
DistCpOptions options)
   throws IOException {
-
+if (options.shouldUseDiff()) {
+  return new GlobbedCopyListing(configuration, credentials);
+}
 String copyListingClassName = configuration.get(DistCpConstants.
 CONF_LABEL_COPY_LISTING_CLASS, "");
 Class<? extends CopyListing> copyListingClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ccf54c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
new file mode 100644
index 000..b617de7
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+
+import 

hadoop git commit: MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own class. (Chris Trezzo via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed70fa142 -> c66c3ac6b


MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
class. (Chris Trezzo via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c66c3ac6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c66c3ac6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c66c3ac6

Branch: refs/heads/trunk
Commit: c66c3ac6bf9f63177279feec3f2917e4b882e2bc
Parents: ed70fa1
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 14:42:07 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 14:42:07 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../java/org/apache/hadoop/mapreduce/Job.java   |   1 +
 .../hadoop/mapreduce/JobResourceUploader.java   | 363 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 312 +---
 4 files changed, 370 insertions(+), 309 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c66c3ac6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b2ae9d9..212727e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6248. Exposed the internal MapReduce job's information as a 
public
 API in DistCp. (Jing Zhao via vinodkv)
 
+MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
+class. (Chris Trezzo via kasha)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c66c3ac6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index f404175..9eea4cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -98,6 +98,7 @@ public class Job extends JobContextImpl implements JobContext 
{
 mapreduce.client.genericoptionsparser.used;
   public static final String SUBMIT_REPLICATION = 
 mapreduce.client.submit.file.replication;
+  public static final int DEFAULT_SUBMIT_REPLICATION = 10;
 
   @InterfaceStability.Evolving
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c66c3ac6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
new file mode 100644
index 000..eebdf88
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import 

hadoop git commit: MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own class. (Chris Trezzo via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b2ccf54c1 -> f4d6c5e33


MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
class. (Chris Trezzo via kasha)

(cherry picked from commit c66c3ac6bf9f63177279feec3f2917e4b882e2bc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d6c5e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d6c5e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d6c5e3

Branch: refs/heads/branch-2
Commit: f4d6c5e337e76dc408c9c8f19e306c3f4ba80d8e
Parents: b2ccf54
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 14:42:07 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 14:47:06 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../java/org/apache/hadoop/mapreduce/Job.java   |   1 +
 .../hadoop/mapreduce/JobResourceUploader.java   | 363 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 312 +---
 4 files changed, 370 insertions(+), 309 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d6c5e3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ed3022a..cb215cb 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -75,6 +75,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6248. Exposed the internal MapReduce job's information as a 
public
 API in DistCp. (Jing Zhao via vinodkv)
 
+MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
+class. (Chris Trezzo via kasha)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d6c5e3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 470290c..9e01292 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -98,6 +98,7 @@ public class Job extends JobContextImpl implements JobContext 
{
 mapreduce.client.genericoptionsparser.used;
  public static final String SUBMIT_REPLICATION = 
 "mapreduce.client.submit.file.replication";
+  public static final int DEFAULT_SUBMIT_REPLICATION = 10;
 
   @InterfaceStability.Evolving
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d6c5e3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
new file mode 100644
index 000..eebdf88
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;

hadoop git commit: YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 722b47946 -> 53947f37c


YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53947f37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53947f37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53947f37

Branch: refs/heads/trunk
Commit: 53947f37c7a84a84ef4ab1a3cab63ff27c078385
Parents: 722b479
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 17:33:30 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 17:33:30 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../apache/hadoop/yarn/util/CpuTimeTracker.java | 99 
 .../util/LinuxResourceCalculatorPlugin.java | 46 +++--
 .../yarn/util/ProcfsBasedProcessTree.java   | 77 ++-
 .../util/ResourceCalculatorProcessTree.java | 12 ++-
 .../yarn/util/WindowsBasedProcessTree.java  |  7 +-
 .../util/TestLinuxResourceCalculatorPlugin.java |  4 +-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 38 ++--
 .../util/TestResourceCalculatorProcessTree.java |  5 +
 .../monitor/ContainerMetrics.java   | 39 ++--
 .../monitor/ContainersMonitorImpl.java  | 18 
 .../util/NodeManagerHardwareUtils.java  | 16 +++-
 12 files changed, 311 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53947f37/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 03bb20b..0b71bee 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3272. Surface container locality info in RM web UI.
 (Jian He via wangda)
 
+YARN-3122. Metrics for container's actual CPU usage. 
+(Anubhav Dhoot via kasha)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53947f37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
new file mode 100644
index 000..d36848e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CpuTimeTracker {
+  public static final int UNAVAILABLE = -1;
+  final long MINIMUM_UPDATE_INTERVAL;
+
+  // CPU used time since system is on (ms)
+  BigInteger cumulativeCpuTime = BigInteger.ZERO;
+
+  // CPU used time read last time (ms)
+  BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
+
+  // Unix timestamp while reading the CPU time (ms)
+  long sampleTime;
+  long lastSampleTime;
+  float cpuUsage;
+  BigInteger jiffyLengthInMillis;
+
+  public CpuTimeTracker(long jiffyLengthInMillis) {
+this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+this.cpuUsage = UNAVAILABLE;
+this.sampleTime = UNAVAILABLE;
+this.lastSampleTime = UNAVAILABLE;
+MINIMUM_UPDATE_INTERVAL =  10 * jiffyLengthInMillis;
+  }
+
+  /**
+   * Return percentage of cpu time spent over the time since last update.
+   * CPU time spent is based on elapsed jiffies multiplied by amount of
+   * time for 1 

hadoop git commit: HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp.

2015-03-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk c66c3ac6b -> 722b47946


HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/722b4794
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/722b4794
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/722b4794

Branch: refs/heads/trunk
Commit: 722b4794693d8bad1dee0ca5c2f99030a08402f9
Parents: c66c3ac
Author: Kihwal Lee kih...@apache.org
Authored: Wed Mar 4 17:21:51 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Wed Mar 4 17:21:51 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 48 
 .../server/protocol/DatanodeRegistration.java   | 10 
 .../blockmanagement/TestBlockManager.java   |  7 ---
 .../TestComputeInvalidateWork.java  | 16 +--
 .../TestDatanodeProtocolRetryPolicy.java|  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/722b4794/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c6d447..2be1a4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1091,6 +1091,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
 (Chris Nauroth via wheat9)
 
+HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/722b4794/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b9..f91696f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;   // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode. 
* For newly formatted Datanodes it is a UUID.
*/
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
 from.getHostName(),
-from.getDatanodeUuid(),
+datanodeUuid,
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-this.ipAddr = ipAddr;
+setIpAndXferPort(ipAddr, xferPort);
 this.hostName = hostName;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-this.xferPort = xferPort;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
-updateXferAddrAndInvalidateHashCode();
   }
   
   public void setIpAddr(String ipAddr) {
+//updated during registration, preserve former xferPort
+setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+// build xferAddr string to reduce cost of frequent use
 this.ipAddr = ipAddr;
-updateXferAddrAndInvalidateHashCode();
+this.xferPort = xferPort;
+this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
 return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-this.datanodeUuid = datanodeUuid;
-updateXferAddrAndInvalidateHashCode();
-  }
-
   private String checkDatanodeUuid(String uuid) {
 if (uuid 

hadoop git commit: HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.

2015-03-04 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1749094fa -> ada545d3b


HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by 
Dongming Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ada545d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ada545d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ada545d3

Branch: refs/heads/branch-2
Commit: ada545d3b70e30759c9067159f3de16abd885c6b
Parents: 1749094
Author: Dongming Liang dongming.li...@capitalone.com
Authored: Wed Mar 4 17:47:05 2015 -0800
Committer: Konstantin V Shvachko s...@apache.org
Committed: Wed Mar 4 17:52:25 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java  | 4 +++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java | 3 +--
 .../org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 3 ++-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java| 3 ++-
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 2 +-
 11 files changed, 19 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada545d3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6d2ec99..2c3caad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -401,6 +401,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
 
+HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
+(Dongming Liang via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada545d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 628c610..ce96ac9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -351,7 +352,8 @@ public class RemoteBlockReader extends FSInputChecker 
implements BlockReader {
   long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
   DatanodeID datanodeID, PeerCache peerCache) {
 // Path is used only for printing block and file information in debug
-super(new Path("/blk_" + blockId + ":" + bpid + ":of:" + file)/*too non 
path-like?*/,
+super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+":" + bpid + ":of:" + file)/*too non path-like?*/,
   1, verifyChecksum,
   checksum.getChecksumSize()  0? checksum : null, 
   checksum.getBytesPerChecksum(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada545d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 754df2c..001f684 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -83,7 +83,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String 

hadoop git commit: YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 430b53718 -> 22426a1c9


YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
jobs. (Siqi Li via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22426a1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22426a1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22426a1c

Branch: refs/heads/trunk
Commit: 22426a1c9f4bd616558089b6862fd34ab42d19a7
Parents: 430b537
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 18:06:36 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 18:06:58 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FairScheduler.java   |   1 +
 .../scheduler/fair/MaxRunningAppsEnforcer.java  |  40 ++-
 .../scheduler/fair/TestFairScheduler.java   | 310 ++-
 4 files changed, 348 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22426a1c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b71bee..9a52325 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3131. YarnClientImpl should check FAILED and KILLED state in
 submitApplication (Chang Li via jlowe)
+
+YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
+jobs. (Siqi Li via kasha)
 
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22426a1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 2b59716..e8a9555 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1477,6 +1477,7 @@ public class FairScheduler extends
 allocConf = queueInfo;
 allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
 queueMgr.updateAllocationConfiguration(allocConf);
+maxRunningEnforcer.updateRunnabilityOnReload();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22426a1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 2c90edd..f750438 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -105,6 +105,26 @@ public class MaxRunningAppsEnforcer {
   }
 
   /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitilized
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max running apps of given queue has been changed
+   *
+   * Runs in O(n) where n is the number of apps that are non-runnable and in
+   * the queues that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnReload() {
+FSParentQueue rootQueue = scheduler.getQueueManager().getRootQueue();
+List<List<FSAppAttempt>> 

hadoop git commit: YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ada545d3b -> 721d7b574


YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
jobs. (Siqi Li via kasha)

(cherry picked from commit 22426a1c9f4bd616558089b6862fd34ab42d19a7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/721d7b57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/721d7b57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/721d7b57

Branch: refs/heads/branch-2
Commit: 721d7b574126c4070322f70ec5b49a7b8558a4c7
Parents: ada545d
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 18:06:36 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 18:07:33 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FairScheduler.java   |   1 +
 .../scheduler/fair/MaxRunningAppsEnforcer.java  |  40 ++-
 .../scheduler/fair/TestFairScheduler.java   | 310 ++-
 4 files changed, 348 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/721d7b57/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 92c44e9..e648e30 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -655,6 +655,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3131. YarnClientImpl should check FAILED and KILLED state in
 submitApplication (Chang Li via jlowe)
+
+YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
+jobs. (Siqi Li via kasha)
 
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/721d7b57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 2b59716..e8a9555 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1477,6 +1477,7 @@ public class FairScheduler extends
 allocConf = queueInfo;
 allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
 queueMgr.updateAllocationConfiguration(allocConf);
+maxRunningEnforcer.updateRunnabilityOnReload();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/721d7b57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 2c90edd..f750438 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -105,6 +105,26 @@ public class MaxRunningAppsEnforcer {
   }
 
   /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitilized
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max running apps of given queue has been changed
+   *
+   * Runs in O(n) where n is the number of apps that are non-runnable and in
+   * the queues that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnReload() {
+FSParentQueue rootQueue = 

hadoop git commit: HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp. (cherry picked from commit 722b4794693d8bad1dee0ca5c2f99030a08402f9)

2015-03-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f4d6c5e33 -> f85530f64


HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn 
Sharp.
(cherry picked from commit 722b4794693d8bad1dee0ca5c2f99030a08402f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f85530f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f85530f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f85530f6

Branch: refs/heads/branch-2
Commit: f85530f649bd7c16bd7c1d4a3447863563d24c03
Parents: f4d6c5e
Author: Kihwal Lee kih...@apache.org
Authored: Wed Mar 4 17:23:00 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Wed Mar 4 17:23:00 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 48 
 .../server/protocol/DatanodeRegistration.java   | 10 
 .../blockmanagement/TestBlockManager.java   |  7 ---
 .../TestComputeInvalidateWork.java  | 16 +--
 .../TestDatanodeProtocolRetryPolicy.java|  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85530f6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42f7c8c..6d2ec99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -785,6 +785,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
 (Chris Nauroth via wheat9)
 
+HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85530f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b9..f91696f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;   // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode. 
* For newly formatted Datanodes it is a UUID.
*/
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
 from.getHostName(),
-from.getDatanodeUuid(),
+datanodeUuid,
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-this.ipAddr = ipAddr;
+setIpAndXferPort(ipAddr, xferPort);
 this.hostName = hostName;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-this.xferPort = xferPort;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
-updateXferAddrAndInvalidateHashCode();
   }
   
   public void setIpAddr(String ipAddr) {
+//updated during registration, preserve former xferPort
+setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+// build xferAddr string to reduce cost of frequent use
 this.ipAddr = ipAddr;
-updateXferAddrAndInvalidateHashCode();
+this.xferPort = xferPort;
+this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
 return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-this.datanodeUuid = datanodeUuid;
-updateXferAddrAndInvalidateHashCode();

hadoop git commit: YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f85530f64 -> 1749094fa


YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)

(cherry picked from commit 53947f37c7a84a84ef4ab1a3cab63ff27c078385)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1749094f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1749094f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1749094f

Branch: refs/heads/branch-2
Commit: 1749094fab45ff772d950af7a1a48b8a0adba778
Parents: f85530f
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 17:33:30 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 17:39:39 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../apache/hadoop/yarn/util/CpuTimeTracker.java | 99 
 .../util/LinuxResourceCalculatorPlugin.java | 46 +++--
 .../yarn/util/ProcfsBasedProcessTree.java   | 77 ++-
 .../util/ResourceCalculatorProcessTree.java | 12 ++-
 .../yarn/util/WindowsBasedProcessTree.java  |  7 +-
 .../util/TestLinuxResourceCalculatorPlugin.java |  4 +-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 38 ++--
 .../util/TestResourceCalculatorProcessTree.java |  5 +
 .../monitor/ContainerMetrics.java   | 39 ++--
 .../monitor/ContainersMonitorImpl.java  | 18 
 .../util/NodeManagerHardwareUtils.java  | 16 +++-
 12 files changed, 311 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1749094f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fe2af4b..92c44e9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -309,6 +309,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3285. (Backport YARN-3168) Convert branch-2 .apt.vm files of YARN to
 markdown. (Masatake Iwasaki via jianhe)
 
+YARN-3122. Metrics for container's actual CPU usage. 
+(Anubhav Dhoot via kasha)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1749094f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
new file mode 100644
index 000..d36848e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CpuTimeTracker {
+  public static final int UNAVAILABLE = -1;
+  final long MINIMUM_UPDATE_INTERVAL;
+
+  // CPU used time since system is on (ms)
+  BigInteger cumulativeCpuTime = BigInteger.ZERO;
+
+  // CPU used time read last time (ms)
+  BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
+
+  // Unix timestamp while reading the CPU time (ms)
+  long sampleTime;
+  long lastSampleTime;
+  float cpuUsage;
+  BigInteger jiffyLengthInMillis;
+
+  public CpuTimeTracker(long jiffyLengthInMillis) {
+this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+this.cpuUsage = UNAVAILABLE;
+this.sampleTime = UNAVAILABLE;
+this.lastSampleTime = UNAVAILABLE;
+MINIMUM_UPDATE_INTERVAL =  10 * jiffyLengthInMillis;
+  }
+
+  /**
+   * Return percentage of cpu time spent over the time 

hadoop git commit: HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by Liang Xie.

2015-03-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ac874764d -> f805d48b1


HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by 
Liang Xie.

(cherry picked from commit 74a4754d1c790b8740a4221f276aa571bc5dbfd5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f805d48b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f805d48b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f805d48b

Branch: refs/heads/branch-2
Commit: f805d48b198471969f070abc49170082dd826e3b
Parents: ac87476
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 16:05:44 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Mar 5 16:06:37 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/net/unix/DomainSocketWatcher.java | 8 +---
 .../org/apache/hadoop/net/unix/TestDomainSocketWatcher.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java| 2 +-
 .../apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java | 3 ++-
 6 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f805d48b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f4728b2..d7b0980 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -227,6 +227,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11658. Externalize io.compression.codecs property.
 (Kai Zheng via aajisaka)
 
+HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
+(Liang Xie via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f805d48b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 8c617dc..03b52e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -238,7 +238,8 @@ public final class DomainSocketWatcher implements Closeable 
{
*/
   private boolean kicked = false;
 
-  public DomainSocketWatcher(int interruptCheckPeriodMs) throws IOException {
+  public DomainSocketWatcher(int interruptCheckPeriodMs, String src)
+  throws IOException {
 if (loadingFailureReason != null) {
   throw new UnsupportedOperationException(loadingFailureReason);
 }
@@ -246,8 +247,9 @@ public final class DomainSocketWatcher implements Closeable 
{
 this.interruptCheckPeriodMs = interruptCheckPeriodMs;
 notificationSockets = DomainSocket.socketpair();
 watcherThread.setDaemon(true);
-watcherThread.setUncaughtExceptionHandler(
-new Thread.UncaughtExceptionHandler() {
+watcherThread.setName(src + " DomainSocketWatcher");
+watcherThread
+.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
   @Override
   public void uncaughtException(Thread thread, Throwable t) {
  LOG.error(thread + " terminating on unexpected exception", t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f805d48b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
index e85e414..4b0e2a8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
@@ -195,7 +195,7 @@ public class TestDomainSocketWatcher {
   private DomainSocketWatcher newDomainSocketWatcher(int 
interruptCheckPeriodMs)
   throws Exception {
 DomainSocketWatcher watcher = new DomainSocketWatcher(
-interruptCheckPeriodMs);
+interruptCheckPeriodMs, getClass().getSimpleName());
 

hadoop git commit: HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng

2015-03-04 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f1b28c19d -> 74e174689


HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74e17468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74e17468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74e17468

Branch: refs/heads/HDFS-7285
Commit: 74e1746893d09ac20eea54372fd4f7a7309f551e
Parents: f1b28c1
Author: drankye kai.zh...@intel.com
Authored: Thu Mar 5 22:51:52 2015 +0800
Committer: drankye kai.zh...@intel.com
Committed: Thu Mar 5 22:51:52 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 203 +++
 .../hadoop/io/erasurecode/TestECSchema.java |  54 +
 3 files changed, 261 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e17468/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7bbacf7..ee42c84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -12,3 +12,7 @@
 HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
 ( Kai Zheng )
 
+HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai 
Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e17468/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
new file mode 100644
index 000..8dc3f45
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Erasure coding schema to housekeeper relevant information.
+ */
+public class ECSchema {
+  public static final String NUM_DATA_UNITS_KEY = "k";
+  public static final String NUM_PARITY_UNITS_KEY = "m";
+  public static final String CODEC_NAME_KEY = "codec";
+  public static final String CHUNK_SIZE_KEY = "chunkSize";
+  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+
+  private String schemaName;
+  private String codecName;
+  private Map<String, String> options;
+  private int numDataUnits;
+  private int numParityUnits;
+  private int chunkSize;
+
+  /**
+   * Constructor with schema name and provided options. Note the options may
+   * contain additional information for the erasure codec to interpret further.
+   * @param schemaName schema name
+   * @param options schema options
+   */
+  public ECSchema(String schemaName, Map<String, String> options) {
+assert (schemaName != null && !schemaName.isEmpty());
+
+this.schemaName = schemaName;
+
+if (options == null || options.isEmpty()) {
+  throw new IllegalArgumentException("No schema options are provided");
+}
+
+String codecName = options.get(CODEC_NAME_KEY);
+if (codecName == null || codecName.isEmpty()) {
+  throw new IllegalArgumentException("No codec option is provided");
+}
+
+int dataUnits = 0, parityUnits = 0;
+try {
+  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
+dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
+  }
+} catch (NumberFormatException e) {
+  throw new IllegalArgumentException("Option value " +
+  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+   is found. It 

hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

2015-03-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f805d48b1 -> b569c3ab1


HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be 
non static. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b569c3ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b569c3ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b569c3ab

Branch: refs/heads/branch-2
Commit: b569c3ab1cb7e328dde822f6b2405d24b9560e3a
Parents: f805d48
Author: yliu y...@apache.org
Authored: Thu Mar 5 06:39:58 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 5 06:39:58 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d7b0980..0159a0d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -634,6 +634,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11666. Revert the format change of du output introduced by
 HADOOP-6857. (Byron Wong via aajisaka)
 
+HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+should be non static. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index f3e5b90..2e87f91 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream 
implements
 Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
 CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
 ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index ce72700..120b378 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
 Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;



hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

2015-03-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74a4754d1 -> 5e9b8144d


HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be 
non static. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e9b8144
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e9b8144
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e9b8144

Branch: refs/heads/trunk
Commit: 5e9b8144d54f586803212a0bdd8b1c25bdbb1e97
Parents: 74a4754
Author: yliu y...@apache.org
Authored: Thu Mar 5 06:38:45 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 5 06:38:45 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 92af646..65c6d85 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1041,6 +1041,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
 to wrapped FS. (gera)
 
+HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+should be non static. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index f3e5b90..2e87f91 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream 
implements
 Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
 CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
 ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 876ffd6..f1ea0fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
 Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;



hadoop git commit: HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by Liang Xie.

2015-03-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 348208014 -> 74a4754d1


HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by 
Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74a4754d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74a4754d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74a4754d

Branch: refs/heads/trunk
Commit: 74a4754d1c790b8740a4221f276aa571bc5dbfd5
Parents: 3482080
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 16:05:44 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Mar 5 16:05:44 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/net/unix/DomainSocketWatcher.java | 8 +---
 .../org/apache/hadoop/net/unix/TestDomainSocketWatcher.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java| 2 +-
 .../apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java | 3 ++-
 6 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74a4754d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d518d9f..92af646 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11658. Externalize io.compression.codecs property.
 (Kai Zheng via aajisaka)
 
+HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
+(Liang Xie via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74a4754d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 8c617dc..03b52e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -238,7 +238,8 @@ public final class DomainSocketWatcher implements Closeable 
{
*/
   private boolean kicked = false;
 
-  public DomainSocketWatcher(int interruptCheckPeriodMs) throws IOException {
+  public DomainSocketWatcher(int interruptCheckPeriodMs, String src)
+  throws IOException {
 if (loadingFailureReason != null) {
   throw new UnsupportedOperationException(loadingFailureReason);
 }
@@ -246,8 +247,9 @@ public final class DomainSocketWatcher implements Closeable 
{
 this.interruptCheckPeriodMs = interruptCheckPeriodMs;
 notificationSockets = DomainSocket.socketpair();
 watcherThread.setDaemon(true);
-watcherThread.setUncaughtExceptionHandler(
-new Thread.UncaughtExceptionHandler() {
+watcherThread.setName(src + " DomainSocketWatcher");
+watcherThread
+.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
   @Override
   public void uncaughtException(Thread thread, Throwable t) {
  LOG.error(thread + " terminating on unexpected exception", t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74a4754d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
index e85e414..4b0e2a8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
@@ -195,7 +195,7 @@ public class TestDomainSocketWatcher {
   private DomainSocketWatcher newDomainSocketWatcher(int 
interruptCheckPeriodMs)
   throws Exception {
 DomainSocketWatcher watcher = new DomainSocketWatcher(
-interruptCheckPeriodMs);
+interruptCheckPeriodMs, getClass().getSimpleName());
 watcher.watcherThread.setUncaughtExceptionHandler(
 new Thread.UncaughtExceptionHandler() {
 

hadoop git commit: MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by Brahma Reddy Battula.

2015-03-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0d62e9488 -> ac874764d


MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by 
Brahma Reddy Battula.

(cherry picked from commit 348208014b2023894dc416dcd733a9ccdafc6e25)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac874764
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac874764
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac874764

Branch: refs/heads/branch-2
Commit: ac874764d52fa41a901442efe14a7e08fa0a7682
Parents: 0d62e94
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 14:12:47 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Mar 5 14:13:13 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac874764/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index cb215cb..88660f0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -163,6 +163,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
 via ozawa)
 
+MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
+Reddy Battula via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac874764/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 6301121..f484935 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -1426,10 +1426,6 @@ public class MRAppMaster extends CompositeService {
   String jobUserName = System
   .getenv(ApplicationConstants.Environment.USER.name());
   conf.set(MRJobConfig.USER_NAME, jobUserName);
-  // Do not automatically close FileSystem objects so that in case of
-  // SIGTERM I have a chance to write out the job history. I'll be closing
-  // the objects myself.
-  conf.setBoolean("fs.automatic.close", false);
   initAndStartAppMaster(appMaster, conf, jobUserName);
 } catch (Throwable t) {
   LOG.fatal(Error starting MRAppMaster, t);



hadoop git commit: MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by Brahma Reddy Battula.

2015-03-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8d88691d1 -> 348208014


MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34820801
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34820801
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34820801

Branch: refs/heads/trunk
Commit: 348208014b2023894dc416dcd733a9ccdafc6e25
Parents: 8d88691
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 14:12:47 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Mar 5 14:12:47 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34820801/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 212727e..d0d8216 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -414,6 +414,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
 via ozawa)
 
+MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
+Reddy Battula via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34820801/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 8d5be86..5d3ad5b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -1451,10 +1451,6 @@ public class MRAppMaster extends CompositeService {
   String jobUserName = System
   .getenv(ApplicationConstants.Environment.USER.name());
   conf.set(MRJobConfig.USER_NAME, jobUserName);
-  // Do not automatically close FileSystem objects so that in case of
-  // SIGTERM I have a chance to write out the job history. I'll be closing
-  // the objects myself.
-  conf.setBoolean("fs.automatic.close", false);
   initAndStartAppMaster(appMaster, conf, jobUserName);
 } catch (Throwable t) {
   LOG.fatal("Error starting MRAppMaster", t);



hadoop git commit: HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.

2015-03-04 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53947f37c -> 430b53718


HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by 
Dongming Liang.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/430b5371
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/430b5371
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/430b5371

Branch: refs/heads/trunk
Commit: 430b5371883e22abb65f37c3e3d4afc3f421fc89
Parents: 53947f3
Author: Dongming Liang dongming.li...@capitalone.com 
Authored: Wed Mar 4 17:47:05 2015 -0800
Committer: Konstantin V Shvachko s...@apache.org
Committed: Wed Mar 4 17:51:12 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java  | 4 +++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java | 3 +--
 .../org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 3 ++-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java| 3 ++-
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 2 +-
 11 files changed, 19 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/430b5371/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2be1a4c..d9008d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
 
+HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
+(Dongming Liang via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/430b5371/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 628c610..ce96ac9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -351,7 +352,8 @@ public class RemoteBlockReader extends FSInputChecker 
implements BlockReader {
   long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
   DatanodeID datanodeID, PeerCache peerCache) {
 // Path is used only for printing block and file information in debug
-super(new Path("/blk_" + blockId + ":" + bpid + ":of:" + file)/*too non 
path-like?*/,
+super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+":" + bpid + ":of:" + file)/*too non path-like?*/,
   1, verifyChecksum,
  checksum.getChecksumSize() > 0? checksum : null, 
   checksum.getBytesPerChecksum(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/430b5371/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 754df2c..001f684 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -83,7 +83,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX 

hadoop git commit: HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.

2015-03-04 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 721d7b574 -> f9a2007af


HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9a2007a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9a2007a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9a2007a

Branch: refs/heads/branch-2
Commit: f9a2007affd2fdcbaedaf03628eee0c2b0831e41
Parents: 721d7b5
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Mar 5 10:21:29 2015 +0800
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Thu Mar 5 10:23:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 478 +++
 2 files changed, 481 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a2007a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c3caad..fe1c970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -404,6 +404,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
 (Dongming Liang via shv)
 
+HDFS-7746. Add a test randomly mixing append, truncate and snapshot
+operations. (szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a2007a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
new file mode 100644
index 000..5c4c7b4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Test randomly mixing append, snapshot and truncate operations.
+ * Use local file system to simulate the each operation and verify
+ * the correctness.
+ */
+public class TestAppendSnapshotTruncate {
+  static {
+GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
+  }
+  private static final Log LOG = 

hadoop git commit: HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.

2015-03-04 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 22426a1c9 -> ded0200e9


HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ded0200e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ded0200e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ded0200e

Branch: refs/heads/trunk
Commit: ded0200e9c98dea960db756bb208ff475d710e28
Parents: 22426a1
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Mar 5 10:21:29 2015 +0800
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Thu Mar 5 10:21:29 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 478 +++
 2 files changed, 481 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ded0200e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d9008d9..f9541e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -709,6 +709,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
 (Dongming Liang via shv)
 
+HDFS-7746. Add a test randomly mixing append, truncate and snapshot
+operations. (szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ded0200e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
new file mode 100644
index 000..5c4c7b4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Test randomly mixing append, snapshot and truncate operations.
+ * Use local file system to simulate the each operation and verify
+ * the correctness.
+ */
+public class TestAppendSnapshotTruncate {
+  static {
+GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
+  }
+  private static final Log LOG = 

hadoop git commit: YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk ded0200e9 -> 8d88691d1


YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
events for old client. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d88691d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d88691d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d88691d

Branch: refs/heads/trunk
Commit: 8d88691d162f87f95c9ed7e0a569ef08e8385d4f
Parents: ded0200
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 19:47:02 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 19:49:05 2015 -0800

--
 .../apache/hadoop/ha/ClientBaseWithFixes.java   | 11 +++-
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../recovery/ZKRMStateStore.java| 53 
 .../TestZKRMStateStoreZKClientConnections.java  | 33 +---
 4 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d88691d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 7d0727a..5f03133 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -90,6 +90,14 @@ public abstract class ClientBaseWithFixes extends ZKTestCase 
{
 // XXX this doesn't need to be volatile! (Should probably be final)
 volatile CountDownLatch clientConnected;
 volatile boolean connected;
+protected ZooKeeper client;
+
+public void initializeWatchedClient(ZooKeeper zk) {
+if (client != null) {
throw new RuntimeException("Watched Client was already set");
+}
+client = zk;
+}
 
 public CountdownWatcher() {
 reset();
@@ -191,8 +199,7 @@ public abstract class ClientBaseWithFixes extends 
ZKTestCase {
 zk.close();
 }
 }
-
-
+watcher.initializeWatchedClient(zk);
 return zk;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d88691d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9a52325..4dd61eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -701,6 +701,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
 jobs. (Siqi Li via kasha)
 
+YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
+events for old client. (Zhihai Xu via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d88691d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 591a551..614ef15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -153,7 +153,13 @@ public class ZKRMStateStore extends RMStateStore {
 
   @VisibleForTesting
   protected ZooKeeper zkClient;
-  private ZooKeeper oldZkClient;
+
+  /* activeZkClient is not used to do actual operations,
+   * it is only used to verify client session for watched events and
+   * it gets activated into zkClient on connection event.
+   */
+  @VisibleForTesting
+  ZooKeeper activeZkClient;
 
   /** Fencing related variables */
  private static final String FENCING_LOCK = "RM_ZK_FENCING_LOCK";
@@ -355,21 +361,14 @@ public class ZKRMStateStore extends 

hadoop git commit: YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)

2015-03-04 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f9a2007af -> 0d62e9488


YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
events for old client. (Zhihai Xu via kasha)

(cherry picked from commit 8d88691d162f87f95c9ed7e0a569ef08e8385d4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d62e948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d62e948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d62e948

Branch: refs/heads/branch-2
Commit: 0d62e948877e5d50f1b6fbe735a94ac6da5ff472
Parents: f9a2007
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 19:47:02 2015 -0800
Committer: Karthik Kambatla ka...@apache.org
Committed: Wed Mar 4 19:49:21 2015 -0800

--
 .../apache/hadoop/ha/ClientBaseWithFixes.java   | 11 +++-
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../recovery/ZKRMStateStore.java| 53 
 .../TestZKRMStateStoreZKClientConnections.java  | 33 +---
 4 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d62e948/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 7d0727a..5f03133 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -90,6 +90,14 @@ public abstract class ClientBaseWithFixes extends ZKTestCase 
{
 // XXX this doesn't need to be volatile! (Should probably be final)
 volatile CountDownLatch clientConnected;
 volatile boolean connected;
+protected ZooKeeper client;
+
+public void initializeWatchedClient(ZooKeeper zk) {
+if (client != null) {
throw new RuntimeException("Watched Client was already set");
+}
+client = zk;
+}
 
 public CountdownWatcher() {
 reset();
@@ -191,8 +199,7 @@ public abstract class ClientBaseWithFixes extends 
ZKTestCase {
 zk.close();
 }
 }
-
-
+watcher.initializeWatchedClient(zk);
 return zk;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d62e948/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e648e30..8313b48 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -659,6 +659,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
 jobs. (Siqi Li via kasha)
 
+YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
+events for old client. (Zhihai Xu via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d62e948/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 591a551..614ef15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -153,7 +153,13 @@ public class ZKRMStateStore extends RMStateStore {
 
   @VisibleForTesting
   protected ZooKeeper zkClient;
-  private ZooKeeper oldZkClient;
+
+  /* activeZkClient is not used to do actual operations,
+   * it is only used to verify client session for watched events and
+   * it gets activated into zkClient on connection event.
+   */
+  @VisibleForTesting
+  ZooKeeper activeZkClient;
 
   /** Fencing related variables */
   private static final String FENCING_LOCK = 

hadoop git commit: HDFS-7869. Inconsistency in the return information while performing rolling upgrade ( Contributed by J.Andreina )

2015-03-04 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 29bb68986 -> 3560180b6


HDFS-7869. Inconsistency in the return information while performing rolling 
upgrade ( Contributed by J.Andreina )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3560180b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3560180b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3560180b

Branch: refs/heads/trunk
Commit: 3560180b6e9926aa3ee1357da59b28a4b4689a0d
Parents: 29bb689
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Mar 4 14:38:38 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Mar 4 14:38:38 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java   | 6 +++---
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java  | 3 +--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java   | 4 ++--
 5 files changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3560180b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ff3c78..2037973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1083,6 +1083,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
 includes non-snapshotted content. (Charles Lamb via atm)
 
+HDFS-7869. Inconsistency in the return information while performing rolling
+upgrade ( J.Andreina via vinayakumarb )
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3560180b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d2b48f3..77b4a27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7475,7 +7475,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
-  void finalizeRollingUpgrade() throws IOException {
+  RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
 checkSuperuserPrivilege();
 checkOperation(OperationCategory.WRITE);
 writeLock();
@@ -7483,7 +7483,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 try {
   checkOperation(OperationCategory.WRITE);
   if (!isRollingUpgrade()) {
-return;
+return null;
   }
   checkNameNodeSafeMode(Failed to finalize rolling upgrade);
 
@@ -7508,7 +7508,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 if (auditLog.isInfoEnabled() && isExternalInvocation()) {
   logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
 }
-return;
+return returnInfo;
   }
 
   RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3560180b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9ccdb40..f20fb35 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1145,8 +1145,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 case PREPARE:
   return namesystem.startRollingUpgrade();
 case FINALIZE:
-  namesystem.finalizeRollingUpgrade();
-  return null;
+  return namesystem.finalizeRollingUpgrade();
 default:
   throw new UnsupportedActionException(action + " is not yet supported.");
 }


hadoop git commit: HDFS-7869. Inconsistency in the return information while performing rolling upgrade ( Contributed by J.Andreina )

2015-03-04 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 31b3f8460 -> c24448c27


HDFS-7869. Inconsistency in the return information while performing rolling 
upgrade ( Contributed by J.Andreina )

(cherry picked from commit 3560180b6e9926aa3ee1357da59b28a4b4689a0d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c24448c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c24448c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c24448c2

Branch: refs/heads/branch-2
Commit: c24448c27b91d0398fb42a0a9ab8b8cdd4887647
Parents: 31b3f84
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Mar 4 14:38:38 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Mar 4 14:39:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java   | 6 +++---
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java  | 3 +--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java   | 4 ++--
 5 files changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24448c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bff45bb..6506451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -777,6 +777,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
 includes non-snapshotted content. (Charles Lamb via atm)
 
+HDFS-7869. Inconsistency in the return information while performing rolling
+upgrade ( J.Andreina via vinayakumarb )
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24448c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 17f313d..81d2b88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7475,7 +7475,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
-  void finalizeRollingUpgrade() throws IOException {
+  RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
 checkSuperuserPrivilege();
 checkOperation(OperationCategory.WRITE);
 writeLock();
@@ -7483,7 +7483,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 try {
   checkOperation(OperationCategory.WRITE);
   if (!isRollingUpgrade()) {
-return;
+return null;
   }
   checkNameNodeSafeMode(Failed to finalize rolling upgrade);
 
@@ -7508,7 +7508,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 if (auditLog.isInfoEnabled() && isExternalInvocation()) {
   logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
 }
-return;
+return returnInfo;
   }
 
   RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24448c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index a746e8d..b1ae348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1139,8 +1139,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 case PREPARE:
   return namesystem.startRollingUpgrade();
 case FINALIZE:
-  namesystem.finalizeRollingUpgrade();
-  return null;
+  return namesystem.finalizeRollingUpgrade();
 default:
   throw new UnsupportedActionException(action + " is not yet supported.");