hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-08-25 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e99349830 -> df5dbf317


HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df5dbf31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df5dbf31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df5dbf31

Branch: refs/heads/branch-2
Commit: df5dbf317d0fb2d0e35f016e75c230d0f74235fd
Parents: e993498
Author: yliu 
Authored: Tue Aug 25 16:14:11 2015 +0800
Committer: yliu 
Committed: Tue Aug 25 16:14:11 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3100fd0..59817d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -488,6 +488,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 963196b..2eefeb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -314,6 +314,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from XAttr
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String namespace = xAttr.getNameSpace().toString();
-retur

hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-08-25 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk af7876787 -> eee0d4563


HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee0d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee0d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee0d456

Branch: refs/heads/trunk
Commit: eee0d4563c62647cfaaed6605ee713aaf69add78
Parents: af78767
Author: yliu 
Authored: Tue Aug 25 16:16:09 2015 +0800
Committer: yliu 
Committed: Tue Aug 25 16:16:09 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7aadcc6..2c47b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b14168..e6802a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -318,6 +318,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from XAttr
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String namespace = xAttr.getNameSpace().toString();
-return Stri

hadoop git commit: HADOOP-12340. test-patch docker mode fails in downloading findbugs with curl (Kengo Seki via aw)

2015-08-25 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 2c4208f2b -> 715f2b6fc


HADOOP-12340. test-patch docker mode fails in downloading findbugs with curl 
(Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/715f2b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/715f2b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/715f2b6f

Branch: refs/heads/HADOOP-12111
Commit: 715f2b6fc4244692dbf843366087a57f6dd7647a
Parents: 2c4208f
Author: Allen Wittenauer 
Authored: Tue Aug 25 09:12:47 2015 -0700
Committer: Allen Wittenauer 
Committed: Tue Aug 25 09:12:47 2015 -0700

--
 dev-support/test-patch-docker/Dockerfile-startstub | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/715f2b6f/dev-support/test-patch-docker/Dockerfile-startstub
--
diff --git a/dev-support/test-patch-docker/Dockerfile-startstub 
b/dev-support/test-patch-docker/Dockerfile-startstub
index c49b589..560d751 100644
--- a/dev-support/test-patch-docker/Dockerfile-startstub
+++ b/dev-support/test-patch-docker/Dockerfile-startstub
@@ -62,7 +62,7 @@ RUN apt-get install -y oracle-java8-installer
 # Install findbugs
 ##
 RUN mkdir -p /opt/findbugs && \
-curl 
https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download
 \
+curl -L 
https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download
 \
  -o /opt/findbugs.tar.gz && \
 tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs
 ENV FINDBUGS_HOME /opt/findbugs



hadoop git commit: HADOOP-12233. if CHANGED_FILES is corrupt, find_changed_modules never returns (Kengo Seki via aw)

2015-08-25 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 715f2b6fc -> a32b5b01b


HADOOP-12233. if CHANGED_FILES is corrupt, find_changed_modules never returns 
(Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a32b5b01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a32b5b01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a32b5b01

Branch: refs/heads/HADOOP-12111
Commit: a32b5b01b709ae11dc80c9f972b2e7b84bbce904
Parents: 715f2b6
Author: Allen Wittenauer 
Authored: Tue Aug 25 09:15:22 2015 -0700
Committer: Allen Wittenauer 
Committed: Tue Aug 25 09:15:22 2015 -0700

--
 dev-support/test-patch.sh | 14 ++
 1 file changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32b5b01/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 4d4b63f..9f08ad3 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1107,6 +1107,7 @@ function find_buildfile_dir
 {
   local buildfile=$1
   local dir=$2
+  local d
 
   yetus_debug "Find ${buildfile} dir for: ${dir}"
 
@@ -1119,6 +1120,12 @@ function find_buildfile_dir
   yetus_debug "ERROR: ${buildfile} is not found."
   return 1
 else
+  d=$(cd -P -- "$(dirname -- "${dir}")" >/dev/null && pwd -P)
+  relative_dir "${d}" >/dev/null
+  if [[ $? == 1 ]]; then
+yetus_debug "ERROR: ${dir} is not in ${BASEDIR}."
+return 1
+  fi
   dir=$(dirname "${dir}")
 fi
   done
@@ -1151,6 +1158,7 @@ function find_changed_files
 function module_skipdir
 {
   local dir=${1}
+  local d
   local i
 
   yetus_debug "Checking skipdirs for ${dir}"
@@ -1170,6 +1178,12 @@ function module_skipdir
 if [[ ${dir} == "." ]]; then
   return 0
 else
+  d=$(cd -P -- "$(dirname -- "${dir}")" >/dev/null && pwd -P)
+  relative_dir "${d}" >/dev/null
+  if [[ $? == 1 ]]; then
+yetus_debug "ERROR: ${dir} is not in ${BASEDIR}."
+return 1
+  fi
   dir=$(dirname "${dir}")
   yetus_debug "Trying to skip: ${dir}"
 fi



hadoop git commit: HADOOP-12301. Fix some test-patch plugins to count the diff lines correctly (Kengo Seki via aw)

2015-08-25 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 a32b5b01b -> e7230d1ad


HADOOP-12301. Fix some test-patch plugins to count the diff lines correctly 
(Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7230d1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7230d1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7230d1a

Branch: refs/heads/HADOOP-12111
Commit: e7230d1ad7f3f2166c05dce88a8338d77bcd6919
Parents: a32b5b0
Author: Allen Wittenauer 
Authored: Tue Aug 25 09:16:59 2015 -0700
Committer: Allen Wittenauer 
Committed: Tue Aug 25 09:16:59 2015 -0700

--
 dev-support/test-patch.d/pylint.sh  | 6 +++---
 dev-support/test-patch.d/rubocop.sh | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7230d1a/dev-support/test-patch.d/pylint.sh
--
diff --git a/dev-support/test-patch.d/pylint.sh 
b/dev-support/test-patch.d/pylint.sh
index 1ee5a8f..6fa576e 100755
--- a/dev-support/test-patch.d/pylint.sh
+++ b/dev-support/test-patch.d/pylint.sh
@@ -148,14 +148,14 @@ function pylint_postapply
   add_footer_table pylint "v${PYLINT_VERSION%,}"
 
   calcdiffs "${PATCH_DIR}/branch-pylint-result.txt" 
"${PATCH_DIR}/patch-pylint-result.txt" > "${PATCH_DIR}/diff-patch-pylint.txt"
-  diffPostpatch=$(${AWK} 'BEGIN {sum=0} 2http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7230d1a/dev-support/test-patch.d/rubocop.sh
--
diff --git a/dev-support/test-patch.d/rubocop.sh 
b/dev-support/test-patch.d/rubocop.sh
index ba9810e..091a2b1 100755
--- a/dev-support/test-patch.d/rubocop.sh
+++ b/dev-support/test-patch.d/rubocop.sh
@@ -70,7 +70,7 @@ function rubocop_preapply
   pushd "${BASEDIR}" >/dev/null
   for i in ${CHANGED_FILES}; do
 if [[ ${i} =~ \.rb$ && -f ${i} ]]; then
-  ${RUBOCOP} -f c "${i}" | ${AWK} '!/[0-9]* files? inspected/' >> 
"${PATCH_DIR}/branch-rubocop-result.txt"
+  ${RUBOCOP} -f e "${i}" | ${AWK} '!/[0-9]* files? inspected/' >> 
"${PATCH_DIR}/branch-rubocop-result.txt"
 fi
   done
   popd >/dev/null
@@ -110,7 +110,7 @@ function rubocop_postapply
   pushd "${BASEDIR}" >/dev/null
   for i in ${CHANGED_FILES}; do
 if [[ ${i} =~ \.rb$ && -f ${i} ]]; then
-  ${RUBOCOP} -f c "${i}" | ${AWK} '!/[0-9]* files? inspected/' >> 
"${PATCH_DIR}/patch-rubocop-result.txt"
+  ${RUBOCOP} -f e "${i}" | ${AWK} '!/[0-9]* files? inspected/' >> 
"${PATCH_DIR}/patch-rubocop-result.txt"
 fi
   done
   popd >/dev/null



[03/42] hadoop git commit: HADOOP-12061. Incorrect command in single cluster setup document. Contributed by Kengo Seki.

2015-08-25 Thread wangda
HADOOP-12061. Incorrect command in single cluster setup document. Contributed 
by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36b1a1e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36b1a1e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36b1a1e7

Branch: refs/heads/YARN-1197
Commit: 36b1a1e784789170350bcd78f394129ce50ba4e4
Parents: 1e06299
Author: Akira Ajisaka 
Authored: Thu Aug 20 11:09:45 2015 +0900
Committer: Akira Ajisaka 
Committed: Thu Aug 20 11:09:45 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/site/markdown/SingleCluster.md.vm   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 943dbac..c033f05 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1096,6 +1096,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
 (Brahma Reddy Battula via jianhe)
 
+HADOOP-12061. Incorrect command in single cluster setup document.
+(Kengo Seki via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index ca5b48c..2de8b2b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -140,7 +140,7 @@ If you cannot ssh to localhost without a passphrase, 
execute the following comma
 
   $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
   $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
-  $ chmod 0700 ~/.ssh/authorized_keys
+  $ chmod 0600 ~/.ssh/authorized_keys
 
 $H3 Execution
 



[04/42] hadoop git commit: HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget. (yliu)

2015-08-25 Thread wangda
HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80a29906
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80a29906
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80a29906

Branch: refs/heads/YARN-1197
Commit: 80a29906bcd718bbba223fa099e523281d9f3369
Parents: 36b1a1e
Author: yliu 
Authored: Thu Aug 20 20:07:18 2015 +0800
Committer: yliu 
Committed: Thu Aug 20 20:07:18 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../BlockPlacementPolicyDefault.java| 176 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 +---
 .../TestDefaultBlockPlacementPolicy.java|  49 +-
 4 files changed, 161 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80a29906/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 080f0d4..a0ca52a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -819,6 +819,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests.
 (Zhe Zhang via jing9)
 
+HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget.
+(yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80a29906/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 9023e0a..3aea5c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -437,17 +437,11 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 return writer;
   }
-  
-  /**
-   * Choose localMachine as the target.
-   * if localMachine is not available, 
-   * choose a node on the same rack
-   * @return the chosen storage
-   */
+
   protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
   Set excludedNodes, long blocksize, int maxNodesPerRack,
   List results, boolean avoidStaleNodes,
-  EnumMap storageTypes, boolean fallbackToLocalRack)
+  EnumMap storageTypes)
   throws NotEnoughReplicasException {
 // if no local machine, randomly choose one node
 if (localMachine == null) {
@@ -458,7 +452,9 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 && clusterMap.contains(localMachine)) {
   DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
   // otherwise try local machine first
-  if (excludedNodes.add(localMachine)) { // was not in the excluded list
+  if (excludedNodes.add(localMachine) // was not in the excluded list
+  && isGoodDatanode(localDatanode, maxNodesPerRack, false,
+  results, avoidStaleNodes)) {
 for (Iterator> iter = storageTypes
 .entrySet().iterator(); iter.hasNext(); ) {
   Map.Entry entry = iter.next();
@@ -466,7 +462,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   localDatanode.getStorageInfos())) {
 StorageType type = entry.getKey();
 if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) {
+results, type) >= 0) {
   int num = entry.getValue();
   if (num == 1) {
 iter.remove();
@@ -479,6 +475,26 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
   } 
 }
+return null;
+  }
+
+  /**
+   * Choose localMachine as the target.
+   * if localMachine is not available,
+   * choose a node on the same rack
+   * @return the chosen storage
+   */
+  protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
+  Set excludedNodes, long blocksize, int maxNodesPerRack,
+  List results, boolean avoidStaleNodes,
+  EnumMap storag

[28/42] hadoop git commit: HADOOP-12325. RPC Metrics : Add the ability to track and log slow RPCs. Contributed by Anu Engineer

2015-08-25 Thread wangda
HADOOP-12325. RPC Metrics : Add the ability to track and log slow RPCs. 
Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48774d0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48774d0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48774d0a

Branch: refs/heads/YARN-1197
Commit: 48774d0a45d95557affbd6bbaf8035cc9575ef36
Parents: b5ce87f
Author: Xiaoyu Yao 
Authored: Mon Aug 24 14:31:24 2015 -0700
Committer: Xiaoyu Yao 
Committed: Mon Aug 24 14:31:24 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/CommonConfigurationKeysPublic.java   |  5 ++
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java | 60 +++
 .../apache/hadoop/ipc/WritableRpcEngine.java|  3 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 48 
 .../apache/hadoop/metrics2/lib/MutableStat.java |  7 +-
 .../src/main/resources/core-default.xml |  9 +++
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  | 77 +++-
 .../org/apache/hadoop/test/MetricsAsserts.java  |  2 +-
 .../hadoop-common/src/test/proto/test.proto |  7 ++
 .../src/test/proto/test_rpc_service.proto   |  1 +
 12 files changed, 223 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b4445fa..4250fc3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -753,6 +753,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
 (hzlu via benoyantony)
 
+HADOOP-12325. RPC Metrics : Add the ability to track and log slow RPCs.
+(Anu Engineer via xyao)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 7231d59..24d648f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -235,6 +235,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
   public static final int IPC_SERVER_MAX_CONNECTIONS_DEFAULT = 0;
 
+  /** Logs if a RPC is really slow compared to rest of RPCs. */
+  public static final String IPC_SERVER_LOG_SLOW_RPC =
+"ipc.server.log.slow.rpc";
+  public static final boolean IPC_SERVER_LOG_SLOW_RPC_DEFAULT = false;
+
   /** See core-default.xml */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
 "hadoop.rpc.socket.factory.class.default";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index cc75f5c..532246d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -567,7 +567,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   /**
* This is a server side method, which is invoked over RPC. On success
* the return response has protobuf response payload. On failure, the
-   * exception name and the stack trace are return in the resposne.
+   * exception name and the stack trace are returned in the response.
* See {@link HadoopRpcResponseProto}
* 
* In this method there three types of exceptions possible and they are
@@ -657,6 +657,9 @@ public class ProtobufRpcEngine implements RpcEngine {
   server.rpcMetrics.addRpcProcessingTime(processingTime);
   server.rpcDetailedMetrics.a

[05/42] hadoop git commit: HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. (Kihwal Lee via yliu)

2015-08-25 Thread wangda
HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e8fe894
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e8fe894
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e8fe894

Branch: refs/heads/YARN-1197
Commit: 5e8fe8943718309b5e39a794360aebccae28b331
Parents: 80a2990
Author: yliu 
Authored: Thu Aug 20 20:15:03 2015 +0800
Committer: yliu 
Committed: Thu Aug 20 20:15:03 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  7 +-
 .../BlockPlacementPolicyDefault.java|  3 +-
 .../blockmanagement/DatanodeDescriptor.java | 23 --
 .../blockmanagement/TestReplicationPolicy.java  | 80 +++-
 4 files changed, 84 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a0ca52a..041582f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1196,11 +1196,11 @@ Release 2.7.2 - UNRELEASED
 
   IMPROVEMENTS
 
-  HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
+HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
 
   OPTIMIZATIONS
 
-  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
 
@@ -1215,6 +1215,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
 
+HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
+flawed. (Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3aea5c9..6d7a765 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -868,7 +868,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 final long requiredSize = blockSize * 
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
 final long scheduledSize = blockSize * 
node.getBlocksScheduled(storage.getStorageType());
-final long remaining = node.getRemaining(storage.getStorageType());
+final long remaining = node.getRemaining(storage.getStorageType(),
+requiredSize);
 if (requiredSize > remaining - scheduledSize) {
   logNodeIsNotChosen(storage, "the node does not have enough "
   + storage.getStorageType() + " space"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9334b5c..7e3c59b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
@@ -662,16 +663,26 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /**
-   * @return Approximate number of blocks currently scheduled to be written

[10/42] hadoop git commit: Creating 2.6.2 entries in CHANGES.txt files.

2015-08-25 Thread wangda
Creating 2.6.2 entries in CHANGES.txt files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33474939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33474939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33474939

Branch: refs/heads/YARN-1197
Commit: 33474939a048ac7bbe07df772fc84979722f
Parents: c8bca62
Author: Vinod Kumar Vavilapalli 
Authored: Thu Aug 20 17:21:06 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Thu Aug 20 17:21:06 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33474939/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c033f05..d6353a4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1868,6 +1868,18 @@ Release 2.7.0 - 2015-04-20
 HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
 Tomcat deployments. (Bowen Zhang via wheat9)
 
+Release 2.6.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33474939/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d9d176b..658788c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2355,6 +2355,18 @@ Release 2.7.0 - 2015-04-20
   HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
   Arpit Agarwal)
 
+Release 2.6.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33474939/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d2eef32..43d0faf 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -817,6 +817,18 @@ Release 2.7.0 - 2015-04-20
 MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal 
 to a reducer. (Jason Lowe via junping_du)
 
+Release 2.6.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33474939/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 07b6339..2518eb5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1750,6 +1750,18 @@ Release 2.7.0 - 2015-04-20
 YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
 renewal of applications part of a bigger workflow. (Daryn Sharp via 
vinodkv)
 
+Release 2.6.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[34/42] hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-08-25 Thread wangda
HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee0d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee0d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee0d456

Branch: refs/heads/YARN-1197
Commit: eee0d4563c62647cfaaed6605ee713aaf69add78
Parents: af78767
Author: yliu 
Authored: Tue Aug 25 16:16:09 2015 +0800
Committer: yliu 
Committed: Tue Aug 25 16:16:09 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7aadcc6..2c47b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b14168..e6802a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -318,6 +318,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from XAttr
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String namespace = xAttr.getNameSpace().toString();
-return StringUtils.toLowerCase(namespace) + "." + xAttr.getName();
+
+return getPrefixedName(xAttr.getNameSpace(), xAttr.getName());

[02/42] hadoop git commit: HADOOP-12317. Applications fail on NM restart on some linux distro because NM container recovery declares AM container as LOST (adhoot via rkanter)

2015-08-25 Thread wangda
HADOOP-12317. Applications fail on NM restart on some linux distro because NM 
container recovery declares AM container as LOST (adhoot via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e06299d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e06299d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e06299d

Branch: refs/heads/YARN-1197
Commit: 1e06299df82b98795124fe8a33578c111e744ff4
Parents: 4e14f79
Author: Robert Kanter 
Authored: Wed Aug 19 19:00:51 2015 -0700
Committer: Robert Kanter 
Committed: Wed Aug 19 19:00:51 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++
 .../main/java/org/apache/hadoop/util/Shell.java | 11 --
 .../java/org/apache/hadoop/util/TestShell.java  | 39 
 3 files changed, 51 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e06299d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d07adcb..943dbac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1063,6 +1063,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12322. typos in rpcmetrics.java. (Anu Engineer via
 Arpit Agarwal)
 
+HADOOP-12317. Applications fail on NM restart on some linux distro
+because NM container recovery declares AM container as LOST
+(adhoot via rkanter)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e06299d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index ed83e8d..e426955 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -212,13 +212,18 @@ abstract public class Shell {
   public static String[] getCheckProcessIsAliveCommand(String pid) {
 return Shell.WINDOWS ?
   new String[] { Shell.WINUTILS, "task", "isAlive", pid } :
-  new String[] { "kill", "-0", isSetsidAvailable ? "-" + pid : pid };
+  isSetsidAvailable ?
+new String[] { "kill", "-0", "--", "-" + pid } :
+new String[] { "kill", "-0", pid };
   }
 
   /** Return a command to send a signal to a given pid */
   public static String[] getSignalKillCommand(int code, String pid) {
-return Shell.WINDOWS ? new String[] { Shell.WINUTILS, "task", "kill", pid 
} :
-  new String[] { "kill", "-" + code, isSetsidAvailable ? "-" + pid : pid };
+return Shell.WINDOWS ?
+  new String[] { Shell.WINUTILS, "task", "kill", pid } :
+  isSetsidAvailable ?
+new String[] { "kill", "-" + code, "--", "-" + pid } :
+new String[] { "kill", "-" + code, pid };
   }
 
   public static final String ENV_NAME_REGEX = "[A-Za-z_][A-Za-z0-9_]*";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e06299d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
index d9dc9ef..a96a0c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.util;
 
 import junit.framework.TestCase;
+import org.junit.Assert;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -150,6 +151,44 @@ public class TestShell extends TestCase {
 System.err.println("after: " + timersAfter);
 assertEquals(timersBefore, timersAfter);
   }
+
+  public void testGetCheckProcessIsAliveCommand() throws Exception {
+String anyPid = "";
+String[] checkProcessAliveCommand = Shell.getCheckProcessIsAliveCommand(
+anyPid);
+
+String[] expectedCommand;
+
+if (Shell.WINDOWS) {
+  expectedCommand =
+  new String[]{ Shell.WINUTILS, "task", "isAlive", anyPid };
+} else if (Shell.isSetsidAvailable) {
+  expectedCommand = new String[]{ "kill", "-0", "--", "-" + anyPid };
+} else {
+  expectedCommand = new String[]{"kill", "-0", anyPid};
+}
+Assert.assertArrayEquals(expectedCommand, checkProcessAliveCommand);

[16/42] hadoop git commit: HDFS-8823. Move replication factor into individual blocks. Contributed by Haohui Mai.

2015-08-25 Thread wangda
HDFS-8823. Move replication factor into individual blocks. Contributed by 
Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/745d04be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/745d04be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/745d04be

Branch: refs/heads/YARN-1197
Commit: 745d04be59accf80feda0ad38efcc74ba362f2ca
Parents: 7087e70
Author: Haohui Mai 
Authored: Sat Aug 22 00:09:40 2015 -0700
Committer: Haohui Mai 
Committed: Sat Aug 22 00:09:40 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 +-
 .../server/blockmanagement/BlockCollection.java |  6 --
 .../hdfs/server/blockmanagement/BlockInfo.java  | 20 +-
 .../server/blockmanagement/BlockManager.java| 65 +---
 .../blockmanagement/DecommissionManager.java| 15 +++--
 .../hdfs/server/namenode/FSDirAppendOp.java |  2 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   | 50 +--
 .../hdfs/server/namenode/FSDirConcatOp.java |  4 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java |  5 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |  7 ++-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |  2 +
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../hdfs/server/namenode/FSDirectory.java   | 17 +
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hadoop/hdfs/server/namenode/INode.java  | 52 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 29 -
 .../hdfs/server/namenode/NamenodeFsck.java  |  5 +-
 .../snapshot/FSImageFormatPBSnapshot.java   | 12 +++-
 .../snapshot/FileWithSnapshotFeature.java   | 44 -
 .../blockmanagement/TestBlockManager.java   |  3 -
 .../blockmanagement/TestPendingReplication.java |  1 -
 .../blockmanagement/TestReplicationPolicy.java  |  4 +-
 .../hdfs/server/namenode/TestINodeFile.java |  7 ++-
 .../snapshot/TestFileWithSnapshotFeature.java   |  7 ++-
 .../namenode/snapshot/TestSnapshotDeletion.java | 16 +++--
 .../snapshot/TestSnapshotReplication.java   | 31 ++
 26 files changed, 257 insertions(+), 157 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/745d04be/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7f99fc7..08602d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -824,7 +824,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8828. Utilize Snapshot diff report to build diff copy list in distcp.
 (Yufei Gu via Yongjun Zhang)
- 
+
+HDFS-8823. Move replication factor into individual blocks. (wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/745d04be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index a3b4401..3952cc6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -55,12 +55,6 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
-   * Get block replication for the collection 
-   * @return block replication value
-   */
-  public short getPreferredBlockReplication();
-
-  /** 
* @return the storage policy ID.
*/
   public byte getStoragePolicyID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/745d04be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 659be56..72fc005 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -40,9 +40,14 @@ public abstract class  BlockInfo extends Block
 
   public static final Bloc

[15/42] hadoop git commit: HDFS-8924. Add pluggable interface for reading replicas in DFSClient. (Colin Patrick McCabe via Lei Xu)

2015-08-25 Thread wangda
HDFS-8924. Add pluggable interface for reading replicas in DFSClient. (Colin 
Patrick McCabe via Lei Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7087e700
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7087e700
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7087e700

Branch: refs/heads/YARN-1197
Commit: 7087e700e032dabc174ecc12b62c12e7d49b995f
Parents: caa636b
Author: Lei Xu 
Authored: Fri Aug 21 17:02:00 2015 -0700
Committer: Lei Xu 
Committed: Fri Aug 21 17:02:00 2015 -0700

--
 .../org/apache/hadoop/hdfs/ReplicaAccessor.java |  88 ++
 .../hadoop/hdfs/ReplicaAccessorBuilder.java | 101 +++
 .../hdfs/client/HdfsClientConfigKeys.java   |   3 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  50 +++-
 .../src/main/proto/datatransfer.proto   |   4 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |  48 +++
 .../apache/hadoop/hdfs/ExternalBlockReader.java | 120 
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   3 +-
 .../hdfs/protocol/datatransfer/Receiver.java|   1 +
 .../hadoop/hdfs/TestExternalBlockReader.java| 298 +++
 10 files changed, 712 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7087e700/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReplicaAccessor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReplicaAccessor.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReplicaAccessor.java
new file mode 100644
index 000..720e6a1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReplicaAccessor.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * The public API for ReplicaAccessor objects.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public abstract class ReplicaAccessor {
+  /**
+   * Read bytes from the replica.
+   *
+   * @param posThe position in the replica to start reading at.
+   * Must not be negative.
+   * @param bufThe byte array to read into.
+   * @param offThe offset within buf to start reading into.
+   * @param lenThe maximum length to read.
+   *
+   * @return   The number of bytes read.  If the read extends past the end
+   *  of the replica, a short read count will be returned.  We
+   *  will never return a negative number.  We will never
+   *  return a short read count unless EOF is reached.
+   */
+  public abstract int read(long pos, byte[] buf, int off, int len)
+  throws IOException;
+
+  /**
+   * Read bytes from the replica.
+   *
+   * @param posThe position in the replica to start reading at.
+   * Must not be negative.
+   * @param bufThe byte buffer to read into.  The amount to read will be
+   * dictated by the remaining bytes between the current
+   * position and the limit.  The ByteBuffer may or may not be
+   * direct.
+   *
+   * @return   The number of bytes read.  If the read extends past the end
+   * of the replica, a short read count will be returned.  We
+   * will never return a negative number.  We will never return
+   * a short read count unless EOF is reached.
+   */
+  public abstract int read(long pos, ByteBuffer buf) throws IOException;
+
+  /**
+   * Release the resources associated with the ReplicaAccessor.
+   *
+   * It is recommended that implementations never throw an IOException.  The
+   * method is declared as throwing IOException in order to remain compatible

[06/42] hadoop git commit: HDFS-8828. Utilize Snapshot diff report to build diff copy list in distcp. (Yufei Gu via Yongjun Zhang)

2015-08-25 Thread wangda
HDFS-8828. Utilize Snapshot diff report to build diff copy list in distcp. 
(Yufei Gu via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc15cb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc15cb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc15cb6

Branch: refs/heads/YARN-1197
Commit: 0bc15cb6e60dc60885234e01dec1c7cb4557a926
Parents: 5e8fe89
Author: Yongjun Zhang 
Authored: Thu Aug 20 08:02:54 2015 -0700
Committer: Yongjun Zhang 
Committed: Thu Aug 20 08:02:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/tools/CopyListing.java|  15 +-
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  32 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  27 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |   4 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 308 +++--
 .../apache/hadoop/tools/SimpleCopyListing.java  | 151 +++-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 345 +--
 .../apache/hadoop/tools/TestOptionsParser.java  |  22 +-
 9 files changed, 786 insertions(+), 121 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc15cb6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 041582f..fad2a867 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -822,6 +822,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget.
 (yliu)
 
+HDFS-8828. Utilize Snapshot diff report to build diff copy list in distcp.
+(Yufei Gu via Yongjun Zhang)
+ 
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc15cb6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index e3c58e9..1efc56c 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.security.Credentials;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -46,7 +48,7 @@ import com.google.common.collect.Sets;
 public abstract class CopyListing extends Configured {
 
   private Credentials credentials;
-
+  static final Log LOG = LogFactory.getLog(DistCp.class);
   /**
* Build listing function creates the input listing that distcp uses to
* perform the copy.
@@ -89,6 +91,7 @@ public abstract class CopyListing extends Configured {
 config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, 
getNumberOfPaths());
 
 validateFinalListing(pathToListFile, options);
+LOG.info("Number of paths in the copy list: " + this.getNumberOfPaths());
   }
 
   /**
@@ -153,6 +156,7 @@ public abstract class CopyListing extends Configured {
   Text currentKey = new Text();
   Set aclSupportCheckFsSet = Sets.newHashSet();
   Set xAttrSupportCheckFsSet = Sets.newHashSet();
+  long idx = 0;
   while (reader.next(currentKey)) {
 if (currentKey.equals(lastKey)) {
   CopyListingFileStatus currentFileStatus = new 
CopyListingFileStatus();
@@ -178,6 +182,12 @@ public abstract class CopyListing extends Configured {
   }
 }
 lastKey.set(currentKey);
+
+if (options.shouldUseDiff() && LOG.isDebugEnabled()) {
+  LOG.debug("Copy list entry " + idx + ": " +
+  lastFileStatus.getPath().toUri().getPath());
+  idx++;
+}
   }
 } finally {
   IOUtils.closeStream(reader);
@@ -224,9 +234,6 @@ public abstract class CopyListing extends Configured {
Credentials credentials,
DistCpOptions options)
   throws IOException {
-if (options.shouldUseDiff()) {
-  return new GlobbedCopyListing(configuration, credentials);
-}
 String copyListingClassName = configuration.get(DistCpConstants.
 CONF_LABEL_COPY_LISTING_CLASS, "");
 Class copyListingClass;

[21/42] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-25 Thread wangda
HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/490bb5eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/490bb5eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/490bb5eb

Branch: refs/heads/YARN-1197
Commit: 490bb5ebd6c6d6f9c08fcad167f976687fc3aa42
Parents: 61bf9ca
Author: Haohui Mai 
Authored: Sat Aug 22 13:30:19 2015 -0700
Committer: Haohui Mai 
Committed: Sat Aug 22 13:31:03 2015 -0700

--
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |  82 +++
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  | 132 
 .../java/org/apache/hadoop/hdfs/net/Peer.java   | 123 
 .../datatransfer/BlockConstructionStage.java|  62 ++
 .../datatransfer/DataTransferProtoUtil.java | 146 +
 .../datatransfer/DataTransferProtocol.java  | 202 ++
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |  66 ++
 .../hdfs/protocol/datatransfer/Sender.java  | 261 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 254 
 .../token/block/InvalidBlockTokenException.java |  41 ++
 .../hdfs/server/datanode/CachingStrategy.java   |  76 +++
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  | 119 
 .../hdfs/shortcircuit/DfsClientShmManager.java  | 522 +++
 .../hdfs/shortcircuit/ShortCircuitShm.java  | 647 +++
 .../hadoop/hdfs/util/ExactSizeInputStream.java  | 125 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  10 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|   6 +-
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |  82 ---
 .../apache/hadoop/hdfs/RemoteBlockReader.java   |   4 +-
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |   4 +-
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  | 132 
 .../java/org/apache/hadoop/hdfs/net/Peer.java   | 123 
 .../datatransfer/BlockConstructionStage.java|  62 --
 .../datatransfer/DataTransferProtoUtil.java | 148 -
 .../datatransfer/DataTransferProtocol.java  | 201 --
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |  66 --
 .../hdfs/protocol/datatransfer/PipelineAck.java |   2 +-
 .../hdfs/protocol/datatransfer/Receiver.java|   7 +-
 .../hdfs/protocol/datatransfer/Sender.java  | 261 
 .../datatransfer/sasl/DataTransferSaslUtil.java |   2 +-
 ...tDatanodeProtocolServerSideTranslatorPB.java |   2 +-
 .../ClientDatanodeProtocolTranslatorPB.java |   6 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  28 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |   4 +-
 .../InterDatanodeProtocolTranslatorPB.java  |   2 +-
 .../NamenodeProtocolTranslatorPB.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 228 +--
 .../token/block/InvalidBlockTokenException.java |  41 --
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   2 +-
 .../hdfs/server/datanode/CachingStrategy.java   |  76 ---
 .../hadoop/hdfs/server/datanode/DataNode.java   |   4 +-
 .../hdfs/server/datanode/DataXceiver.java   |  14 +-
 .../server/namenode/FSImageFormatPBINode.java   |   5 +-
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  | 119 
 .../hdfs/shortcircuit/DfsClientShmManager.java  | 514 ---
 .../hdfs/shortcircuit/ShortCircuitCache.java|   4 +-
 .../hdfs/shortcircuit/ShortCircuitShm.java  | 646 --
 .../hadoop/hdfs/util/ExactSizeInputStream.java  | 125 
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  20 +-
 52 files changed, 2949 insertions(+), 2873 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
new file mode 100644
index 000..7b9e8e3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0

[07/42] hadoop git commit: YARN-2923. Support configuration based NodeLabelsProvider Service in Distributed Node Label Configuration Setup. (Naganarasimha G R)

2015-08-25 Thread wangda
YARN-2923. Support configuration based NodeLabelsProvider Service in 
Distributed Node Label Configuration Setup. (Naganarasimha G R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc07464d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc07464d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc07464d

Branch: refs/heads/YARN-1197
Commit: fc07464d1a48b0413da5e921614430e41263fdb7
Parents: 0bc15cb
Author: Wangda Tan 
Authored: Thu Aug 20 11:51:03 2015 -0700
Committer: Wangda Tan 
Committed: Thu Aug 20 11:51:03 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  30 +++
 .../nodelabels/CommonNodeLabelsManager.java |   2 +-
 .../src/main/resources/yarn-default.xml |  47 
 .../yarn/server/nodemanager/NodeManager.java|  39 ++-
 .../nodemanager/NodeStatusUpdaterImpl.java  | 259 ++-
 .../nodelabels/AbstractNodeLabelsProvider.java  | 146 +++
 .../ConfigurationNodeLabelsProvider.java|  81 ++
 .../server/nodemanager/TestNodeManager.java |  50 +++-
 .../TestNodeStatusUpdaterForLabels.java |  76 +-
 .../TestConfigurationNodeLabelsProvider.java| 146 +++
 11 files changed, 793 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc07464d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b22777c..07b6339 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -175,6 +175,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4055. Report node resource utilization in heartbeat. 
 (Inigo Goiri via kasha)
 
+YARN-2923. Support configuration based NodeLabelsProvider Service in 
Distributed 
+Node Label Configuration Setup. (Naganarasimha G R)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc07464d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6c438f2..55eac85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1967,6 +1967,36 @@ public class YarnConfiguration extends Configuration {
 NODELABEL_CONFIGURATION_TYPE, DEFAULT_NODELABEL_CONFIGURATION_TYPE));
   }
 
+  private static final String NM_NODE_LABELS_PREFIX = NM_PREFIX
+  + "node-labels.";
+
+  public static final String NM_NODE_LABELS_PROVIDER_CONFIG =
+  NM_NODE_LABELS_PREFIX + "provider";
+
+  // whitelist names for the yarn.nodemanager.node-labels.provider
+  public static final String CONFIG_NODE_LABELS_PROVIDER = "config";
+
+  private static final String NM_NODE_LABELS_PROVIDER_PREFIX =
+  NM_NODE_LABELS_PREFIX + "provider.";
+
+  // If -1 is configured then no timer task should be created
+  public static final String NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS =
+  NM_NODE_LABELS_PROVIDER_PREFIX + "fetch-interval-ms";
+
+  public static final String NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS =
+  NM_NODE_LABELS_PROVIDER_PREFIX + "fetch-timeout-ms";
+
+  // once in 10 mins
+  public static final long DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS =
+  10 * 60 * 1000;
+
+  // Twice of default interval time
+  public static final long DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS =
+  DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS * 2;
+
+  public static final String NM_PROVIDER_CONFIGURED_NODE_LABELS =
+  NM_NODE_LABELS_PROVIDER_PREFIX + "configured-node-labels";
+
   public YarnConfiguration() {
 super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc07464d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 34e6832..8cc3770 100644

[11/42] hadoop git commit: MAPREDUCE-6357. MultipleOutputs.write() API should document that output committing is not utilized when input path is absolute. Contributed by Dustin Cote.

2015-08-25 Thread wangda
MAPREDUCE-6357. MultipleOutputs.write() API should document that output 
committing is not utilized when input path is absolute. Contributed by Dustin 
Cote.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ba90c93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ba90c93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ba90c93

Branch: refs/heads/YARN-1197
Commit: 2ba90c93d71aa2d30ee9ed431750c10c685e5599
Parents: 3347493
Author: Akira Ajisaka 
Authored: Fri Aug 21 10:41:54 2015 +0900
Committer: Akira Ajisaka 
Committed: Fri Aug 21 10:41:54 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  |  4 
 .../hadoop/mapreduce/lib/output/MultipleOutputs.java  | 14 +-
 2 files changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ba90c93/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 43d0faf..361a19b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -555,6 +555,10 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5817. Mappers get rescheduled on node transition even after all
 reducers are completed. (Sangjin Lee via kasha)
 
+MAPREDUCE-6357. MultipleOutputs.write() API should document that output
+committing is not utilized when input path is absolute.
+(Dustin Cote via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ba90c93/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
index c31cab7..a3a0e76 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
@@ -120,7 +120,11 @@ import java.util.*;
  * 
  * 
  * Use MultipleOutputs.write(KEYOUT key, VALUEOUT value, String 
baseOutputPath) to write key and 
- * value to a path specified by baseOutputPath, with no need to 
specify a named output:
+ * value to a path specified by baseOutputPath, with no need to 
specify a named output.
+ * Warning: when the baseOutputPath passed to MultipleOutputs.write
+ * is a path that resolves outside of the final job output directory, the
+ * directory is created immediately and then persists through subsequent
+ * task retries, breaking the concept of output committing:
  * 
  * 
  * 
@@ -418,6 +422,10 @@ public class MultipleOutputs {
* @param value  the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
+   * Warning: when the baseOutputPath is a path that resolves
+   * outside of the final job output directory, the directory is created
+   * immediately and then persists through subsequent task retries, breaking
+   * the concept of output committing.
*/
   @SuppressWarnings("unchecked")
   public  void write(String namedOutput, K key, V value,
@@ -442,6 +450,10 @@ public class MultipleOutputs {
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
+   * Warning: when the baseOutputPath is a path that resolves
+   * outside of the final job output directory, the directory is created
+   * immediately and then persists through subsequent task retries, breaking
+   * the concept of output committing.
*/
   @SuppressWarnings("unchecked")
   public void write(KEYOUT key, VALUEOUT value, String baseOutputPath) 



[13/42] hadoop git commit: YARN-3986. getTransferredContainers in AbstractYarnScheduler should be present in YarnScheduler interface

2015-08-25 Thread wangda
YARN-3986. getTransferredContainers in AbstractYarnScheduler should be present 
in YarnScheduler interface


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22de7c1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22de7c1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22de7c1d

Branch: refs/heads/YARN-1197
Commit: 22de7c1dca1be63d523de833163ae51bfe638a79
Parents: b0564c9
Author: Rohith Sharma K S 
Authored: Fri Aug 21 10:51:11 2015 +0530
Committer: Rohith Sharma K S 
Committed: Fri Aug 21 10:51:11 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../server/resourcemanager/ApplicationMasterService.java |  3 +--
 .../server/resourcemanager/scheduler/YarnScheduler.java  | 11 +++
 .../resourcemanager/TestWorkPreservingRMRestart.java |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22de7c1d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2518eb5..cf7b67f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -786,6 +786,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4028. AppBlock page key update and diagnostics value null on recovery
 (Bibin A Chundatt via xgong)
 
+YARN-3986. getTransferredContainers in AbstractYarnScheduler should be 
present
+in YarnScheduler interface instead. (Varun Saxena via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22de7c1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index ff9b820..c8b985d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -89,7 +89,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAt
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
@@ -305,7 +304,7 @@ public class ApplicationMasterService extends 
AbstractService implements
   // and corresponding NM tokens.
   if (app.getApplicationSubmissionContext()
   .getKeepContainersAcrossApplicationAttempts()) {
-List transferredContainers = ((AbstractYarnScheduler) 
rScheduler)
+List transferredContainers = rScheduler
 .getTransferredContainers(applicationAttemptId);
 if (!transferredContainers.isEmpty()) {
   response.setContainersFromPreviousAttempts(transferredContainers);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22de7c1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
index 0fa23e1..4fd16c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcema

[12/42] hadoop git commit: HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang. Moved CHANGES.txt entry to 2.7.2

2015-08-25 Thread wangda
HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang.
Moved CHANGES.txt entry to 2.7.2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0564c9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0564c9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0564c9f

Branch: refs/heads/YARN-1197
Commit: b0564c9f3c501bf7806f07649929038624dea10f
Parents: 2ba90c9
Author: Chris Douglas 
Authored: Thu Aug 20 20:39:06 2015 -0700
Committer: Chris Douglas 
Committed: Thu Aug 20 20:39:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0564c9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 658788c..7f99fc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1186,8 +1186,6 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
 
-HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
-
 HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to
 replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)
 
@@ -1226,6 +1224,8 @@ Release 2.7.2 - UNRELEASED
 HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
 flawed. (Kihwal Lee via yliu)
 
+HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[08/42] hadoop git commit: HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java requires it (Ayappan via Colin P. McCabe)

2015-08-25 Thread wangda
HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java 
requires it (Ayappan via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7642f64c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7642f64c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7642f64c

Branch: refs/heads/YARN-1197
Commit: 7642f64c24961d2b4772591a0957e2699162a083
Parents: fc07464
Author: Colin Patrick Mccabe 
Authored: Thu Aug 20 13:57:32 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Aug 20 13:57:32 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7642f64c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fad2a867..dcc5d58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1191,6 +1191,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to
 replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)
 
+HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
+requires it (Ayappan via Colin P. McCabe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7642f64c/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index b4a3b40..2f8620b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -144,6 +144,7 @@ add_library(native_mini_dfs
 )
 target_link_libraries(native_mini_dfs
 ${JAVA_JVM_LIBRARY}
+${LIB_DL}
 ${OS_LINK_LIBRARIES}
 )
 



[18/42] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-25 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
deleted file mode 100644
index 17365fb..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import com.google.common.base.Preconditions;
-
-/**
- * An InputStream implementations which reads from some other InputStream
- * but expects an exact number of bytes. Any attempts to read past the
- * specified number of bytes will return as if the end of the stream
- * was reached. If the end of the underlying stream is reached prior to
- * the specified number of bytes, an EOFException is thrown.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ExactSizeInputStream extends FilterInputStream {
-  private int remaining;
-
-  /**
-   * Construct an input stream that will read no more than
-   * 'numBytes' bytes.
-   * 
-   * If an EOF occurs on the underlying stream before numBytes
-   * bytes have been read, an EOFException will be thrown.
-   * 
-   * @param in the inputstream to wrap
-   * @param numBytes the number of bytes to read
-   */
-  public ExactSizeInputStream(InputStream in, int numBytes) {
-super(in);
-Preconditions.checkArgument(numBytes >= 0,
-"Negative expected bytes: ", numBytes);
-this.remaining = numBytes;
-  }
-
-  @Override
-  public int available() throws IOException {
-return Math.min(super.available(), remaining);
-  }
-
-  @Override
-  public int read() throws IOException {
-// EOF if we reached our limit
-if (remaining <= 0) {
-  return -1;
-}
-final int result = super.read();
-if (result >= 0) {
-  --remaining;
-} else if (remaining > 0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  "Premature EOF. Expected " + remaining + "more bytes");
-}
-return result;
-  }
-
-  @Override
-  public int read(final byte[] b, final int off, int len)
-  throws IOException {
-if (remaining <= 0) {
-  return -1;
-}
-len = Math.min(len, remaining);
-final int result = super.read(b, off, len);
-if (result >= 0) {
-  remaining -= result;
-} else if (remaining > 0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  "Premature EOF. Expected " + remaining + "more bytes");
-}
-return result;
-  }
-
-  @Override
-  public long skip(final long n) throws IOException {
-final long result = super.skip(Math.min(n, remaining));
-if (result > 0) {
-  remaining -= result;
-} else if (remaining > 0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  "Premature EOF. Expected " + remaining + "more bytes");
-}
-return result;
-  }
-  
-  @Override
-  public boolean markSupported() {
-return false;
-  }
-
-  @Override
-  public void mark(int readlimit) {
-throw new UnsupportedOperationException();
-  }
-  
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

[33/42] hadoop git commit: HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals before deleting. Contributed by Casey Brotherton.

2015-08-25 Thread wangda
HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals 
before deleting. Contributed by Casey Brotherton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af787678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af787678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af787678

Branch: refs/heads/YARN-1197
Commit: af78767870b8296886c03f8be24cf13a4e2bd4b0
Parents: 57c7ae1
Author: Harsh J 
Authored: Tue Aug 25 11:21:19 2015 +0530
Committer: Harsh J 
Committed: Tue Aug 25 11:21:19 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/fs/TrashPolicyDefault.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4250fc3..0ec4ed6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -796,6 +796,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
+before deleting (Casey Brotherton via harsh)
+
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 88aeab5..1ed8a46 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
   @SuppressWarnings("deprecation")
   @Override
   public void createCheckpoint() throws IOException {
+createCheckpoint(new Date());
+  }
+
+  @SuppressWarnings("deprecation")
+  public void createCheckpoint(Date date) throws IOException {
+
 if (!fs.exists(current)) // no trash, no checkpoint
   return;
 
 Path checkpointBase;
 synchronized (CHECKPOINT) {
-  checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
+  checkpointBase = new Path(trash, CHECKPOINT.format(date));
+
 }
 Path checkpoint = checkpointBase;
 
@@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 TrashPolicyDefault trash = new TrashPolicyDefault(
 fs, home.getPath(), conf);
 trash.deleteCheckpoint();
-trash.createCheckpoint();
+trash.createCheckpoint(new Date(now));
   } catch (IOException e) {
 LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
   } 



[35/42] hadoop git commit: YARN-3866. AM-RM protocol changes to support container resizing. Contributed by Meng Ding

2015-08-25 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5338bd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
deleted file mode 100644
index 29b0ffe..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api;
-
-import org.junit.Assert;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
-import org.apache.hadoop.yarn.api.records.Resource;
-import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceDecreasePBImpl;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceDecreaseProto;
-import org.junit.Test;
-
-public class TestContainerResourceDecrease {
-  @Test
-  public void testResourceDecreaseContext() {
-ContainerId containerId = ContainerId
-.newContainerId(ApplicationAttemptId.newInstance(
-ApplicationId.newInstance(1234, 3), 3), 7);
-Resource resource = Resource.newInstance(1023, 3);
-ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance(
-containerId, resource);
-
-// get proto and recover to ctx
-ContainerResourceDecreaseProto proto = 
-((ContainerResourceDecreasePBImpl) ctx).getProto();
-ctx = new ContainerResourceDecreasePBImpl(proto);
-
-// check values
-Assert.assertEquals(ctx.getCapability(), resource);
-Assert.assertEquals(ctx.getContainerId(), containerId);
-  }
-  
-  @Test
-  public void testResourceDecreaseContextWithNull() {
-ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance(null,
-null);
-
-// get proto and recover to ctx;
-ContainerResourceDecreaseProto proto = 
-((ContainerResourceDecreasePBImpl) ctx).getProto();
-ctx = new ContainerResourceDecreasePBImpl(proto);
-
-// check values
-Assert.assertNull(ctx.getCapability());
-Assert.assertNull(ctx.getContainerId());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5338bd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
deleted file mode 100644
index 932d5a7..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api;

[17/42] hadoop git commit: MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via kasha)

2015-08-25 Thread wangda
MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61bf9cae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61bf9cae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61bf9cae

Branch: refs/heads/YARN-1197
Commit: 61bf9cae6f3882c6e9a9222f59457b9be91e3018
Parents: 745d04b
Author: Karthik Kambatla 
Authored: Sat Aug 22 00:38:47 2015 -0700
Committer: Karthik Kambatla 
Committed: Sat Aug 22 00:39:04 2015 -0700

--
 .../src/main/java/org/apache/hadoop/conf/Configuration.java | 9 +++--
 hadoop-mapreduce-project/CHANGES.txt| 2 ++
 .../src/main/java/org/apache/hadoop/mapred/TaskLog.java | 4 +++-
 3 files changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61bf9cae/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 0b45429..6f1d3f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -988,8 +988,13 @@ public class Configuration implements 
Iterable>,
   } catch(SecurityException se) {
 LOG.warn("Unexpected SecurityException in Configuration", se);
   }
-  if (val == null) {
-val = getRaw(var);
+  if (val == null || val.isEmpty()) {
+String raw = getRaw(var);
+if (raw != null) {
+  // if System.getProperty(var) returns an empty string, retain this
+  // value instead of return null
+  val = raw;
+}
   }
   if (val == null) {
 return eval; // return literal ${var}: var is unbound

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61bf9cae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 361a19b..305b29e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -275,6 +275,8 @@ Trunk (Unreleased)
 MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
 (Steven Wong via aw)
 
+MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via kasha)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61bf9cae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index e07b5be..b8bb2f2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -181,7 +181,9 @@ public class TaskLog {
   }
 
   static String getBaseLogDir() {
-return System.getProperty("hadoop.log.dir");
+String logDir = System.getProperty("hadoop.log.dir");
+// file is treating "" different from null {@see File#File(String, String)}
+return logDir == null || logDir.isEmpty() ? null : logDir;
   }
 
   static File getAttemptDir(TaskAttemptID taskid, boolean isCleanup) {



[31/42] hadoop git commit: HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric before editLogStream initialization. Contributed by Surendra Singh Lilhore

2015-08-25 Thread wangda
HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric before 
editLogStream initialization. Contributed by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b00eaea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b00eaea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b00eaea

Branch: refs/heads/YARN-1197
Commit: 3b00eaea256d252be3361a7d9106b88756fcb9ba
Parents: 66d0c81
Author: Xiaoyu Yao 
Authored: Mon Aug 24 16:56:24 2015 -0700
Committer: Xiaoyu Yao 
Committed: Mon Aug 24 16:56:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/server/namenode/FSEditLog.java   | 12 
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  7 ++-
 3 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b00eaea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1844357..7aadcc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1211,6 +1211,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
 TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
 
+HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric
+before editLogStream initialization. (Surendra Singh Lilhore via xyao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b00eaea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index b1960d9..faaea63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1692,10 +1692,14 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
-   +   * Return total number of syncs happened on this edit log.
-   +   * @return long - count
-   +   */
+   * Return total number of syncs happened on this edit log.
+   * @return long - count
+   */
   public long getTotalSyncCount() {
-return editLogStream.getNumSync();
+if (editLogStream != null) {
+  return editLogStream.getNumSync();
+} else {
+  return 0;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b00eaea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6baa70f..3c3ef0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7295,7 +7295,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Metric({"TotalSyncTimes",
   "Total time spend in sync operation on various edit logs"})
   public String getTotalSyncTimes() {
-return fsImage.editLog.getJournalSet().getSyncTimes();
+JournalSet journalSet = fsImage.editLog.getJournalSet();
+if (journalSet != null) {
+  return journalSet.getSyncTimes();
+} else {
+  return "";
+}
   }
 }
 



[22/42] hadoop git commit: YARN-221. NM should provide a way for AM to tell it not to aggregate logs. Contributed by Ming Ma

2015-08-25 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e1c3d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 6a3d270..77d75ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -86,6 +86,8 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -99,11 +101,13 @@ import 
org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
-import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerLogAggregationPolicy;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
-import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
@@ -191,12 +195,12 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 app1LogDir.mkdir();
 logAggregationService
 .handle(new LogHandlerAppStartedEvent(
-application1, this.user, null,
-ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
+application1, this.user, null, this.acls));
 
 ApplicationAttemptId appAttemptId =
 BuilderUtils.newApplicationAttemptId(application1, 1);
-ContainerId container11 = BuilderUtils.newContainerId(appAttemptId, 1);
+ContainerId container11 = createContainer(appAttemptId, 1,
+ContainerType.APPLICATION_MASTER);
 // Simulate log-file creation
 writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
 "stderr", "syslog" });
@@ -302,11 +306,12 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 LogAggregationContext context =
 LogAggregationContext.newInstance("HOST*", "sys*");
 logAggregationService.handle(new LogHandlerAppStartedEvent(app, this.user,
-null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls, 
context));
+null, this.acls, context));
 
 ApplicationAttemptId appAttemptId =
 BuilderUtils.newApplicationAttemptId(app, 1);
-ContainerId cont = BuilderUtils.newContainerId(appAttemptId, 1);
+ContainerId cont = createContainer(appAttemptId, 1,
+ContainerType.APPLICATION_MASTER);
 writeContainerLogs(appLogDir, cont, new String[] { "stdout",
 "stderr", "syslog" });
 logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont, 
0));
@@ -337,8 +342,7 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 app1LogDir.mkdir();
 logAggregationService
 .handle(new LogHandlerAppStartedEvent(
-application1, this.user, null,
-ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
+application1, this.user, null, this.acls));
 
 logAggregationService.handle(new LogHandlerAppFinishedEvent(
 application1));
@@ -388,13 +392,13 @@ pu

[09/42] hadoop git commit: HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. Contributed by Jing Zhao.

2015-08-25 Thread wangda
HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8bca627
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8bca627
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8bca627

Branch: refs/heads/YARN-1197
Commit: c8bca62718203a1dad9b70d164bdf10cc71b40cd
Parents: 7642f64
Author: Jing Zhao 
Authored: Thu Aug 20 16:31:24 2015 -0700
Committer: Jing Zhao 
Committed: Thu Aug 20 16:31:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../apache/hadoop/hdfs/server/namenode/NamenodeFsck.java  | 10 ++
 .../org/apache/hadoop/hdfs/server/namenode/TestFsck.java  |  2 ++
 3 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dcc5d58..d9d176b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1194,6 +1194,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
 requires it (Ayappan via Colin P. McCabe)
 
+HDFS-8809. HDFS fsck reports under construction blocks as "CORRUPT". 
(jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2f989d1..c7892b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -528,6 +528,9 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   LocatedBlocks blocks) throws IOException {
 String path = file.getFullName(parent);
 boolean isOpen = blocks.isUnderConstruction();
+if (isOpen && !showOpenFiles) {
+  return;
+}
 int missing = 0;
 int corrupt = 0;
 long missize = 0;
@@ -536,8 +539,15 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 int misReplicatedPerFile = 0;
 StringBuilder report = new StringBuilder();
 int blockNumber = 0;
+final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
 for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
   ExtendedBlock block = lBlk.getBlock();
+  if (!blocks.isLastBlockComplete() && lastBlock != null &&
+  lastBlock.getBlock().equals(block)) {
+// this is the last block and this is not complete. ignore it since
+// it is under construction
+continue;
+  }
   BlockManager bm = namenode.getNamesystem().getBlockManager();
 
   final BlockInfo storedBlock = bm.getStoredBlock(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 8818f17..2226947 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -612,6 +613,7 @@ public class TestFsck {
 out.write(randomString.getBytes());
 writeCount++;  
   }
+  ((DFSOutputStream) out.getWrappedStream()).hflush();
   // We expect the filesystem to be HEALTHY and show one open file
   outStr = runFsck(conf, 0, true, topDir);
   System.out.println(outStr);



[26/42] hadoop git commit: YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its response id has not been reset synchronously. (Jun Gong via rohithsharmaks)

2015-08-25 Thread wangda
YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its response id 
has not been reset synchronously. (Jun Gong via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feaf0349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feaf0349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feaf0349

Branch: refs/heads/YARN-1197
Commit: feaf0349949e831ce3f25814c1bbff52f17bfe8f
Parents: bcaf839
Author: Rohith Sharma K S 
Authored: Mon Aug 24 11:25:07 2015 +0530
Committer: Rohith Sharma K S 
Committed: Mon Aug 24 11:25:07 2015 +0530

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |  3 ++
 .../yarn/sls/scheduler/RMNodeWrapper.java   |  5 +++
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../resourcemanager/ResourceTrackerService.java |  2 +
 .../server/resourcemanager/rmnode/RMNode.java   |  7 +++-
 .../resourcemanager/rmnode/RMNodeImpl.java  | 15 +---
 .../yarn/server/resourcemanager/MockNodes.java  |  4 ++
 .../resourcetracker/TestNMReconnect.java| 39 
 8 files changed, 72 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf0349/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 440779c..2d2c3e0 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -149,6 +149,9 @@ public class NodeInfo {
   return null;
 }
 
+public void resetLastNodeHeartBeatResponse() {
+}
+
 public List pullContainerUpdates() {
   ArrayList list = new 
ArrayList();
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf0349/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index a6633ae..ecc4734 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -135,6 +135,11 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public void resetLastNodeHeartBeatResponse() {
+node.getLastNodeHeartBeatResponse().setResponseId(0);
+  }
+
+  @Override
   @SuppressWarnings("unchecked")
   public List pullContainerUpdates() {
 List list = Collections.EMPTY_LIST;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf0349/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5904a31..bf58c96 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -792,6 +792,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3986. getTransferredContainers in AbstractYarnScheduler should be 
present
 in YarnScheduler interface instead. (Varun Saxena via rohithsharmaks)
 
+YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its 
response id 
+has not been reset synchronously. (Jun Gong via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf0349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 3c2c09b..100e991 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/

[20/42] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-25 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
new file mode 100644
index 000..78325a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -0,0 +1,647 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.shortcircuit;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.BitSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Random;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.fs.InvalidRequestException;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import sun.misc.Unsafe;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ComparisonChain;
+import com.google.common.primitives.Ints;
+
+/**
+ * A shared memory segment used to implement short-circuit reads.
+ */
+public class ShortCircuitShm {
+  private static final Logger LOG = 
LoggerFactory.getLogger(ShortCircuitShm.class);
+
+  protected static final int BYTES_PER_SLOT = 64;
+
+  private static final Unsafe unsafe = safetyDance();
+
+  private static Unsafe safetyDance() {
+try {
+  Field f = Unsafe.class.getDeclaredField("theUnsafe");
+  f.setAccessible(true);
+  return (Unsafe)f.get(null);
+} catch (Throwable e) {
+  LOG.error("failed to load misc.Unsafe", e);
+}
+return null;
+  }
+
+  /**
+   * Calculate the usable size of a shared memory segment.
+   * We round down to a multiple of the slot size and do some validation.
+   *
+   * @param stream The stream we're using.
+   * @return   The usable size of the shared memory segment.
+   */
+  private static int getUsableLength(FileInputStream stream)
+  throws IOException {
+int intSize = Ints.checkedCast(stream.getChannel().size());
+int slots = intSize / BYTES_PER_SLOT;
+if (slots == 0) {
+  throw new IOException("size of shared memory segment was " +
+  intSize + ", but that is not enough to hold even one slot.");
+}
+return slots * BYTES_PER_SLOT;
+  }
+
+  /**
+   * Identifies a DfsClientShm.
+   */
+  public static class ShmId implements Comparable {
+private static final Random random = new Random();
+private final long hi;
+private final long lo;
+
+/**
+ * Generate a random ShmId.
+ * 
+ * We generate ShmIds randomly to prevent a malicious client from
+ * successfully guessing one and using that to interfere with another
+ * client.
+ */
+public static ShmId createRandom() {
+  return new ShmId(random.nextLong(), random.nextLong());
+}
+
+public ShmId(long hi, long lo) {
+  this.hi = hi;
+  this.lo = lo;
+}
+
+public long getHi() {
+  return hi;
+}
+
+public long getLo() {
+  return lo;
+}
+
+@Override
+public boolean equals(Object o) {
+  if ((o == null) || (o.getClass() != this.getClass())) {
+return false;
+  }
+  ShmId other = (ShmId)o;
+  return new EqualsBuilder().
+  append(hi, other.hi).
+  append(lo, other.lo).
+  isEquals();
+}
+
+@Override
+public int hashCode() {
+  return new HashCodeBuilder().
+  append(this.hi).
+  append(this.lo).
+  toHashCode();
+}
+
+@Override
+public String toString() {
+  return String.format("%016x%016x", hi, 

[29/42] hadoop git commit: HDFS-8928. Improvements for BlockUnderConstructionFeature: ReplicaUnderConstruction as a separate class and replicas as an array. Contributed by Jing Zhao.

2015-08-25 Thread wangda
HDFS-8928. Improvements for BlockUnderConstructionFeature: 
ReplicaUnderConstruction as a separate class and replicas as an array. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd79388
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd79388
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd79388

Branch: refs/heads/YARN-1197
Commit: bdd79388f39f4f35af7decd5703eff587b0ddfb7
Parents: 48774d0
Author: Jing Zhao 
Authored: Mon Aug 24 15:53:34 2015 -0700
Committer: Jing Zhao 
Committed: Mon Aug 24 15:53:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   4 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   3 +-
 .../BlockUnderConstructionFeature.java  | 211 ++-
 .../ReplicaUnderConstruction.java   | 119 +++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 6 files changed, 195 insertions(+), 152 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c90c247..b17492d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -829,6 +829,10 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8934. Move ShortCircuitShm to hdfs-client. (Mingliang Liu via wheat9)
 
+HDFS-8928. Improvements for BlockUnderConstructionFeature:
+ReplicaUnderConstruction as a separate class and replicas as an array.
+(jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 72fc005..a9dfdde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -24,7 +24,6 @@ import java.util.List;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature.ReplicaUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -363,7 +362,7 @@ public abstract class  BlockInfo extends Block
 } else {
   // the block is already under construction
   uc.setBlockUCState(s);
-  uc.setExpectedLocations(this.getGenerationStamp(), targets);
+  uc.setExpectedLocations(this, targets);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index de51b2f..88cf06d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
+import java.util.ArrayList;
+import java.util.List;
+
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.COMPLETE;
+
 /**
- * Represents a block that is currently being constructed.
+ * Represents t

[14/42] hadoop git commit: HADOOP-12347. Fix mismatch parameter name in javadocs of AuthToken#setMaxInactives. Contributed by Xiaoyu Yao

2015-08-25 Thread wangda
HADOOP-12347. Fix mismatch parameter name in javadocs of 
AuthToken#setMaxInactives. Contributed by Xiaoyu Yao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caa636bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caa636bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caa636bf

Branch: refs/heads/YARN-1197
Commit: caa636bf10d96ca2d4e151225fb46134ce99f9cf
Parents: 22de7c1
Author: Xiaoyu Yao 
Authored: Fri Aug 21 16:32:16 2015 -0700
Committer: Xiaoyu Yao 
Committed: Fri Aug 21 16:32:57 2015 -0700

--
 .../apache/hadoop/security/authentication/util/AuthToken.java  | 3 ++-
 hadoop-common-project/hadoop-common/CHANGES.txt| 6 ++
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa636bf/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
index 870b267..4fbe599 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
@@ -96,7 +96,8 @@ public class AuthToken implements Principal {
   /**
* Sets the max inactive interval of the token.
*
-   * @param max inactive interval of the token in milliseconds since the epoch.
+   * @param interval max inactive interval of the token in milliseconds since
+   * the epoch.
*/
   public void setMaxInactives(long interval) {
 this.maxInactives = interval;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa636bf/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d6353a4..b4445fa 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -750,6 +750,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
 
+HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
+(hzlu via benoyantony)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp
@@ -1067,6 +1070,9 @@ Release 2.8.0 - UNRELEASED
 because NM container recovery declares AM container as LOST
 (adhoot via rkanter)
 
+HADOOP-12347. Fix mismatch parameter name in javadocs of
+AuthToken#setMaxInactives (xyao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[30/42] hadoop git commit: HDFS-8948. Use GenericTestUtils to set log levels in TestPread and TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.

2015-08-25 Thread wangda
HDFS-8948. Use GenericTestUtils to set log levels in TestPread and 
TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66d0c81d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66d0c81d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66d0c81d

Branch: refs/heads/YARN-1197
Commit: 66d0c81d8f4e200a5051c8df87be890c9ad8772e
Parents: bdd7938
Author: Haohui Mai 
Authored: Mon Aug 24 16:16:10 2015 -0700
Committer: Haohui Mai 
Committed: Mon Aug 24 16:18:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java  | 4 ++--
 .../org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b17492d..1844357 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1208,6 +1208,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8930. Block report lease may leak if the 2nd full block report comes
 when NN is still in safemode (Colin P. McCabe via Jing Zhao)
 
+HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
+TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index ed553f6..43650a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -29,7 +29,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
@@ -261,7 +261,7 @@ public class TestPread {
   @Test
   public void testPreadDFSNoChecksum() throws IOException {
 Configuration conf = new Configuration();
-((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
+GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
 dfsPreadTest(conf, false, false);
 dfsPreadTest(conf, true, false);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index f92f287..d351020 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,6 +32,7 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Te

[01/42] hadoop git commit: HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests. Contributed by Zhe Zhang.

2015-08-25 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1197 e258bd4b4 -> f35a94517 (forced update)


HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e14f798
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e14f798
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e14f798

Branch: refs/heads/YARN-1197
Commit: 4e14f7982a6e57bf08deb3b266806c2b779a157d
Parents: 3aac475
Author: Jing Zhao 
Authored: Wed Aug 19 15:11:37 2015 -0700
Committer: Jing Zhao 
Committed: Wed Aug 19 15:11:37 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |  1 -
 .../blockmanagement/BlockInfoContiguous.java|  3 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../BlockUnderConstructionFeature.java  |  4 +-
 .../namenode/FileUnderConstructionFeature.java  |  2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |  4 +-
 .../TestBlockInfoUnderConstruction.java | 80 
 .../TestBlockUnderConstructionFeature.java  | 80 
 .../namenode/snapshot/SnapshotTestHelper.java   |  4 +-
 11 files changed, 93 insertions(+), 92 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7fbc23..080f0d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -816,6 +816,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8803. Move DfsClientConf to hdfs-client. (Mingliang Liu via wheat9)
 
+HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests.
+(Zhe Zhang via jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 94dac35..659be56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -78,7 +78,6 @@ public abstract class  BlockInfo extends Block
 
   /**
* Copy construction.
-   * This is used to convert BlockInfoUnderConstruction
* @param from BlockInfo to copy from.
*/
   protected BlockInfo(BlockInfo from) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index eff89a8..42934c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -37,8 +37,7 @@ public class BlockInfoContiguous extends BlockInfo {
 
   /**
* Copy construction.
-   * This is used to convert BlockReplicationInfoUnderConstruction
-   * @param from BlockReplicationInfo to copy from.
+   * @param from BlockInfoContiguous to copy from.
*/
   protected BlockInfoContiguous(BlockInfoContiguous from) {
 super(from);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index aad7fec..f2d0515 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement

[24/42] hadoop git commit: YARN-221. Addendum patch to fix a compilation issue caused by the missing AllContainerLogAggregationPolicy. Contributed by Xuan Gong

2015-08-25 Thread wangda
YARN-221. Addendum patch to fix a compilation issue caused by the missing
AllContainerLogAggregationPolicy. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b71c6006
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b71c6006
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b71c6006

Branch: refs/heads/YARN-1197
Commit: b71c6006f579ac6f0755975a9b908b0062618b46
Parents: 37e1c3d
Author: Xuan 
Authored: Sun Aug 23 16:46:30 2015 -0700
Committer: Xuan 
Committed: Sun Aug 23 16:46:30 2015 -0700

--
 .../ContainerLogsRetentionPolicy.java   | 30 
 .../AllContainerLogAggregationPolicy.java   | 30 
 2 files changed, 30 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71c6006/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRetentionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRetentionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRetentionPolicy.java
deleted file mode 100644
index 3e7cd5a..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRetentionPolicy.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.yarn.server.api.ContainerLogContext;
-
-@Private
-public class AllContainerLogAggregationPolicy extends
-AbstractContainerLogAggregationPolicy {
-  public boolean shouldDoLogAggregation(ContainerLogContext logContext) {
-return true;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71c6006/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AllContainerLogAggregationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AllContainerLogAggregationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AllContainerLogAggregationPolicy.java
new file mode 100644
index 000..3e7cd5a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AllContainerLogAggregationPolicy.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
+
+

[36/42] hadoop git commit: YARN-3866. AM-RM protocol changes to support container resizing. Contributed by Meng Ding

2015-08-25 Thread wangda
YARN-3866. AM-RM protocol changes to support container resizing. Contributed by 
Meng Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5338bd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5338bd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5338bd5

Branch: refs/heads/YARN-1197
Commit: d5338bd5a90e7d9de7da5ff4ca8b97d90a7a5e47
Parents: eee0d45
Author: Jian He 
Authored: Mon Jul 13 17:34:26 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:06:17 2015 -0700

--
 .../app/local/TestLocalContainerAllocator.java  |   6 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../api/protocolrecords/AllocateRequest.java|  57 --
 .../api/protocolrecords/AllocateResponse.java   |  42 +++--
 .../records/ContainerResourceChangeRequest.java | 117 +
 .../api/records/ContainerResourceDecrease.java  |  78 -
 .../api/records/ContainerResourceIncrease.java  |  84 -
 .../ContainerResourceIncreaseRequest.java   |  80 -
 .../yarn/api/records/ContainerStatus.java   |  13 ++
 .../src/main/proto/yarn_protos.proto|  14 +-
 .../src/main/proto/yarn_service_protos.proto|  16 +-
 .../impl/pb/AllocateRequestPBImpl.java  | 119 ++---
 .../impl/pb/AllocateResponsePBImpl.java | 175 +--
 .../ContainerResourceChangeRequestPBImpl.java   | 141 +++
 .../pb/ContainerResourceDecreasePBImpl.java | 136 --
 .../pb/ContainerResourceIncreasePBImpl.java | 171 --
 .../ContainerResourceIncreaseRequestPBImpl.java | 141 ---
 .../records/impl/pb/ContainerStatusPBImpl.java  |  31 +++-
 .../hadoop/yarn/api/TestAllocateRequest.java|  73 
 .../hadoop/yarn/api/TestAllocateResponse.java   | 114 
 .../yarn/api/TestContainerResourceDecrease.java |  66 ---
 .../yarn/api/TestContainerResourceIncrease.java |  74 
 .../TestContainerResourceIncreaseRequest.java   |  68 ---
 .../hadoop/yarn/api/TestPBImplRecords.java  |  34 +---
 24 files changed, 534 insertions(+), 1319 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5338bd5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
index f901ed8..167d804 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
@@ -46,8 +46,6 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
-import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
@@ -254,8 +252,8 @@ public class TestLocalContainerAllocator {
   Resources.none(), null, 1, null,
   Collections.emptyList(),
   yarnToken,
-  Collections.emptyList(),
-  Collections.emptyList());
+  Collections.emptyList(),
+  Collections.emptyList());
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5338bd5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1190619..3d7ccc9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -184,6 +184,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4014. Support user cli interface in for Application Priority.
 (Rohith Sharma K S via jianhe)
 
+YARN-3866. AM-RM protocol changes to support container resizing. (Meng Ding
+via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoo

[40/42] hadoop git commit: YARN-3867. ContainerImpl changes to support container resizing. Contributed by Meng Ding

2015-08-25 Thread wangda
YARN-3867. ContainerImpl changes to support container resizing. Contributed by 
Meng Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea7e8872
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea7e8872
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea7e8872

Branch: refs/heads/YARN-1197
Commit: ea7e887282d9f6ef07d0b77ed10494391b227129
Parents: 582ecf8
Author: Jian He 
Authored: Tue Jul 28 13:51:23 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:06:18 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  4 ++-
 .../containermanager/ContainerManagerImpl.java  |  7 ++--
 .../container/ChangeContainerResourceEvent.java | 36 ---
 .../containermanager/container/Container.java   |  2 ++
 .../container/ContainerEventType.java   |  4 ---
 .../container/ContainerImpl.java| 16 ++---
 .../ChangeMonitoringContainerResourceEvent.java | 37 
 .../monitor/ContainersMonitorEventType.java |  3 +-
 .../nodemanager/metrics/NodeManagerMetrics.java | 11 ++
 .../nodemanager/TestNodeStatusUpdater.java  |  2 +-
 .../metrics/TestNodeManagerMetrics.java | 18 +++---
 .../nodemanager/webapp/MockContainer.java   |  4 +++
 .../yarn/server/resourcemanager/MockNM.java |  2 +-
 .../server/resourcemanager/NodeManager.java |  2 +-
 .../resourcemanager/TestApplicationCleanup.java |  6 ++--
 .../attempt/TestRMAppAttemptTransitions.java| 21 +++
 .../capacity/TestCapacityScheduler.java |  2 +-
 .../scheduler/fifo/TestFifoScheduler.java   |  4 +--
 .../security/TestAMRMTokens.java|  3 +-
 20 files changed, 118 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7e8872/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f0f7732..5a1ce6e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -193,6 +193,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1645. ContainerManager implementation to support container resizing.
 (Meng Ding & Wangda Tan via jianhe)
 
+YARN-3867. ContainerImpl changes to support container resizing. (Meng Ding 
+via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7e8872/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index a3bd6f8..475e9fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -200,13 +200,15 @@ public class BuilderUtils {
   }
 
   public static ContainerStatus newContainerStatus(ContainerId containerId,
-  ContainerState containerState, String diagnostics, int exitStatus) {
+  ContainerState containerState, String diagnostics, int exitStatus,
+  Resource capability) {
 ContainerStatus containerStatus = recordFactory
   .newRecordInstance(ContainerStatus.class);
 containerStatus.setState(containerState);
 containerStatus.setContainerId(containerId);
 containerStatus.setDiagnostics(diagnostics);
 containerStatus.setExitStatus(exitStatus);
+containerStatus.setCapability(capability);
 return containerStatus;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7e8872/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl

[19/42] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-25 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 8e81fdc..beaa903 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -695,7 +695,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   RpcController controller, GetDatanodeReportRequestProto req)
   throws ServiceException {
 try {
-  List result = PBHelper.convert(server
+  List result = PBHelperClient.convert(server
   .getDatanodeReport(PBHelper.convert(req.getType(;
   return GetDatanodeReportResponseProto.newBuilder()
   .addAllDi(result).build();
@@ -890,7 +890,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   server.setQuota(req.getPath(), req.getNamespaceQuota(),
   req.getStoragespaceQuota(),
   req.hasStorageType() ?
-  PBHelper.convertStorageType(req.getStorageType()): null);
+  PBHelperClient.convertStorageType(req.getStorageType()): null);
   return VOID_SETQUOTA_RESPONSE;
 } catch (IOException e) {
   throw new ServiceException(e);
@@ -990,7 +990,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   GetDelegationTokenResponseProto.Builder rspBuilder = 
   GetDelegationTokenResponseProto.newBuilder();
   if (token != null) {
-rspBuilder.setToken(PBHelper.convert(token));
+rspBuilder.setToken(PBHelperClient.convert(token));
   }
   return rspBuilder.build();
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490bb5eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index d6afa6e..d30982a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -390,7 +390,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   String holder) throws AccessControlException, FileNotFoundException,
 UnresolvedLinkException, IOException {
 AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
-.setB(PBHelper.convert(b)).setSrc(src).setHolder(holder)
+.setB(PBHelperClient.convert(b)).setSrc(src).setHolder(holder)
 .setFileId(fileId).build();
 try {
   rpcProxy.abandonBlock(null, req);
@@ -409,9 +409,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
 .setSrc(src).setClientName(clientName).setFileId(fileId);
 if (previous != null) 
-  req.setPrevious(PBHelper.convert(previous)); 
-if (excludeNodes != null) 
-  req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
+  req.setPrevious(PBHelperClient.convert(previous));
+if (excludeNodes != null)
+  req.addAllExcludeNodes(PBHelperClient.convert(excludeNodes));
 if (favoredNodes != null) {
   req.addAllFavoredNodes(Arrays.asList(favoredNodes));
 }
@@ -433,10 +433,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .newBuilder()
 .setSrc(src)
 .setFileId(fileId)
-.setBlk(PBHelper.convert(blk))
-.addAllExistings(PBHelper.convert(existings))
+.setBlk(PBHelperClient.convert(blk))
+.addAllExistings(PBHelperClient.convert(existings))
 .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
-.addAllExcludes(PBHelper.convert(excludes))
+.addAllExcludes(PBHelperClient.convert(excludes))
 .setNumAdditionalNodes(numAdditionalNodes)
 .setClientName(clientName)
 .build();
@@ -458,7 +458,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .setClientName(clientName)
 .setFileId(fileId);
 if (last != null)
-  req.setLast(PBHel

[41/42] hadoop git commit: YARN-1644. RM-NM protocol changes and NodeStatusUpdater implementation to support container resizing. Contributed by Meng Ding

2015-08-25 Thread wangda
YARN-1644. RM-NM protocol changes and NodeStatusUpdater implementation to 
support container resizing. Contributed by Meng Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c75bb5ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c75bb5ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c75bb5ac

Branch: refs/heads/YARN-1197
Commit: c75bb5ac9bb2a6ddfc281b3bc82be238a178d432
Parents: b24514e
Author: Jian He 
Authored: Thu Aug 20 21:04:14 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:09:59 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/client/TestResourceTrackerOnHA.java|   2 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   4 +
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  76 +-
 .../yarn/server/api/records/NodeStatus.java |  15 +-
 .../api/records/impl/pb/NodeStatusPBImpl.java   |  75 +-
 .../main/proto/yarn_server_common_protos.proto  |   3 +-
 .../yarn_server_common_service_protos.proto |   1 +
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  39 ++-
 .../hadoop/yarn/server/nodemanager/Context.java |   3 +
 .../yarn/server/nodemanager/NodeManager.java|  10 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |  59 -
 .../containermanager/ContainerManagerImpl.java  | 116 +
 .../nodemanager/TestNodeManagerResync.java  | 258 +++
 .../containermanager/TestContainerManager.java  |   2 +-
 15 files changed, 600 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c75bb5ac/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 758e1a8..35da67f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -199,6 +199,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1643. Make ContainersMonitor support changing monitoring size of an
 allocated container. (Meng Ding and Wangda Tan)
 
+YARN-1644. RM-NM protocol changes and NodeStatusUpdater implementation to
+support container resizing. (Meng Ding via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c75bb5ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
index 6cdf87f..338198b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
@@ -68,7 +68,7 @@ public class TestResourceTrackerOnHA extends 
ProtocolHATestBase{
 failoverThread = createAndStartFailoverThread();
 NodeStatus status =
 NodeStatus.newInstance(NodeId.newInstance("localhost", 0), 0, null,
-null, null, null, null);
+null, null, null, null, null);
 NodeHeartbeatRequest request2 =
 NodeHeartbeatRequest.newInstance(status, null, null,null);
 resourceTracker.nodeHeartbeat(request2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c75bb5ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
index 1498a0c..38fbc82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
@@ -24,6 +24,7 @@ import java.util.Map;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Container;

[42/42] hadoop git commit: YARN-3868. Recovery support for container resizing. Contributed by Meng Ding

2015-08-25 Thread wangda
YARN-3868. Recovery support for container resizing. Contributed by Meng Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f35a9451
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f35a9451
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f35a9451

Branch: refs/heads/YARN-1197
Commit: f35a94517a949ceed87e9481b0c32d19c1d59f06
Parents: c75bb5a
Author: Jian He 
Authored: Thu Aug 20 21:18:23 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:09:59 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../containermanager/ContainerManagerImpl.java  |   5 +-
 .../container/ContainerImpl.java|   8 +-
 .../recovery/NMLeveldbStateStoreService.java|  22 ++
 .../recovery/NMNullStateStoreService.java   |   6 +
 .../recovery/NMStateStoreService.java   |  15 ++
 .../TestContainerManagerRecovery.java   | 234 ++-
 .../recovery/NMMemoryStateStoreService.java |  11 +-
 .../TestNMLeveldbStateStoreService.java |  11 +
 9 files changed, 302 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f35a9451/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 35da67f..8bdf9ac 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -202,6 +202,8 @@ Release 2.8.0 - UNRELEASED
 YARN-1644. RM-NM protocol changes and NodeStatusUpdater implementation to
 support container resizing. (Meng Ding via jianhe)
 
+YARN-3868. Recovery support for container resizing. (Meng Ding via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f35a9451/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 01e75ba..da45df5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -328,7 +328,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   Container container = new ContainerImpl(getConfig(), dispatcher,
   context.getNMStateStore(), req.getContainerLaunchContext(),
   credentials, metrics, token, rcs.getStatus(), rcs.getExitCode(),
-  rcs.getDiagnostics(), rcs.getKilled());
+  rcs.getDiagnostics(), rcs.getKilled(), rcs.getCapability());
   context.getContainers().put(containerId, container);
   dispatcher.getEventHandler().handle(
   new ApplicationContainerInitEvent(container));
@@ -1072,6 +1072,9 @@ public class ContainerManagerImpl extends 
CompositeService implements
 this.readLock.lock();
 try {
   if (!serviceStopped) {
+// Persist container resource change for recovery
+this.context.getNMStateStore().storeContainerResourceChanged(
+containerId, targetResource);
 getContainersMonitor().handle(
 new ChangeMonitoringContainerResourceEvent(
 containerId, targetResource));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f35a9451/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 5c61a92..eff2188 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/

[27/42] hadoop git commit: HDFS-8930. Block report lease may leak if the 2nd full block report comes when NN is still in safemode (Colin P. McCabe via Jing Zhao)

2015-08-25 Thread wangda
HDFS-8930. Block report lease may leak if the 2nd full block report comes when 
NN is still in safemode (Colin P. McCabe via Jing Zhao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5ce87f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5ce87f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5ce87f8

Branch: refs/heads/YARN-1197
Commit: b5ce87f84d9de0a5347ab38c0567a5a70d1fbfd7
Parents: feaf034
Author: Colin Patrick Mccabe 
Authored: Mon Aug 24 11:31:56 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Aug 24 11:31:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 1 +
 .../hdfs/server/blockmanagement/TestBlockReportRateLimiting.java  | 2 --
 3 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0b7bc90..c90c247 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1201,6 +1201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture
 documentation. (Masatake Iwasaki via aajisaka)
 
+HDFS-8930. Block report lease may leak if the 2nd full block report comes
+when NN is still in safemode (Colin P. McCabe via Jing Zhao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5a77ad4..7f02612 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1849,6 +1849,7 @@ public class BlockManager implements BlockStatsMXBean {
 blockLog.info("BLOCK* processReport: "
 + "discarded non-initial block report from {}"
 + " because namenode still in startup phase", nodeID);
+blockReportLeaseManager.removeLease(node);
 return !node.hasStaleStorages();
   }
   if (context != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index 86a7511..3cc1b02 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -40,7 +39,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.List;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;



[23/42] hadoop git commit: YARN-221. NM should provide a way for AM to tell it not to aggregate logs. Contributed by Ming Ma

2015-08-25 Thread wangda
YARN-221. NM should provide a way for AM to tell it not to aggregate
logs. Contributed by Ming Ma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37e1c3d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37e1c3d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37e1c3d8

Branch: refs/heads/YARN-1197
Commit: 37e1c3d82a96d781e1c9982988b7de4aa5242d0c
Parents: 490bb5e
Author: Xuan 
Authored: Sat Aug 22 16:25:24 2015 -0700
Committer: Xuan 
Committed: Sat Aug 22 16:25:24 2015 -0700

--
 .../org/apache/hadoop/util/StringUtils.java |  13 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/records/LogAggregationContext.java |  95 +++
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../api/ContainerLogAggregationPolicy.java  |  54 ++
 .../yarn/server/api/ContainerLogContext.java|  71 ++
 .../src/main/proto/yarn_protos.proto|   2 +
 .../impl/pb/LogAggregationContextPBImpl.java|  40 ++
 .../ContainerLogsRetentionPolicy.java   |  15 +-
 .../src/main/resources/yarn-default.xml |  24 +
 .../application/ApplicationImpl.java|   5 +-
 .../AMOnlyLogAggregationPolicy.java |  31 +
 ...AMOrFailedContainerLogAggregationPolicy.java |  35 +
 .../AbstractContainerLogAggregationPolicy.java  |  31 +
 .../logaggregation/AppLogAggregator.java|   5 +-
 .../logaggregation/AppLogAggregatorImpl.java| 131 ++--
 .../FailedContainerLogAggregationPolicy.java|  33 +
 ...edOrKilledContainerLogAggregationPolicy.java |  30 +
 .../logaggregation/LogAggregationService.java   |  19 +-
 .../NoneContainerLogAggregationPolicy.java  |  30 +
 .../SampleContainerLogAggregationPolicy.java| 124 
 .../event/LogHandlerAppStartedEvent.java|  15 +-
 .../containermanager/TestAuxServices.java   |   1 +
 .../TestLogAggregationService.java  | 677 ---
 .../TestNonAggregatingLogHandler.java   |  12 +-
 .../capacity/TestContainerAllocation.java   |  12 +-
 26 files changed, 1343 insertions(+), 171 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e1c3d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 153270f..1107007 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -315,7 +315,18 @@ public class StringUtils {
* @return the arraylist of the comma seperated string values
*/
   public static String[] getStrings(String str){
-Collection<String> values = getStringCollection(str);
+String delim = ",";
+return getStrings(str, delim);
+  }
+
+  /**
+   * Returns an arraylist of strings.
+   * @param str the string values
+   * @param delim delimiter to separate the values
+   * @return the arraylist of the seperated string values
+   */
+  public static String[] getStrings(String str, String delim){
+Collection<String> values = getStringCollection(str, delim);
 if(values.size() == 0) {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e1c3d8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cf7b67f..5904a31 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -178,6 +178,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2923. Support configuration based NodeLabelsProvider Service in 
Distributed 
 Node Label Configuration Setup. (Naganarasimha G R)
 
+YARN-221. NM should provide a way for AM to tell it not to aggregate logs.
+(Ming Ma via xgong)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e1c3d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
index 9383004..5ac7d2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/re

[32/42] hadoop git commit: YARN-4014. Support user cli interface in for Application Priority. Contributed by Rohith Sharma K S

2015-08-25 Thread wangda
YARN-4014. Support user cli interface in for Application Priority. Contributed 
by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57c7ae1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57c7ae1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57c7ae1a

Branch: refs/heads/YARN-1197
Commit: 57c7ae1affb2e1821fbdc3f47738d7e6fd83c7c1
Parents: 3b00eae
Author: Jian He 
Authored: Mon Aug 24 20:36:08 2015 -0700
Committer: Jian He 
Committed: Mon Aug 24 20:36:44 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationClientProtocol.java |  18 ++
 .../UpdateApplicationPriorityRequest.java   |  80 +
 .../UpdateApplicationPriorityResponse.java  |  47 +
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_service_protos.proto|   8 +
 .../hadoop/yarn/client/api/YarnClient.java  |  17 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 ++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  29 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  29 
 .../ApplicationClientProtocolPBClientImpl.java  |  20 +++
 .../ApplicationClientProtocolPBServiceImpl.java |  22 +++
 .../UpdateApplicationPriorityRequestPBImpl.java | 171 +++
 ...UpdateApplicationPriorityResponsePBImpl.java |  69 
 .../server/resourcemanager/ClientRMService.java |  73 
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../resourcemanager/recovery/RMStateStore.java  |  12 +-
 .../recovery/RMStateUpdateAppEvent.java |  13 ++
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../resourcemanager/TestClientRMService.java|  63 +++
 22 files changed, 713 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 90f6876..91c3086 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -466,4 +467,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getClusterNodeLabels();
   }
+
+  @Override
+  public void updateApplicationPriority(ApplicationId applicationId,
+  Priority priority) throws YarnException, IOException {
+client.updateApplicationPriority(applicationId, priority);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index bb00b19..1bf1408 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -114,6 +114,8 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
 import org.apa

[25/42] hadoop git commit: HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture documentation. Contributed by Masatake Iwasaki.

2015-08-25 Thread wangda
HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture 
documentation. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcaf8390
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcaf8390
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcaf8390

Branch: refs/heads/YARN-1197
Commit: bcaf83902aa4d1e3e2cd26442df0a253eae7f633
Parents: b71c600
Author: Akira Ajisaka 
Authored: Mon Aug 24 13:52:49 2015 +0900
Committer: Akira Ajisaka 
Committed: Mon Aug 24 13:52:49 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcaf8390/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 78f69fb..0b7bc90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1198,6 +1198,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8809. HDFS fsck reports under construction blocks as "CORRUPT". 
(jing9)
 
+HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture
+documentation. (Masatake Iwasaki via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcaf8390/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index aa94a2f..c441ae8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -116,7 +116,8 @@ The placement of replicas is critical to HDFS reliability 
and performance. Optim
 
 Large HDFS instances run on a cluster of computers that commonly spread across 
many racks. Communication between two nodes in different racks has to go 
through switches. In most cases, network bandwidth between machines in the same 
rack is greater than network bandwidth between machines in different racks.
 
-The NameNode determines the rack id each DataNode belongs to via the process 
outlined in [Hadoop Rack 
Awareness](../hadoop-common/ClusterSetup.html#HadoopRackAwareness). A simple 
but non-optimal policy is to place replicas on unique racks. This prevents 
losing data when an entire rack fails and allows use of bandwidth from multiple 
racks when reading data. This policy evenly distributes replicas in the cluster 
which makes it easy to balance load on component failure. However, this policy 
increases the cost of writes because a write needs to transfer blocks to 
multiple racks.
+The NameNode determines the rack id each DataNode belongs to via the process 
outlined in [Hadoop Rack Awareness](../hadoop-common/RackAwareness.html).
+A simple but non-optimal policy is to place replicas on unique racks. This 
prevents losing data when an entire rack fails and allows use of bandwidth from 
multiple racks when reading data. This policy evenly distributes replicas in 
the cluster which makes it easy to balance load on component failure. However, 
this policy increases the cost of writes because a write needs to transfer 
blocks to multiple racks.
 
 For the common case, when the replication factor is three, HDFS’s placement 
policy is to put one replica on one node in the local rack, another on a 
different node in the local rack, and the last on a different node in a 
different rack. This policy cuts the inter-rack write traffic which generally 
improves write performance. The chance of rack failure is far less than that of 
node failure; this policy does not impact data reliability and availability 
guarantees. However, it does reduce the aggregate network bandwidth used when 
reading data since a block is placed in only two unique racks rather than 
three. With this policy, the replicas of a file do not evenly distribute across 
the racks. One third of replicas are on one node, two thirds of replicas are on 
one rack, and the other third are evenly distributed across the remaining 
racks. This policy improves write performance without compromising data 
reliability or read performance.
 



[38/42] hadoop git commit: YARN-1643. Make ContainersMonitor support changing monitoring size of an allocated container. Contributed by Meng Ding and Wangda Tan

2015-08-25 Thread wangda
YARN-1643. Make ContainersMonitor support changing monitoring size of an 
allocated container. Contributed by Meng Ding and Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24514ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24514ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24514ea

Branch: refs/heads/YARN-1197
Commit: b24514ea20e7015d41b07fb7f6a21d5534ff58e5
Parents: ea7e887
Author: Jian He 
Authored: Wed Aug 5 15:19:33 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:06:18 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../monitor/ContainersMonitorImpl.java  | 207 ++--
 .../TestContainerManagerWithLCE.java|  11 +
 .../containermanager/TestContainerManager.java  |  96 +++
 .../monitor/MockResourceCalculatorPlugin.java   |  69 ++
 .../MockResourceCalculatorProcessTree.java  |  57 +
 .../TestContainersMonitorResourceChange.java| 248 +++
 7 files changed, 615 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24514ea/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5a1ce6e..758e1a8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -196,6 +196,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3867. ContainerImpl changes to support container resizing. (Meng Ding 
 via jianhe)
 
+YARN-1643. Make ContainersMonitor support changing monitoring size of an
+allocated container. (Meng Ding and Wangda Tan)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24514ea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index afb51ad..b3839d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -18,13 +18,11 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -32,12 +30,14 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
 import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
@@ -56,16 +56,16 @@ public class ContainersMonitorImpl extends AbstractService 
implements
   private boolean containerMetricsEnabled;
   private long containerMetricsPeriodMs;
 
-  final List<ContainerId> containersToBeRemoved;
-  final Map<ContainerId, ProcessTreeInfo> containersToBeAdded;
-  Map<ContainerId, ProcessTreeInfo> trackingContainers =
-  new HashMap<ContainerId, ProcessTreeInfo>();
+  @VisibleForTesting
+  final Map<ContainerId, ProcessTreeInfo> trackingContainers =
+  new ConcurrentHashMap<>();

[39/42] hadoop git commit: YARN-1645. ContainerManager implementation to support container resizing. Contributed by Meng Ding & Wangda Tan

2015-08-25 Thread wangda
YARN-1645. ContainerManager implementation to support container resizing. 
Contributed by Meng Ding & Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582ecf8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582ecf8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582ecf8e

Branch: refs/heads/YARN-1197
Commit: 582ecf8e4215c2a4609cfa7a77c89321c49c2a37
Parents: 8d0b68f
Author: Jian He 
Authored: Tue Jul 21 16:10:40 2015 -0700
Committer: Wangda Tan 
Committed: Tue Aug 25 10:06:18 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../CMgrDecreaseContainersResourceEvent.java|  37 
 .../nodemanager/ContainerManagerEventType.java  |   1 +
 .../containermanager/ContainerManagerImpl.java  | 180 --
 .../container/ChangeContainerResourceEvent.java |  36 
 .../container/ContainerEventType.java   |   4 +
 .../nodemanager/DummyContainerManager.java  |   6 +-
 .../TestContainerManagerWithLCE.java|  22 +++
 .../BaseContainerManagerTest.java   |  43 -
 .../containermanager/TestContainerManager.java  | 190 ++-
 10 files changed, 486 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582ecf8e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e6b..f0f7732 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -190,6 +190,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1449. AM-NM protocol changes to support container resizing.
 (Meng Ding & Wangda Tan via jianhe)
 
+YARN-1645. ContainerManager implementation to support container resizing.
+(Meng Ding & Wangda Tan via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/582ecf8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
new file mode 100644
index 000..9479d0b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import java.util.List;
+
+public class CMgrDecreaseContainersResourceEvent extends ContainerManagerEvent {
+
+  private final List<Container> containersToDecrease;
+
+  public CMgrDecreaseContainersResourceEvent(List<Container>
+      containersToDecrease) {
+    super(ContainerManagerEventType.DECREASE_CONTAINERS_RESOURCE);
+    this.containersToDecrease = containersToDecrease;
+  }
+
+  public List<Container> getContainersToDecrease() {
+    return this.containersToDecrease;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/582ecf8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java

[14/50] [abbrv] hadoop git commit: MAPREDUCE-6335. Created MR job based performance test driver for the timeline service v2. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
MAPREDUCE-6335. Created MR job based performance test driver for the timeline 
service v2. Contributed by Sangjin Lee.

(cherry picked from commit b689f5d43d3f5434a30fe52f1a7e12e1fc5c71f4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f0b1cae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f0b1cae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f0b1cae

Branch: refs/heads/YARN-2928
Commit: 8f0b1cae5e1fd467efa6cc773ff744b27f05a2b3
Parents: 8e58f94
Author: Zhijie Shen 
Authored: Tue Apr 28 19:46:01 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:10 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../mapred/TimelineServicePerformanceV2.java| 298 +++
 .../apache/hadoop/test/MapredTestDriver.java|   3 +
 3 files changed, 304 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f0b1cae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5ac0d3b..2805780 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -9,6 +9,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history
 events and counters. (Junping Du via zjshen)
 
+MAPREDUCE-6335. Created MR job based performance test driver for the
+timeline service v2. (Sangjin Lee via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f0b1cae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
new file mode 100644
index 0000000..de46617
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.SleepJob.SleepInputFormat;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.AppLevelTimelineCollector;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+
+public class TimelineServicePerformanceV2 extends Configured implements Tool {
+  private stati

[18/50] [abbrv] hadoop git commit: YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. 
Contributed by Sangjin Lee.

(cherry picked from commit b059dd4882fd759e4762cc11c019be4b68fb74c1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e93fa602
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e93fa602
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e93fa602

Branch: refs/heads/YARN-2928
Commit: e93fa6023bf6c154aa4817a45020d385fefed22a
Parents: 7a68cda
Author: Junping Du 
Authored: Wed May 13 11:54:24 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../collectormanager/NMCollectorService.java|  5 +++
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../application/TestApplication.java|  3 +-
 .../collector/NodeTimelineCollectorManager.java | 46 ++--
 5 files changed, 34 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b06502..ec9abc9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -70,6 +70,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3529. Added mini HBase cluster and Phoenix support to timeline service
 v2 unit tests. (Li Lu via zjshen)
 
+YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
+Sangjin Lee via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index dc5601f..db79ee5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -81,6 +81,11 @@ public class NMCollectorService extends CompositeService 
implements
 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
 server.start();
+collectorServerAddress = conf.updateConnectAddr(
+YarnConfiguration.NM_BIND_HOST,
+YarnConfiguration.NM_COLLECTOR_SERVICE_ADDRESS,
+YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_ADDRESS,
+server.getListenerAddress());
 // start remaining services
 super.serviceStart();
 LOG.info("NMCollectorService started at " + collectorServerAddress);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 4dd9fa6..aa9b102 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -878,7 +878,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
 TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
 long flowRunId = 0L;
 if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
-  flowRunId = Long.valueOf(flowRunIdStr);
+  flowRunId = Long.parseLong(flowRunIdStr);
 }
 Application appl

[09/50] [abbrv] hadoop git commit: YARN-3374. Collector's web server should randomly bind an available port. Contributed by Zhijie Shen

2015-08-25 Thread sjlee
YARN-3374. Collector's web server should randomly bind an available port. 
Contributed by Zhijie Shen

(cherry picked from commit 3aa898e734a1e4368ddf1d0bbd31f9b4de53ceba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9c81ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9c81ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9c81ac5

Branch: refs/heads/YARN-2928
Commit: a9c81ac5729e3e2cb39210d4679a8da9ca5c4e4e
Parents: e8b5ab6
Author: Junping Du 
Authored: Thu Apr 2 11:59:59 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:38:44 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../hadoop/yarn/conf/YarnConfiguration.java |  1 +
 .../collector/TimelineCollectorManager.java | 20 ++--
 .../collector/TestTimelineCollectorManager.java | 12 
 4 files changed, 26 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ed5dc88..76fa0a8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -44,6 +44,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3040. Make putEntities operation be aware of the app's context. 
(Zhijie Shen 
 via junping_du)
 
+YARN-3374. Collector's web server should randomly bind an available port. (
+Zhijie Shen via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 51f2b2d..1ba7f36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1515,6 +1515,7 @@ public class YarnConfiguration extends Configuration {
   /** The listening endpoint for the timeline service application.*/
   public static final String TIMELINE_SERVICE_BIND_HOST =
   TIMELINE_SERVICE_PREFIX + "bind-host";
+  public static final String DEFAULT_TIMELINE_SERVICE_BIND_HOST = "0.0.0.0";
 
   /** The number of threads to handle client RPC API requests. */
   public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 909027e..5f23c25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -210,22 +210,17 @@ public class TimelineCollectorManager extends 
CompositeService {
*/
   private void startWebApp() {
 Configuration conf = getConfig();
-// use the same ports as the old ATS for now; we could create new 
properties
-// for the new timeline service if needed
-String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
-this.timelineRestServerBindAddress = WebAppUtils.getResolvedAddress(
-NetUtils.createSocketAddr(bindAddress));
-LOG.info("Instantiating the per-node collector webapp at " +
-timelineRestServerBindAddress);
+String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + "

[08/50] [abbrv] hadoop git commit: YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by Sangjin Lee

2015-08-25 Thread sjlee
YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by
Sangjin Lee

(cherry picked from commit dda84085cabd8fdf143b380e54e1730802fd9912)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63c7210c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63c7210c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63c7210c

Branch: refs/heads/YARN-2928
Commit: 63c7210c248be9a8e65b249b0593f1ace6003db5
Parents: 32acd9b
Author: Junping Du 
Authored: Thu Mar 19 11:49:07 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:38:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   5 +-
 .../hadoop-yarn/hadoop-yarn-api/pom.xml |   4 +
 .../api/protocolrecords/AllocateResponse.java   |  20 +-
 .../timelineservice/TimelineWriteResponse.java  |  20 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  20 +-
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../pom.xml |  10 +
 .../distributedshell/ApplicationMaster.java |  54 ++--
 .../applications/distributedshell/Client.java   |   8 +-
 .../distributedshell/TestDistributedShell.java  |  10 +-
 .../hadoop/yarn/client/api/AMRMClient.java  |   6 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |   4 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |  20 +-
 .../impl/pb/AllocateResponsePBImpl.java |  16 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |   2 +-
 .../client/api/impl/TimelineClientImpl.java |  32 +--
 .../src/main/resources/yarn-default.xml |  14 +-
 .../hadoop/yarn/TestContainerLaunchRPC.java |   2 +-
 .../hadoop/yarn/api/TestAllocateResponse.java   |  12 +-
 .../hadoop-yarn-server-common/pom.xml   |   2 +-
 .../api/AggregatorNodemanagerProtocol.java  |  56 
 .../api/AggregatorNodemanagerProtocolPB.java|  33 ---
 .../api/CollectorNodemanagerProtocol.java   |  57 
 .../api/CollectorNodemanagerProtocolPB.java |  33 +++
 ...gregatorNodemanagerProtocolPBClientImpl.java |  94 ---
 ...ollectorNodemanagerProtocolPBClientImpl.java |  94 +++
 ...regatorNodemanagerProtocolPBServiceImpl.java |  61 
 ...llectorNodemanagerProtocolPBServiceImpl.java |  59 
 .../protocolrecords/NodeHeartbeatRequest.java   |  13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   8 +-
 .../ReportNewAggregatorsInfoRequest.java|  53 
 .../ReportNewAggregatorsInfoResponse.java   |  32 ---
 .../ReportNewCollectorInfoRequest.java  |  53 
 .../ReportNewCollectorInfoResponse.java |  32 +++
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  58 ++--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  60 ++--
 .../ReportNewAggregatorsInfoRequestPBImpl.java  | 142 --
 .../ReportNewAggregatorsInfoResponsePBImpl.java |  74 -
 .../pb/ReportNewCollectorInfoRequestPBImpl.java | 142 ++
 .../ReportNewCollectorInfoResponsePBImpl.java   |  74 +
 .../server/api/records/AppAggregatorsMap.java   |  33 ---
 .../server/api/records/AppCollectorsMap.java|  46 +++
 .../impl/pb/AppAggregatorsMapPBImpl.java| 151 --
 .../records/impl/pb/AppCollectorsMapPBImpl.java | 151 ++
 .../proto/aggregatornodemanager_protocol.proto  |  29 --
 .../proto/collectornodemanager_protocol.proto   |  29 ++
 .../yarn_server_common_service_protos.proto |  18 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java| 116 
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  24 +-
 .../hadoop/yarn/server/nodemanager/Context.java |  14 +-
 .../yarn/server/nodemanager/NodeManager.java|  56 ++--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  11 +-
 .../aggregatormanager/NMAggregatorService.java  | 113 
 .../collectormanager/NMCollectorService.java| 110 
 .../application/ApplicationImpl.java|   9 +-
 .../ApplicationMasterService.java   |  12 +-
 .../resourcemanager/ResourceTrackerService.java |  72 ++---
 .../server/resourcemanager/rmapp/RMApp.java |  22 +-
 .../rmapp/RMAppAggregatorUpdateEvent.java   |  36 ---
 .../rmapp/RMAppCollectorUpdateEvent.java|  37 +++
 .../resourcemanager/rmapp/RMAppEventType.java   |   4 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  60 ++--
 .../applicationsmanager/MockAsm.java|   6 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |   8 +-
 .../hadoop-yarn-server-tests/pom.xml|   5 +
 .../TestTimelineServiceClientIntegration.java   |  52 +++-
 .../hadoop-yarn-server-timelineservice/pom.xml  |  10 +
 .../aggregator/AppLevelTimelineAggregator.java  |  57 
 .../PerNodeTimelineAggregatorsAuxService.java   | 211 --
 .../aggregator/TimelineAggregator.java  | 122 
 .../TimelineAggregatorWebService.java   | 180 
 .../TimelineAggregatorsCollecti

[33/50] [abbrv] hadoop git commit: YARN-3836. add equals and hashCode to TimelineEntity and other classes in the data model (Li Lu via sjlee)

2015-08-25 Thread sjlee
YARN-3836. add equals and hashCode to TimelineEntity and other classes in the 
data model (Li Lu via sjlee)

(cherry picked from commit 2d4a8f4563c06339717ca9410b2794754603fba3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cced5944
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cced5944
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cced5944

Branch: refs/heads/YARN-2928
Commit: cced59444b880ed60bf5f288ef692ca6fca198ad
Parents: e281987
Author: Sangjin Lee 
Authored: Thu Jul 9 20:50:48 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../records/timelineservice/TimelineEntity.java | 89 +++-
 .../records/timelineservice/TimelineEvent.java  | 41 -
 .../records/timelineservice/TimelineMetric.java | 30 +++
 .../TestTimelineServiceRecords.java | 36 +++-
 5 files changed, 195 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cced5944/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d77ad59..6e10926 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -87,6 +87,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3706. Generalize native HBase writer for additional tables (Joep
 Rottinghuis via sjlee)
 
+YARN-3836. add equals and hashCode to TimelineEntity and other classes in
+the data model (Li Lu via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cced5944/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 60fba85..9ef2d90 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -31,11 +31,25 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+/**
+ * The basic timeline entity data structure for timeline service v2. Timeline
+ * entity objects are not thread safe and should not be accessed concurrently.
+ * All collection members will be initialized into empty collections. Two
+ * timeline entities are equal iff. their type and id are identical.
+ *
+ * All non-primitive type, non-collection members will be initialized into 
null.
+ * User should set the type and id of a timeline entity to make it valid (can 
be
+ * checked by using the {@link #isValid()} method). Callers to the getters
+ * should perform null checks for non-primitive type, non-collection members.
+ *
+ * Callers are recommended not to alter the returned collection objects from 
the
+ * getters.
+ */
 @XmlRootElement(name = "entity")
 @XmlAccessorType(XmlAccessType.NONE)
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
-public class TimelineEntity {
-public class TimelineEntity implements Comparable<TimelineEntity> {
   protected final static String SYSTEM_INFO_KEY_PREFIX = "SYSTEM_INFO_";
 
   @XmlRootElement(name = "identifier")
@@ -77,6 +91,41 @@ public class TimelineEntity {
   "type='" + type + '\'' +
   ", id='" + id + '\'' + "]";
 }
+
+@Override
+public int hashCode() {
+  final int prime = 31;
+  int result = 1;
+  result = prime * result + ((id == null) ? 0 : id.hashCode());
+  result =
+prime * result + ((type == null) ? 0 : type.hashCode());
+  return result;
+}
+
+@Override
+public boolean equals(Object obj) {
+  if (this == obj)
+return true;
+  if (!(obj instanceof Identifier)) {
+return false;
+  }
+  Identifier other = (Identifier) obj;
+  if (id == null) {
+if (other.getId() != null) {
+  return false;
+}
+  } else if (!id.equals(other.getId())) {
+return false;
+  }
+  if (type == null) {
+if (other.getType() != null) {
+  return false;
+}
+  } else if (!type.equals(other.getType())) {
+return false;
+  }
+  return true;
+}
   }
 
   private TimelineEntity real;
@@ -471,6 +520,44 @@ public class TimelineEntity {
  

[34/50] [abbrv] hadoop git commit: YARN-3949. Ensure timely flush of timeline writes. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3949. Ensure timely flush of timeline writes. Contributed by Sangjin Lee.

(cherry picked from commit 967bef7e0396d857913caa2574afb103a5f0b81b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57f6d069
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57f6d069
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57f6d069

Branch: refs/heads/YARN-2928
Commit: 57f6d0698047c8ca9a260a497550c6656e2fd9d2
Parents: cced594
Author: Junping Du 
Authored: Sat Jul 25 10:30:29 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../src/main/resources/yarn-default.xml | 17 -
 .../collector/TimelineCollectorManager.java | 65 ++--
 .../storage/FileSystemTimelineWriterImpl.java   |  5 ++
 .../storage/HBaseTimelineWriterImpl.java|  6 ++
 .../storage/PhoenixTimelineWriterImpl.java  |  5 ++
 .../timelineservice/storage/TimelineWriter.java |  9 +++
 .../TestNMTimelineCollectorManager.java |  5 ++
 9 files changed, 119 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57f6d069/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6e10926..fd19320 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -79,6 +79,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3047. [Data Serving] Set up ATS reader with basic request serving
 structure and lifecycle (Varun Saxena via sjlee)
 
+YARN-3949. Ensure timely flush of timeline writes. (Sangjin Lee via
+junping_du)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57f6d069/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2bc7d70..2c59bd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1470,6 +1470,15 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  /** The setting that controls how often the timeline collector flushes the
+   * timeline writer.
+   */
+  public static final String TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS =
+  TIMELINE_SERVICE_PREFIX + "writer.flush-interval-seconds";
+
+  public static final int
+  DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS = 60;
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57f6d069/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 10450a9..8adab4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -758,7 +758,15 @@
 yarn.system-metrics-publisher.enabled
 false
   
- 
+
+  
+The setting that controls whether yarn container metrics is
+published to the timeline server or not by RM. This configuration setting 
is
+for ATS V2.
+yarn.rm.system-metrics-publisher.emit-container-events
+false
+  
+
 
   
 Number of worker threads that send the yarn system metrics
@@ -1880,6 +1888,13 @@
 ${hadoop.tmp.dir}/yarn/timeline
   
 
+  
+The setting that controls how often the timeline collector
+flushes the timeline writer.
+yarn.timeline-service.writer.flush-interval-seconds
+60
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57f6d069/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org

[49/50] [abbrv] hadoop git commit: YARN-4025. Deal with byte representations of Longs in writer code. Contributed by Sangjin Lee and Vrushali C.

2015-08-25 Thread sjlee
YARN-4025. Deal with byte representations of Longs in writer code. Contributed 
by Sangjin Lee and Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89f8fd2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89f8fd2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89f8fd2e

Branch: refs/heads/YARN-2928
Commit: 89f8fd2ebc97b9f96e329fdf1ed0692f12d3faee
Parents: 702a214
Author: Junping Du 
Authored: Wed Aug 19 10:00:33 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:52:45 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/HBaseTimelineReaderImpl.java|  68 +++---
 .../storage/HBaseTimelineWriterImpl.java|  20 +-
 .../application/ApplicationColumnPrefix.java|  40 
 .../storage/application/ApplicationTable.java   |   6 +-
 .../storage/common/ColumnHelper.java|  99 -
 .../storage/common/Separator.java   |  16 +-
 .../storage/common/TimelineWriterUtils.java |   9 +-
 .../storage/entity/EntityColumnPrefix.java  |  40 
 .../storage/entity/EntityTable.java |   6 +-
 .../storage/TestHBaseTimelineWriterImpl.java| 207 +++
 11 files changed, 373 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89f8fd2e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 492a098..cd7f849 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -94,6 +94,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3045. Implement NM writing container lifecycle events to Timeline
 Service v2. (Naganarasimha G R via junping_du)
 
+YARN-4025. Deal with byte representations of Longs in writer code.
+(Sangjin Lee and Vrushali C via junping_du)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89f8fd2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index 094f868..c514c20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -19,12 +19,9 @@ package 
org.apache.hadoop.yarn.server.timelineservice.storage;
 
 
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.NavigableSet;
@@ -431,44 +428,51 @@ public class HBaseTimelineReaderImpl
 Map columns = prefix.readResults(result);
 if (isConfig) {
   for (Map.Entry column : columns.entrySet()) {
-entity.addConfig(column.getKey(), column.getKey().toString());
+entity.addConfig(column.getKey(), column.getValue().toString());
   }
 } else {
   entity.addInfo(columns);
 }
   }
 
+  /**
+   * Read events from the entity table or the application table. The column 
name
+   * is of the form "eventId=timestamp=infoKey" where "infoKey" may be omitted
+   * if there is no info associated with the event.
+   *
+   * See {@link EntityTable} and {@link ApplicationTable} for a more detailed
+   * schema description.
+   */
   private static void readEvents(TimelineEntity entity, Result result,
   boolean isApplication) throws IOException {
 Map eventsMap = new HashMap<>();
-Map eventsResult = isApplication ?
-ApplicationColumnPrefix.EVENT.readResults(result) :
-EntityColumnPrefix.EVENT.readResults(result);
-for (Map.Entry eventResult : eventsResult.entrySet()) {
-  Collection tokens =
-  Separator.VALUES.splitEncoded(eventResult.getKey());
-  if (tokens.size() != 2 && tokens.size() != 3) {
-  

[11/50] [abbrv] hadoop git commit: YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)

2015-08-25 Thread sjlee
YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)

(cherry picked from commit 58221188811e0f61d842dac89e1f4ad4fd8aa182)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9e8d853
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9e8d853
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9e8d853

Branch: refs/heads/YARN-2928
Commit: e9e8d853ba904c89270e901a4a5327f85bd06050
Parents: 8438977
Author: Sangjin Lee 
Authored: Fri Apr 24 16:56:23 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../resourcemanager/RMActiveServiceContext.java |  13 +-
 .../server/resourcemanager/RMAppManager.java|  15 +-
 .../yarn/server/resourcemanager/RMContext.java  |   7 +-
 .../server/resourcemanager/RMContextImpl.java   |  12 +-
 .../server/resourcemanager/ResourceManager.java |  14 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 ++
 .../timelineservice/RMTimelineCollector.java| 111 
 .../RMTimelineCollectorManager.java |  75 ++
 .../TestTimelineServiceClientIntegration.java   |  12 +-
 .../collector/AppLevelTimelineCollector.java|   2 +-
 .../collector/NodeTimelineCollectorManager.java | 223 
 .../PerNodeTimelineCollectorsAuxService.java|  15 +-
 .../collector/TimelineCollector.java|   2 +-
 .../collector/TimelineCollectorManager.java | 259 +++
 .../collector/TimelineCollectorWebService.java  |  23 +-
 .../TestNMTimelineCollectorManager.java | 160 
 ...TestPerNodeTimelineCollectorsAuxService.java |  24 +-
 .../collector/TestTimelineCollectorManager.java | 160 
 19 files changed, 585 insertions(+), 559 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9e8d853/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aea859a..43d292a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -53,6 +53,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3391. Clearly define flow ID/ flow run / flow version in API and 
storage.
 (Zhijie Shen via junping_du)
 
+YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9e8d853/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index d79e542..17346ef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -47,7 +47,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRen
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
-import 
org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollector;
+import 
org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 
@@ -95,7 +95,7 @@ public class RMActiveServiceContext {
   private ApplicationMasterService applicationMasterService;
   private RMApplicationHistoryWriter rmApplicationHistoryWriter;
   private SystemMetricsPublisher systemMetricsPublisher;
-  private RMTimelineCollector timelineCollector;
+  private RMTimelineCollectorManager timelineCollectorManager;
 
   private RMNodeLabelsManager nodeLabelManager;
   private long epoch;
@@ -377,14 +377,15 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
-  public RMTimelineCollector getRMTimelineCollector() {
-return timelineCollector;
+  public 

[01/50] [abbrv] hadoop git commit: YARN-3040. Make putEntities operation be aware of the app's context. Contributed by Zhijie Shen

2015-08-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 3c36922d7 -> be95107aa (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8b5ab64/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
index 5adae71..0f51656 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
@@ -138,7 +138,7 @@ public class TimelineCollectorWebService {
 LOG.error("Application not found");
 throw new NotFoundException(); // different exception?
   }
-  collector.postEntities(entities, callerUgi);
+  collector.putEntities(entities, callerUgi);
   return Response.ok().build();
 } catch (Exception e) {
   LOG.error("Error putting entities", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8b5ab64/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index f5603f6..41b6ac9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -52,7 +52,9 @@ public class FileSystemTimelineWriterImpl extends 
AbstractService
 
   /** default value for storage location on local disk */
   public static final String DEFAULT_TIMELINE_SERVICE_STORAGE_DIR_ROOT
-= "/tmp/timeline_service_data/";
+= "/tmp/timeline_service_data";
+
+  private static final String ENTITIES_DIR = "entities";
 
   /** Default extension for output files */
   public static final String TIMELINE_SERVICE_STORAGE_EXTENSION = ".thist";
@@ -61,38 +63,25 @@ public class FileSystemTimelineWriterImpl extends 
AbstractService
 super((FileSystemTimelineWriterImpl.class.getName()));
   }
 
-  /**
-   * Stores the entire information in {@link TimelineEntity} to the
-   * timeline store. Any errors occurring for individual write request objects
-   * will be reported in the response.
-   *
-   * @param data
-   *  a {@link TimelineEntity} object
-   * @return {@link TimelineWriteResponse} object.
-   * @throws IOException
-   */
   @Override
-  public TimelineWriteResponse write(TimelineEntities entities)
-  throws IOException {
+  public TimelineWriteResponse write(String clusterId, String userId,
+  String flowId, String flowRunId, String appId,
+  TimelineEntities entities) throws IOException {
 TimelineWriteResponse response = new TimelineWriteResponse();
 for (TimelineEntity entity : entities.getEntities()) {
-  write(entity, response);
+  write(clusterId, userId, flowId, flowRunId, appId, entity, response);
 }
 return response;
   }
 
-  private void write(TimelineEntity entity,
+  private void write(String clusterId, String userId,
+  String flowId, String flowRunId, String appId, TimelineEntity entity,
   TimelineWriteResponse response) throws IOException {
 PrintWriter out = null;
 try {
-  File outputDir = new File(outputRoot + entity.getType());
-  String fileName = outputDir + "/" + entity.getId()
-  + TIMELINE_SERVICE_STORAGE_EXTENSION;
-  if (!outputDir.exists()) {
-if (!outputDir.mkdirs()) {
-  throw new IOException("Could not create directories for " + 
fileName);
-}
-  }
+  String dir = mkdirs(outputRoot, ENTITIES_DIR, clusterId, us

[40/50] [abbrv] hadoop git commit: YARN-3049. [Storage Implementation] Implement storage reader interface to fetch raw data from HBase backend (Zhijie Shen via sjlee)

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bdf34f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index fd5643d..ab02779 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -38,11 +39,15 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineWriterUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
@@ -71,6 +76,8 @@ public class TestHBaseTimelineWriterImpl {
   private static void createSchema() throws IOException {
 new EntityTable()
 .createTable(util.getHBaseAdmin(), util.getConfiguration());
+new AppToFlowTable()
+.createTable(util.getHBaseAdmin(), util.getConfiguration());
   }
 
   @Test
@@ -138,10 +145,15 @@ public class TestHBaseTimelineWriterImpl {
 te.addEntity(entity);
 
 HBaseTimelineWriterImpl hbi = null;
+HBaseTimelineReaderImpl hbr = null;
 try {
   Configuration c1 = util.getConfiguration();
   hbi = new HBaseTimelineWriterImpl(c1);
   hbi.init(c1);
+  hbi.start();
+  hbr = new HBaseTimelineReaderImpl();
+  hbr.init(c1);
+  hbr.start();
   String cluster = "cluster1";
   String user = "user1";
   String flow = "some_flow_name";
@@ -255,9 +267,22 @@ public class TestHBaseTimelineWriterImpl {
   assertEquals(1, rowCount);
   assertEquals(17, colCount);
 
+  TimelineEntity e1 = hbr.getEntity(user, cluster, flow, runid, appName,
+  entity.getType(), entity.getId(), 
EnumSet.of(TimelineReader.Field.ALL));
+  Set es1 = hbr.getEntities(user, cluster, flow, runid,
+  appName, entity.getType(), null, null, null, null, null, null, null,
+  null, null, null, null, EnumSet.of(TimelineReader.Field.ALL));
+  assertNotNull(e1);
+  assertEquals(1, es1.size());
 } finally {
-  hbi.stop();
-  hbi.close();
+  if (hbi != null) {
+hbi.stop();
+hbi.close();
+  }
+  if (hbr != null) {
+hbr.stop();
+hbr.close();
+  }
 }
 
 // Somewhat of a hack, not a separate test in order not to have to deal 
with
@@ -283,7 +308,7 @@ public class TestHBaseTimelineWriterImpl {
 
   private void testAdditionalEntity() throws IOException {
 TimelineEvent event = new TimelineEvent();
-String eventId = "foo_event_id";
+String eventId = ApplicationMetricsConstants.CREATED_EVENT_TYPE;
 event.setId(eventId);
 Long expTs = 1436512802000L;
 event.setTimestamp(expTs);
@@ -291,19 +316,23 @@ public class TestHBaseTimelineWriterImpl {
 Object expVal = "test";
 event.addInfo(expKey, expVal);
 
-final TimelineEntity entity = new TimelineEntity();
-entity.setId("attempt_1329348432655_0001_m_08_18");
-entity.setType("FOO_ATTEMPT");
+final TimelineEntity entity = new ApplicationEntity();
+entity.setId(ApplicationId.newInstance(

[24/50] [abbrv] hadoop git commit: YARN-3044. Made RM write app, attempt and optional container lifecycle events to timeline service v2. Contributed by Naganarasimha G R.

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a22d26c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
new file mode 100644
index 000..9830a80
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -0,0 +1,374 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.metrics;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.metrics.AbstractTimelineServicePublisher.MultiThreadedDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.AppLevelTimelineCollector;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestSystemMetricsPublisherForV2 {
+
+  /**
+   * is the folder where the FileSystemTimelineWriterImpl writes the entities
+   */
+  protected static File testRootDir = new File("target",
+  TestSystemMetricsPublisherForV2.class.getName() + "-localDir")
+  .getAbsoluteFile();
+
+  private static SystemMetricsPublisher metricsPublisher;
+  private static DrainDispatcher dispatcher = new DrainDispatcher();
+  private static final String DEFAULT_FLOW_VERSION = "1";
+  private static final long DEFAULT_FLOW_RUN = 1;
+
+  private static ConcurrentMap rmAppsMapInContext;
+

[04/50] [abbrv] hadoop git commit: YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration. 
Contributed by Sangjin Lee.

(cherry picked from commit 04de2ceccd02edb00bd671a63e04855c132e9735)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afa99256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afa99256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afa99256

Branch: refs/heads/YARN-2928
Commit: afa99256694dc3d4c4c2f74e2a5a9e7c846c57a2
Parents: 63c7210
Author: Zhijie Shen 
Authored: Fri Mar 20 00:20:24 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:38:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../timelineservice/collector/TimelineCollectorManager.java   | 3 +--
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afa99256/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 42da7bf..d9696c9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -35,6 +35,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-. Rename TimelineAggregator etc. to TimelineCollector. (Sangjin 
Lee
 via junping_du)
 
+YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
+(Sangjin Lee via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afa99256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 3691162..3a4515e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -239,8 +239,7 @@ public class TimelineCollectorManager extends 
CompositeService {
   + GenericExceptionHandler.class.getPackage().getName() + ";"
   + YarnJacksonJaxbJsonProvider.class.getPackage().getName(),
   "/*");
-  timelineRestServer.setAttribute(COLLECTOR_MANAGER_ATTR_KEY,
-  TimelineCollectorManager.getInstance());
+  timelineRestServer.setAttribute(COLLECTOR_MANAGER_ATTR_KEY, this);
   timelineRestServer.start();
 } catch (Exception e) {
   String msg = "The per-node collector webapp failed to start.";



[02/50] [abbrv] hadoop git commit: YARN-3040. Make putEntities operation be aware of the app's context. Contributed by Zhijie Shen

2015-08-25 Thread sjlee
YARN-3040. Make putEntities operation be aware of the app's context. 
Contributed by Zhijie Shen

(cherry picked from commit db2f0238915d6e1a5b85c463426b5e072bd4698d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8b5ab64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8b5ab64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8b5ab64

Branch: refs/heads/YARN-2928
Commit: e8b5ab64d4ecee98d3463826c50ce18dfa7eea43
Parents: a798523
Author: Junping Du 
Authored: Thu Mar 26 09:59:32 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:38:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   1 +
 .../applications/distributedshell/Client.java   |  27 +++-
 .../distributedshell/TestDistributedShell.java  | 125 +---
 .../yarn/util/timeline/TimelineUtils.java   |  16 +++
 .../api/CollectorNodemanagerProtocol.java   |  16 +++
 ...ollectorNodemanagerProtocolPBClientImpl.java |  20 +++
 ...llectorNodemanagerProtocolPBServiceImpl.java |  21 +++
 .../GetTimelineCollectorContextRequest.java |  37 +
 .../GetTimelineCollectorContextResponse.java|  46 ++
 ...etTimelineCollectorContextRequestPBImpl.java | 127 +
 ...tTimelineCollectorContextResponsePBImpl.java | 141 +++
 .../proto/collectornodemanager_protocol.proto   |   1 +
 .../yarn_server_common_service_protos.proto |   9 ++
 .../java/org/apache/hadoop/yarn/TestRPC.java|  39 +
 .../collectormanager/NMCollectorService.java|  18 ++-
 .../containermanager/ContainerManagerImpl.java  |  14 +-
 .../application/Application.java|   4 +
 .../application/ApplicationImpl.java|  17 ++-
 .../application/TestApplication.java|   3 +-
 .../yarn/server/nodemanager/webapp/MockApp.java |  10 ++
 .../nodemanager/webapp/TestNMWebServices.java   |   2 +-
 .../resourcemanager/amlauncher/AMLauncher.java  |  23 ++-
 .../timelineservice/RMTimelineCollector.java|   7 +
 .../TestTimelineServiceClientIntegration.java   |  19 ++-
 .../collector/AppLevelTimelineCollector.java|  33 -
 .../PerNodeTimelineCollectorsAuxService.java|   2 +-
 .../collector/TimelineCollector.java|  19 ++-
 .../collector/TimelineCollectorContext.java |  81 +++
 .../collector/TimelineCollectorManager.java |  32 -
 .../collector/TimelineCollectorWebService.java  |   2 +-
 .../storage/FileSystemTimelineWriterImpl.java   |  69 +
 .../timelineservice/storage/TimelineWriter.java |   9 +-
 ...TestPerNodeTimelineCollectorsAuxService.java |  43 --
 .../collector/TestTimelineCollectorManager.java |  41 +-
 .../TestFileSystemTimelineWriterImpl.java   |  22 ++-
 36 files changed, 956 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8b5ab64/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82ed275..ed5dc88 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -41,6 +41,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3034. Implement RM starting its timeline collector. (Naganarasimha G R
 via junping_du)
 
+YARN-3040. Make putEntities operation be aware of the app's context. 
(Zhijie Shen 
+via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8b5ab64/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e72c1c0..51f2b2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -124,6 +124,7 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PREFIX = "yarn.resourcemanager.";
 
   public static final String RM_CLUSTER_ID = RM_PREFIX + "cluster-id";
+  public static final String DEFAULT_RM_CLUSTER_ID = "yarn_cluster";
 
   public static final String RM_HOSTNAME = RM_PREFIX + "hostname";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8b5ab64/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/ja

[26/50] [abbrv] hadoop git commit: YARN-3721. build is broken on YARN-2928 branch due to possible dependency cycle (Li Lu via sjlee)

2015-08-25 Thread sjlee
YARN-3721. build is broken on YARN-2928 branch due to possible dependency cycle 
(Li Lu via sjlee)

(cherry picked from commit a9738ceb17b50cce8844fd42bb800c7f83f15caf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/534aae59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/534aae59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/534aae59

Branch: refs/heads/YARN-2928
Commit: 534aae59ac8328edf75dadca99b06f6c4ecddc39
Parents: d0d1cc0
Author: Sangjin Lee 
Authored: Thu May 28 12:03:53 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:12 2015 -0700

--
 hadoop-project/pom.xml  | 97 ++--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop-yarn-server-timelineservice/pom.xml  |  1 -
 3 files changed, 53 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/534aae59/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7a35939..1182d6d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -996,55 +996,58 @@
 
   
 
-
-  org.apache.hbase
-  hbase-client
-  ${hbase.version}
-
-
-  org.apache.phoenix
-  phoenix-core
-  ${phoenix.version}
-  
-
-
-  jline
-  jline
-
-  
-
-
-  org.apache.phoenix
-  phoenix-core
-  test-jar
-  ${phoenix.version}
-  test
-
-
-  org.apache.hbase
-  hbase-it
-  ${hbase.version}
-  test
-  tests
-
-
-  org.apache.hbase
-  hbase-testing-util
-  ${hbase.version}
-  test
-  true
-  
-
-  org.jruby
-  jruby-complete
-
-
+  
+org.apache.hbase
+hbase-client
+${hbase.version}
+  
+  
+org.apache.phoenix
+phoenix-core
+${phoenix.version}
+
+  
+  
+jline
+jline
+  
+
+  
+  
+org.apache.phoenix
+phoenix-core
+test-jar
+${phoenix.version}
+test
+  
+  
+org.apache.hbase
+hbase-it
+${hbase.version}
+test
+tests
+  
+  
+org.apache.hbase
+hbase-testing-util
+${hbase.version}
+test
+true
+
+  
+org.jruby
+jruby-complete
+  
+  
 org.apache.hadoop
 hadoop-hdfs
-
-  
-
-
+  
+  
+org.apache.hadoop
+hadoop-minicluster
+  
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/534aae59/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bc3880c..5d8aefc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -79,6 +79,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
 test data (Vrushali C via sjlee)
 
+YARN-3721. build is broken on YARN-2928 branch due to possible dependency
+cycle (Li Lu via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/534aae59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index 1e914de..da7fadf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -159,7 +159,6 @@
   test
   true
 
-
   
 
   



[30/50] [abbrv] hadoop git commit: YARN-3801. [JDK-8] Exclude jdk.tools from hbase-client and hbase-testing-util (Tsuyoshi Ozawa via sjlee)

2015-08-25 Thread sjlee
YARN-3801. [JDK-8] Exclude jdk.tools from hbase-client and hbase-testing-util 
(Tsuyoshi Ozawa via sjlee)

(cherry picked from commit a1bb9137af84a34bde799f45e7ab8a21e33d55e0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/623b3abc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/623b3abc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/623b3abc

Branch: refs/heads/YARN-2928
Commit: 623b3abc6789da026657b18ee6de6ae106e2af37
Parents: a22d26c
Author: Sangjin Lee 
Authored: Mon Jun 15 21:15:33 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:13 2015 -0700

--
 hadoop-project/pom.xml  | 10 ++
 hadoop-yarn-project/CHANGES.txt |  3 +++
 2 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/623b3abc/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1182d6d..faac713 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1000,6 +1000,12 @@
 org.apache.hbase
 hbase-client
 ${hbase.version}
+
+  
+jdk.tools
+jdk.tools
+  
+
   
   
 org.apache.phoenix
@@ -1046,6 +1052,10 @@
 org.apache.hadoop
 hadoop-minicluster
   
+  
+jdk.tools
+jdk.tools
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/623b3abc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a673fb2..1cf1f26 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -85,6 +85,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3044. Made RM write app, attempt and optional container lifecycle
 events to timeline service v2. (Naganarasimha G R via zjshen)
 
+YARN-3801. [JDK-8] Exclude jdk.tools from hbase-client and
+hbase-testing-util (Tsuyoshi Ozawa via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via



[23/50] [abbrv] hadoop git commit: YARN-3411. [Storage implementation] explore the native HBase write schema for storage (Vrushali C via sjlee)

2015-08-25 Thread sjlee
YARN-3411. [Storage implementation] explore the native HBase write schema for 
storage (Vrushali C via sjlee)

(cherry picked from commit 7a3068854d27eadae1c57545988f5b2029bf119a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3edd630
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3edd630
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3edd630

Branch: refs/heads/YARN-2928
Commit: b3edd6301b4ec8c59928cca0d5a41c94fb23740d
Parents: e93fa60
Author: Sangjin Lee 
Authored: Thu May 21 14:11:01 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../collector/TimelineCollectorManager.java |  19 +
 .../storage/EntityColumnDetails.java| 110 ++
 .../storage/EntityColumnFamily.java |  95 +
 .../storage/HBaseTimelineWriterImpl.java| 225 
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 231 +
 .../storage/TimelineWriterUtils.java| 344 +++
 .../storage/TestHBaseTimelineWriterImpl.java| 292 
 10 files changed, 1448 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3edd630/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ec9abc9..3df01e9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -73,6 +73,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
 Sangjin Lee via junping_du)
 
+YARN-3411. [Storage implementation] explore the native HBase write schema
+for storage (Vrushali C via sjlee)
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3edd630/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 953d9b7..d54715c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -59,6 +59,13 @@ public abstract class TimelineCollectorManager extends 
AbstractService {
 super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+super.serviceStart();
+if (writer != null) {
+  writer.start();
+}
+  }
 
   // access to this map is synchronized with the map itself
   private final Map collectors =
@@ -147,4 +154,16 @@ public abstract class TimelineCollectorManager extends 
AbstractService {
 return collectors.containsKey(appId);
   }
 
+  @Override
+  protected void serviceStop() throws Exception {
+if (collectors != null && collectors.size() > 1) {
+  for (TimelineCollector c : collectors.values()) {
+c.serviceStop();
+  }
+}
+if (writer != null) {
+  writer.close();
+}
+super.serviceStop();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3edd630/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
new file mode 100644
index 000..2894c41
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org

[07/50] [abbrv] hadoop git commit: YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by Sangjin Lee

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c7210c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
new file mode 100644
index 000..3498de9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import java.util.List;
+import java.util.Arrays;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
+import org.apache.hadoop.yarn.util.Records;
+
+@Private
+public abstract class ReportNewCollectorInfoRequest {
+
+  public static ReportNewCollectorInfoRequest newInstance(
+  List appCollectorsList) {
+ReportNewCollectorInfoRequest request =
+Records.newRecord(ReportNewCollectorInfoRequest.class);
+request.setAppCollectorsList(appCollectorsList);
+return request;
+  }
+
+  public static ReportNewCollectorInfoRequest newInstance(
+  ApplicationId id, String collectorAddr) {
+ReportNewCollectorInfoRequest request =
+Records.newRecord(ReportNewCollectorInfoRequest.class);
+request.setAppCollectorsList(
+Arrays.asList(AppCollectorsMap.newInstance(id, collectorAddr)));
+return request;
+  }
+
+  public abstract List getAppCollectorsList();
+
+  public abstract void setAppCollectorsList(
+  List appCollectorsList);
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c7210c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoResponse.java
new file mode 100644
index 000..4157c47
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoResponse.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.util.Records;
+
+public abstract class ReportNewCollectorInfoResponse {
+
+  @Private
+  public static ReportNewCollect

[03/50] [abbrv] hadoop git commit: YARN-3034. Implement RM starting its timeline collector. Contributed by Naganarasimha G R

2015-08-25 Thread sjlee
YARN-3034. Implement RM starting its timeline collector. Contributed by 
Naganarasimha G R

(cherry picked from commit dc12cad2b89f643dafa0def863325cb374c7670c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7985239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7985239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7985239

Branch: refs/heads/YARN-2928
Commit: a798523971852d15a4a9098f4e689e2c801fab06
Parents: afa9925
Author: Junping Du 
Authored: Tue Mar 24 13:42:14 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:38:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  14 ++-
 .../src/main/resources/yarn-default.xml |  11 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   4 +
 .../resourcemanager/RMActiveServiceContext.java |  43 
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  14 ++-
 .../server/resourcemanager/ResourceManager.java |  44 ++--
 .../metrics/SystemMetricsPublisher.java |  29 +++---
 .../timelineservice/RMTimelineCollector.java| 104 +++
 10 files changed, 241 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7985239/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d9696c9..82ed275 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -38,6 +38,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
 (Sangjin Lee via zjshen)
 
+YARN-3034. Implement RM starting its timeline collector. (Naganarasimha G R
+via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7985239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9d5b63b..e72c1c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -373,12 +373,20 @@ public class YarnConfiguration extends Configuration {
 
   /**
*  The setting that controls whether yarn system metrics is published on the
-   *  timeline server or not by RM.
+   *  timeline server or not by RM. This configuration setting is for ATS V1
*/
-  public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED =
-  RM_PREFIX + "system-metrics-publisher.enabled";
+  public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED = RM_PREFIX
+  + "system-metrics-publisher.enabled";
   public static final boolean DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED = 
false;
 
+  /**
+   *  The setting that controls whether yarn system metrics is published on the
+   *  timeline server or not by RM and NM. This configuration setting is for 
ATS V2
+   */
+  public static final String SYSTEM_METRICS_PUBLISHER_ENABLED = YARN_PREFIX
+  + "system-metrics-publisher.enabled";
+  public static final boolean DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED = false;
+
   public static final String RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =
   RM_PREFIX + "system-metrics-publisher.dispatcher.pool-size";
   public static final int 
DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7985239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 78b6ae8..10450a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -746,12 +746,21 @@
 
   
 The setting that controls whether yarn system metrics is
-published on the timeline server or not by RM.
+published to the Timeline server (version one) or not, by RM. 
+This configuration is

[39/50] [abbrv] hadoop git commit: YARN-3993. Changed to use the AM flag in ContainerContext determine AM container in TestPerNodeTimelineCollectorsAuxService. Contributed by Sunil G.

2015-08-25 Thread sjlee
YARN-3993. Changed to use the AM flag in ContainerContext determine AM 
container in TestPerNodeTimelineCollectorsAuxService. Contributed by Sunil G.

(cherry picked from commit 9e48f9ff2ce08f3dcdd8d60bacb697664b92196f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93e636d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93e636d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93e636d

Branch: refs/heads/YARN-2928
Commit: d93e636d1ddb4e63a47e0c43189abc6e6b57f672
Parents: 5b3b86a
Author: Zhijie Shen 
Authored: Mon Aug 3 16:55:44 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../collector/PerNodeTimelineCollectorsAuxService.java | 13 +++--
 .../TestPerNodeTimelineCollectorsAuxService.java   |  5 +
 3 files changed, 11 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93e636d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 69112b5..f5ce32a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -118,6 +118,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3908. Fixed bugs in HBaseTimelineWriterImpl. (Vrushali C and Sangjin
 Lee via zjshen)
 
+YARN-3993. Changed to use the AM flag in ContainerContext determine AM
+container in TestPerNodeTimelineCollectorsAuxService. (Sunil G via zjshen)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93e636d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 3ede97a..befaa83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.api.ContainerContext;
 import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
 import org.apache.hadoop.yarn.server.api.ContainerTerminationContext;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -119,7 +120,7 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   public void initializeContainer(ContainerInitializationContext context) {
 // intercept the event of the AM container being created and initialize the
 // app level collector service
-if (isApplicationMaster(context)) {
+if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
   ApplicationId appId = context.getContainerId().
   getApplicationAttemptId().getApplicationId();
   addApplication(appId);
@@ -135,21 +136,13 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   public void stopContainer(ContainerTerminationContext context) {
 // intercept the event of the AM container being stopped and remove the app
 // level collector service
-if (isApplicationMaster(context)) {
+if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
   ApplicationId appId = context.getContainerId().
   getApplicationAttemptId().getApplicationId();
   removeApplication(appId);
 }
   }
 
-  private boolean isApplicationMaster(ContainerContext context) {
-// TODO this is based on a (shaky) assumption that the container id (the
-// last field of the full container id) for an AM is always 1
-// we want to make this much more reliable
-ContainerId containerId = context.getContainerId();
-return containerId.getContainerId() == 1L;
-  }
-
   @VisibleForTesting
   boolean hasApplication(ApplicationId appId) {
 return collectorManager.

[35/50] [abbrv] hadoop git commit: YARN-3047. [Data Serving] Set up ATS reader with basic request serving structure and lifecycle (Varun Saxena via sjlee)

2015-08-25 Thread sjlee
YARN-3047. [Data Serving] Set up ATS reader with basic request serving 
structure and lifecycle (Varun Saxena via sjlee)

(cherry picked from commit 4c5f88fb0f04b7919738d07598b0f006a9ff91f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e281987f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e281987f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e281987f

Branch: refs/heads/YARN-2928
Commit: e281987f7b78295c8f511b6c63ae8d8b2a7cc8a9
Parents: 89e6c69
Author: Sangjin Lee 
Authored: Wed Jul 8 17:10:10 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|   8 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../hadoop/yarn/webapp/util/WebAppUtils.java|   6 +-
 .../reader/TimelineReaderManager.java   |  36 
 .../reader/TimelineReaderServer.java| 169 +++
 .../reader/TimelineReaderWebServices.java   |  59 +++
 .../reader/TestTimelineReaderServer.java|  51 ++
 .../reader/TestTimelineReaderWebServices.java   | 121 +
 10 files changed, 459 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e281987f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 58232b7..d77ad59 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -76,6 +76,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3051. Created storage oriented reader interface for fetching raw 
entity
 data and made the filesystem based implementation. (Varun Saxena via 
zjshen)
 
+YARN-3047. [Data Serving] Set up ATS reader with basic request serving
+structure and lifecycle (Varun Saxena via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e281987f/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index f0bed9b..b6d1204 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -43,6 +43,7 @@ function hadoop_usage
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager 
daemon"
   hadoop_add_subcommand "timelineserver" "run the timeline server"
+  hadoop_add_subcommand "timelinereader" "run the timeline reader server"
   hadoop_add_subcommand "top" "view cluster information"
   hadoop_add_subcommand "version" "print the version"
   hadoop_generate_usage "${MYNAME}" true
@@ -179,6 +180,10 @@ case "${COMMAND}" in
   HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
 fi
   ;;
+  timelinereader)
+supportdaemonization="true"
+
CLASS='org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer'
+  ;;
   version)
 CLASS=org.apache.hadoop.util.VersionInfo
 hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e281987f/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index 91c90fb..4c36307 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -151,7 +151,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt container node queue logs daemonlog 
historyserver ^
- timelineserver classpath
+ timelineserver timelinereader classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -242,6 +242,11 @@ goto :eof
   )
   goto :eof
 
+:timelinereader
+  set 
CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
+  set 
CLASS=org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer
+  goto :eof
+
 :nodemanager
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
@@ -312,6 +317,7 @@ goto :eof
   @echo   resourcemanager  run the ResourceManager
   @echo   nodemanager  run a nodemanager on each slave
   @echo   timelineserver   run the timeline ser

[41/50] [abbrv] hadoop git commit: YARN-3049. [Storage Implementation] Implement storage reader interface to fetch raw data from HBase backend (Zhijie Shen via sjlee)

2015-08-25 Thread sjlee
YARN-3049. [Storage Implementation] Implement storage reader interface to fetch 
raw data from HBase backend (Zhijie Shen via sjlee)

(cherry picked from commit 07433c2ad52df9e844dbd90020c277d3df844dcd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bdf34f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bdf34f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bdf34f5

Branch: refs/heads/YARN-2928
Commit: 4bdf34f5b1f17e49e5dcf67b219aec65f4608457
Parents: d1a1a22
Author: Sangjin Lee 
Authored: Fri Aug 7 10:00:22 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   6 +
 .../records/timelineservice/TimelineEntity.java |   9 +-
 .../storage/FileSystemTimelineReaderImpl.java   | 164 +++
 .../storage/HBaseTimelineReaderImpl.java| 424 +++
 .../storage/HBaseTimelineWriterImpl.java|  43 +-
 .../storage/TimelineSchemaCreator.java  |  12 +
 .../storage/apptoflow/AppToFlowColumn.java  | 126 ++
 .../apptoflow/AppToFlowColumnFamily.java|  51 +++
 .../storage/apptoflow/AppToFlowRowKey.java  |  39 ++
 .../storage/apptoflow/AppToFlowTable.java   | 110 +
 .../storage/apptoflow/package-info.java |  23 +
 .../storage/common/BaseTable.java   |  16 +
 .../storage/common/ColumnPrefix.java|   2 +-
 .../common/TimelineEntitySchemaConstants.java   |  68 ---
 .../common/TimelineHBaseSchemaConstants.java|  68 +++
 .../storage/common/TimelineReaderUtils.java | 112 +
 .../storage/entity/EntityColumn.java|   2 +-
 .../storage/entity/EntityColumnFamily.java  |   2 +-
 .../storage/entity/EntityColumnPrefix.java  |   2 +-
 .../storage/entity/EntityRowKey.java|  36 +-
 .../storage/entity/EntityTable.java |   8 +-
 .../storage/TestHBaseTimelineWriterImpl.java|  82 +++-
 23 files changed, 1198 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bdf34f5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a484040..2e06793 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -82,6 +82,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3949. Ensure timely flush of timeline writes. (Sangjin Lee via
 junping_du)
 
+YARN-3049. [Storage Implementation] Implement storage reader interface to
+fetch raw data from HBase backend (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bdf34f5/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d25d1d9..5583cd6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -506,4 +506,10 @@
 
 
   
+
+  
+  
+
+ 
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bdf34f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 9ef2d90..0701001 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -29,7 +29,9 @@ import javax.xml.bind.annotation.XmlRootElement;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.Set;
+import java.util.TreeSet;
 
 /**
  * The basic timeline entity data structure for timeline service v2. Timeline
@@ -133,7 +135,8 @@ public class TimelineEntity implements 
Comparable {
   private HashMap info = new HashMap<>();
   private HashMap configs = new HashMap<>();
   private Set metrics = new HashSet<>();
-  private Set events = new 

[45/50] [abbrv] hadoop git commit: MAPREDUCE-6337. Added a mode to replay MR job history files and put them into the timeline service v2. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
MAPREDUCE-6337. Added a mode to replay MR job history files and put them into 
the timeline service v2. Contributed by Sangjin Lee.

(cherry picked from commit 463e070a8e7c882706a96eaa20ea49bfe9982875)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09a8b7b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09a8b7b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09a8b7b9

Branch: refs/heads/YARN-2928
Commit: 09a8b7b94b08572c3e279a9de3103529acbccc1f
Parents: 51029a7
Author: Zhijie Shen 
Authored: Thu May 14 15:16:33 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../hadoop/mapred/JobHistoryFileParser.java |  53 
 .../mapred/JobHistoryFileReplayMapper.java  | 301 +++
 .../hadoop/mapred/SimpleEntityWriter.java   | 139 +
 .../hadoop/mapred/TimelineEntityConverter.java  | 207 +
 .../mapred/TimelineServicePerformanceV2.java| 191 
 .../collector/TimelineCollectorManager.java |   8 +-
 .../storage/FileSystemTimelineWriterImpl.java   |  23 +-
 .../timelineservice/storage/package-info.java   |  24 ++
 9 files changed, 809 insertions(+), 140 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a8b7b9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2805780..9c66a5e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -12,6 +12,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 MAPREDUCE-6335. Created MR job based performance test driver for the
 timeline service v2. (Sangjin Lee via zjshen)
 
+MAPREDUCE-6337. Added a mode to replay MR job history files and put them
+into the timeline service v2. (Sangjin Lee via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a8b7b9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
new file mode 100644
index 000..9d051df
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+
+class JobHistoryFileParser {
+  private static final Log LOG = LogFactory.getLog(JobHistoryFileParser.class);
+
+  private final FileSystem fs;
+
+  public JobHistoryFileParser(FileSystem fs) {
+LOG.info("JobHistoryFileParser created with " + fs);
+this.fs = fs;
+  }
+
+  public JobInfo parseHistoryFile(Path path) throws IOException {
+LOG.info("parsing job history file " + path);
+JobHistoryParser parser = new JobHistoryParser(fs, path);
+return parser.parse();
+  }
+
+  public Configuration parseConfiguration(Path path) throws IOException {
+LOG.info("parsing job configuration file " + path);
+Configuration conf = new Configuration(false);
+conf.addResource(fs.open(path));
+

[19/50] [abbrv] hadoop git commit: YARN-3529. Added mini HBase cluster and Phoenix support to timeline service v2 unit tests. Contributed by Li Lu.

2015-08-25 Thread sjlee
YARN-3529. Added mini HBase cluster and Phoenix support to timeline service v2 
unit tests. Contributed by Li Lu.

(cherry picked from commit 9b0ae9363dbce36f70aa92415a1178f65f3c1dcd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a68cdab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a68cdab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a68cdab

Branch: refs/heads/YARN-2928
Commit: 7a68cdab24df2d584270227beaea8971d94d66c1
Parents: 9b794ab
Author: Zhijie Shen 
Authored: Tue May 12 13:53:38 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:11 2015 -0700

--
 hadoop-project/pom.xml  | 52 
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop-yarn-server-timelineservice/pom.xml  | 47 ++
 .../storage/PhoenixTimelineWriterImpl.java  | 22 ++-
 .../storage/TestPhoenixTimelineWriterImpl.java  | 65 ++--
 5 files changed, 155 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68cdab/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index acc021d..7a35939 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -45,6 +45,9 @@
 
 2.11.0
 
+1.0.1
+4.5.0-SNAPSHOT
+
 ${project.version}
 
 1.0.13
@@ -993,6 +996,55 @@
 
   
 
+
+  org.apache.hbase
+  hbase-client
+  ${hbase.version}
+
+
+  org.apache.phoenix
+  phoenix-core
+  ${phoenix.version}
+  
+
+
+  jline
+  jline
+
+  
+
+
+  org.apache.phoenix
+  phoenix-core
+  test-jar
+  ${phoenix.version}
+  test
+
+
+  org.apache.hbase
+  hbase-it
+  ${hbase.version}
+  test
+  tests
+
+
+  org.apache.hbase
+  hbase-testing-util
+  ${hbase.version}
+  test
+  true
+  
+
+  org.jruby
+  jruby-complete
+
+
+org.apache.hadoop
+hadoop-hdfs
+
+  
+
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68cdab/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a290c22..0b06502 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3134. Implemented Phoenix timeline writer to access HBase backend. (Li
 Lu via zjshen)
 
+YARN-3529. Added mini HBase cluster and Phoenix support to timeline service
+v2 unit tests. (Li Lu via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68cdab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index f62230f..1e914de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -122,21 +122,44 @@
 
 
 
-org.apache.phoenix
-phoenix-core
-4.3.0
-
-  
-  
-jline
-jline
-  
-
+  org.apache.phoenix
+  phoenix-core
 
 
-  com.google.guava
-  guava
+  org.apache.hbase
+  hbase-client
+
+
+
+  org.apache.hadoop
+  hadoop-hdfs
+  test
+
+
+  org.apache.hadoop
+  hadoop-hdfs
+  test-jar
+  test
 
+
+  org.apache.phoenix
+  phoenix-core
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-it
+  test
+  tests
+
+
+  org.apache.hbase
+  hbase-testing-util
+  test
+  true
+
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68cdab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixTimelineWriterImpl.java
 
b/hadoop-yarn-pr

[16/50] [abbrv] hadoop git commit: MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events and counters. Contributed by Junping Du.

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e58f943/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index eab9026..b3ea26e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -18,22 +18,45 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.File;
+import java.io.IOException;
+
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.EventType;
 import org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import org.junit.Assert;
 import org.junit.Test;
 
 public class TestMRTimelineEventHandling {
 
+  private static final String TIMELINE_AUX_SERVICE_NAME = "timeline_collector";
+  private static final Log LOG =
+LogFactory.getLog(TestMRTimelineEventHandling.class);
+  
   @Test
   public void testTimelineServiceStartInMiniCluster() throws Exception {
 Configuration conf = new YarnConfiguration();
@@ -47,7 +70,7 @@ public class TestMRTimelineEventHandling {
 MiniMRYarnCluster cluster = null;
 try {
   cluster = new MiniMRYarnCluster(
-  TestJobHistoryEventHandler.class.getSimpleName(), 1);
+TestMRTimelineEventHandling.class.getSimpleName(), 1);
   cluster.init(conf);
   cluster.start();
 
@@ -88,7 +111,7 @@ public class TestMRTimelineEventHandling {
 MiniMRYarnCluster cluster = null;
 try {
   cluster = new MiniMRYarnCluster(
-  TestJobHistoryEventHandler.class.getSimpleName(), 1);
+TestMRTimelineEventHandling.class.getSimpleName(), 1);
   cluster.init(conf);
   cluster.start();
   TimelineStore ts = cluster.getApplicationHistoryServer()
@@ -132,6 +155,140 @@ public class TestMRTimelineEventHandling {
   }
 }
   }
+  
+  @Test
+  public void testMRNewTimelineServiceEventHandling() throws Exception {
+LOG.info("testMRNewTimelineServiceEventHandling start.");
+Configuration conf = new YarnConfiguration();
+conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+
+// enable new timeline service in MR side
+conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_NEW_TIMELINE_SERVICE_ENABLED, 
true);
+
+// enable aux-service based timeline collectors
+conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
+conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + 
TIMELINE_AUX_SERVICE_NAME
+  + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
+
+conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
+
+MiniMRYarnCluster cluster = null;
+try {
+  cluster = new MiniMRYarnCluster(
+  TestMRTimelineEventHandling.class.getSimpleName(), 1, true);
+  cluster.init(conf);
+  cluster.start();
+  LOG.info("A MiniMRYarnCluster get start.");
+
+  Path inDir = new Path("input");
+  Path outDir = new Path("output");
+  LOG.info("Run 1st job which should be successful.");
+  RunningJob job =
+  UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir)

[27/50] [abbrv] hadoop git commit: YARN-3276. Code cleanup for timeline service API records. Contributed by Junping Du.

2015-08-25 Thread sjlee
YARN-3276. Code cleanup for timeline service API records. Contributed by 
Junping Du.

(cherry picked from commit d88f30ba5359f59fb71b93a55e1c1d9a1c0dff8e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dd2397e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dd2397e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dd2397e

Branch: refs/heads/YARN-2928
Commit: 6dd2397e520f18a1bb84d6dc1f19e8762e569040
Parents: 534aae5
Author: Zhijie Shen 
Authored: Wed Jun 3 15:13:29 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../api/records/timeline/TimelineEntity.java| 21 ++
 .../api/records/timeline/TimelineEvent.java |  8 +--
 .../records/timelineservice/TimelineEntity.java | 33 ++---
 .../records/timelineservice/TimelineEvent.java  |  7 +-
 .../records/timelineservice/TimelineMetric.java |  2 +-
 .../hadoop/yarn/util/TimelineServiceHelper.java | 47 
 .../impl/pb/AllocateResponsePBImpl.java |  4 +-
 .../yarn/util/TestTimelineServiceHelper.java| 76 
 9 files changed, 147 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd2397e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5d8aefc..b3e8738 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -84,6 +84,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   IMPROVEMENTS
 
+YARN-3276. Code cleanup for timeline service API records. (Junping Du via
+zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd2397e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
index a43259b..e695050 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
@@ -34,6 +34,7 @@ import javax.xml.bind.annotation.XmlRootElement;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 
 /**
  * 
@@ -231,11 +232,8 @@ public class TimelineEntity implements 
Comparable {
*/
   public void setRelatedEntities(
   Map> relatedEntities) {
-if (relatedEntities != null && !(relatedEntities instanceof HashMap)) {
-  this.relatedEntities = new HashMap>(relatedEntities);
-} else {
-  this.relatedEntities = (HashMap>) relatedEntities;
-}
+this.relatedEntities = TimelineServiceHelper.mapCastToHashMap(
+relatedEntities);
   }
 
   /**
@@ -297,11 +295,8 @@ public class TimelineEntity implements 
Comparable {
*  a map of primary filters
*/
   public void setPrimaryFilters(Map> primaryFilters) {
-if (primaryFilters != null && !(primaryFilters instanceof HashMap)) {
-  this.primaryFilters = new HashMap>(primaryFilters);
-} else {
-  this.primaryFilters = (HashMap>) primaryFilters;
-}
+this.primaryFilters =
+TimelineServiceHelper.mapCastToHashMap(primaryFilters);
   }
 
   /**
@@ -350,11 +345,7 @@ public class TimelineEntity implements 
Comparable {
*  a map of other information
*/
   public void setOtherInfo(Map otherInfo) {
-if (otherInfo != null && !(otherInfo instanceof HashMap)) {
-  this.otherInfo = new HashMap(otherInfo);
-} else {
-  this.otherInfo = (HashMap) otherInfo;
-}
+this.otherInfo = TimelineServiceHelper.mapCastToHashMap(otherInfo);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd2397e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/a

[48/50] [abbrv] hadoop git commit: YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)

2015-08-25 Thread sjlee
YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e979f308
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e979f308
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e979f308

Branch: refs/heads/YARN-2928
Commit: e979f30831fc87f4c535df045527b466fe584ace
Parents: 89f8fd2
Author: Sangjin Lee 
Authored: Wed Aug 19 17:46:03 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:52:45 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../storage/TestHBaseTimelineStorage.java   | 770 +++
 .../storage/TestHBaseTimelineWriterImpl.java| 770 ---
 3 files changed, 772 insertions(+), 770 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e979f308/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd7f849..0041f7f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -139,6 +139,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3984. Adjusted the event column key schema and avoided missing empty
 event. (Vrushali C via zjshen)
 
+YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e979f308/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
new file mode 100644
index 000..2875e01
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -0,0 +1,770 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn

[43/50] [abbrv] hadoop git commit: YARN-3904. Refactor timelineservice.storage to add support to online and offline aggregation writers (Li Lu via sjlee)

2015-08-25 Thread sjlee
YARN-3904. Refactor timelineservice.storage to add support to online and 
offline aggregation writers (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97f211b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97f211b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97f211b4

Branch: refs/heads/YARN-2928
Commit: 97f211b485a4fb5f49ce8775d873afba869f8e7c
Parents: 40d9d46
Author: Sangjin Lee 
Authored: Mon Aug 17 16:48:58 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:16 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   7 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../storage/OfflineAggregationWriter.java   |  66 +++
 .../PhoenixOfflineAggregationWriterImpl.java| 356 +
 .../storage/PhoenixTimelineWriterImpl.java  | 530 ---
 .../storage/TimelineSchemaCreator.java  |  45 +-
 .../storage/common/OfflineAggregationInfo.java  | 110 
 ...TestPhoenixOfflineAggregationWriterImpl.java | 162 ++
 .../storage/TestPhoenixTimelineWriterImpl.java  | 152 --
 .../storage/TestTimelineWriterImpl.java |  74 ---
 11 files changed, 754 insertions(+), 761 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97f211b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a4aeef8..3e1f212 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -88,6 +88,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3906. Split the application table from the entity table. (Sangjin Lee 
 via junping_du)
 
+YARN-3904. Refactor timelineservice.storage to add support to online and
+offline aggregation writers (Li Lu via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97f211b4/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5583cd6..691170e 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -488,13 +488,12 @@
   
   
   
-
+
 
   
-  
-  
-  
+  
 
+
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97f211b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2c59bd4..faab8a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1737,6 +1737,16 @@ public class YarnConfiguration extends Configuration {
   public static final longDEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME =
   7*24*60*60*1000; // 7 days
 
+  // Timeline service v2 offline aggregation related keys
+  public static final String TIMELINE_OFFLINE_AGGREGATION_PREFIX =
+  YarnConfiguration.TIMELINE_SERVICE_PREFIX + "aggregation.offline.";
+  public static final String PHOENIX_OFFLINE_STORAGE_CONN_STR
+  = TIMELINE_OFFLINE_AGGREGATION_PREFIX
+  + "phoenix.connectionString";
+
+  public static final String PHOENIX_OFFLINE_STORAGE_CONN_STR_DEFAULT
+  = "jdbc:phoenix:localhost:2181:/hbase";
+
   // ///
   // Shared Cache Configs
   // ///

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97f211b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/OfflineAggregationWriter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/OfflineAggregationWriter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelinese

[17/50] [abbrv] hadoop git commit: MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events and counters. Contributed by Junping Du.

2015-08-25 Thread sjlee
MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events 
and counters. Contributed by Junping Du.

(cherry picked from commit 5eeb2b156f8e108205945f0a1d06873cb51c3527)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e58f943
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e58f943
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e58f943

Branch: refs/heads/YARN-2928
Commit: 8e58f943ffbb9b6c02422984ee2c77ae45948c9e
Parents: 8fd1aa1
Author: Zhijie Shen 
Authored: Tue Apr 21 16:31:33 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:10 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  15 ++
 .../jobhistory/JobHistoryEventHandler.java  | 258 ---
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  23 ++
 .../v2/app/rm/RMContainerAllocator.java |   9 +
 .../hadoop/mapreduce/jobhistory/TestEvents.java |   8 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   9 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java|   5 +
 .../mapreduce/jobhistory/AMStartedEvent.java|  18 ++
 .../mapreduce/jobhistory/HistoryEvent.java  |   4 +
 .../mapreduce/jobhistory/JobFinishedEvent.java  |  25 ++
 .../jobhistory/JobInfoChangeEvent.java  |  11 +
 .../mapreduce/jobhistory/JobInitedEvent.java|  14 +
 .../jobhistory/JobPriorityChangeEvent.java  |  10 +
 .../jobhistory/JobQueueChangeEvent.java |  10 +
 .../jobhistory/JobStatusChangedEvent.java   |  10 +
 .../mapreduce/jobhistory/JobSubmittedEvent.java |  23 ++
 .../JobUnsuccessfulCompletionEvent.java |  16 ++
 .../jobhistory/MapAttemptFinishedEvent.java |  24 +-
 .../jobhistory/NormalizedResourceEvent.java |  11 +
 .../jobhistory/ReduceAttemptFinishedEvent.java  |  25 +-
 .../jobhistory/TaskAttemptFinishedEvent.java|  19 ++
 .../jobhistory/TaskAttemptStartedEvent.java |  18 ++
 .../TaskAttemptUnsuccessfulCompletionEvent.java |  24 ++
 .../mapreduce/jobhistory/TaskFailedEvent.java   |  19 ++
 .../mapreduce/jobhistory/TaskFinishedEvent.java |  18 ++
 .../mapreduce/jobhistory/TaskStartedEvent.java  |  12 +
 .../mapreduce/jobhistory/TaskUpdatedEvent.java  |  10 +
 .../mapreduce/util/JobHistoryEventUtils.java|  51 
 .../src/main/resources/mapred-default.xml   |   7 +
 .../mapred/TestMRTimelineEventHandling.java | 163 +++-
 .../hadoop/mapreduce/v2/MiniMRYarnCluster.java  |  21 +-
 31 files changed, 838 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e58f943/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 305b29e..5ac0d3b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,20 @@
 Hadoop MapReduce Change Log
 
+Branch YARN-2928: Timeline Server Next Generation: Phase 1
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history
+events and counters. (Junping Du via zjshen)
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e58f943/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index b0bcfcd..1e84e44 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.mapreduce.jobhistory;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -49,11 +52,13 @@ import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.util.JobHi

[20/50] [abbrv] hadoop git commit: YARN-3134. Implemented Phoenix timeline writer to access HBase backend. Contributed by Li Lu.

2015-08-25 Thread sjlee
YARN-3134. Implemented Phoenix timeline writer to access HBase backend. 
Contributed by Li Lu.

(cherry picked from commit b3b791be466be79e4e964ad068f7a6ec701e22e1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b794ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b794ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b794ab1

Branch: refs/heads/YARN-2928
Commit: 9b794ab16dde75a088469f6393aab73f198e2a9a
Parents: ed95a79
Author: Zhijie Shen 
Authored: Fri May 8 19:08:02 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|  11 +
 .../hadoop-yarn-server-timelineservice/pom.xml  |  17 +
 .../collector/TimelineCollector.java|  13 +-
 .../collector/TimelineCollectorManager.java |  19 +
 .../storage/PhoenixTimelineWriterImpl.java  | 509 +++
 .../storage/TestPhoenixTimelineWriterImpl.java  | 125 +
 .../storage/TestTimelineWriterImpl.java |  74 +++
 8 files changed, 760 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b794ab1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82f95d6..a290c22 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -64,6 +64,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3562. unit tests failures and issues found from findbug from earlier
 ATS checkins (Naganarasimha G R via sjlee)
 
+YARN-3134. Implemented Phoenix timeline writer to access HBase backend. (Li
+Lu via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b794ab1/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 114851f..d25d1d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -485,6 +485,17 @@
 
 
   
+  
+  
+  
+
+
+  
+  
+  
+  
+
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b794ab1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index f974aee..f62230f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -120,6 +120,23 @@
   mockito-all
   test
 
+
+
+org.apache.phoenix
+phoenix-core
+4.3.0
+
+  
+  
+jline
+jline
+  
+
+
+
+  com.google.guava
+  guava
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b794ab1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 4eced5b..bb7db12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -27,11 +27,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.timeline

[37/50] [abbrv] hadoop git commit: YARN-3551. Consolidate data model change according to the backend implementation (Zhijie Shen via sjlee)

2015-08-25 Thread sjlee
YARN-3551. Consolidate data model change according to the backend 
implementation (Zhijie Shen via sjlee)

(cherry picked from commit 557a3950bddc837469244835f5577899080115d8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84147698
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84147698
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84147698

Branch: refs/heads/YARN-2928
Commit: 841476988c54bdb42a9837951e7cc36a2f6bb38a
Parents: 8f0b1ca
Author: Sangjin Lee 
Authored: Mon May 4 16:10:20 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:10 2015 -0700

--
 .../mapred/TimelineServicePerformanceV2.java|   2 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineEntity.java |  16 +--
 .../records/timelineservice/TimelineMetric.java | 131 +--
 .../TestTimelineServiceRecords.java |  81 +---
 .../monitor/ContainersMonitorImpl.java  |   4 +-
 .../TestTimelineServiceClientIntegration.java   |   6 +
 7 files changed, 146 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84147698/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
index de46617..1c2e28d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
@@ -261,7 +261,7 @@ public class TimelineServicePerformanceV2 extends 
Configured implements Tool {
   // add a metric
   TimelineMetric metric = new TimelineMetric();
   metric.setId("foo_metric");
-  metric.setSingleData(123456789L);
+  metric.addValue(System.currentTimeMillis(), 123456789L);
   entity.addMetric(metric);
   // add a config
   entity.addConfig("foo", "bar");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84147698/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 63243fb..41ed1ec 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3431. Sub resources of timeline entity needs to be passed to a 
separate 
 endpoint. (Zhijie Shen via junping_du)
 
+YARN-3551. Consolidate data model change according to the backend
+implementation (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84147698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 6cab753..3be7f52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -80,7 +80,7 @@ public class TimelineEntity {
   private TimelineEntity real;
   private Identifier identifier;
   private HashMap info = new HashMap<>();
-  private HashMap configs = new HashMap<>();
+  private HashMap configs = new HashMap<>();
   private Set metrics = new HashSet<>();
   private Set events = new HashSet<>();
   private HashMap> isRelatedToEntities = new HashMap<>();
@@ -213,7 +213,7 @@ public class TimelineEntity {
   // required by JAXB
   @InterfaceAudience.Private
   @XmlElement(name = "configs")
-  public HashMap getConfigsJAXB() {
+  public HashMap getConfigsJAXB() {
 if (real == null) {
   return configs;
 } else {
@@ -221,7 +221,7 @@ public class TimelineEntity {
 }
   }

[37/50] [abbrv] hadoop git commit: YARN-3906. Split the application table from the entity table. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51029a75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index ab02779..95f88d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -47,6 +48,10 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineWriterUtils;
@@ -60,7 +65,15 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
- * @throws Exception
+ * Various tests to test writing entities to HBase and reading them back from
+ * it.
+ *
+ * It uses a single HBase mini-cluster for all tests which is a little more
+ * realistic, and helps test correctness in the presence of other data.
+ *
+ * Each test uses a different cluster name to be able to handle its own data
+ * even if other records exist in the table. Use a different cluster name if
+ * you add a new test.
  */
 public class TestHBaseTimelineWriterImpl {
 
@@ -78,6 +91,199 @@ public class TestHBaseTimelineWriterImpl {
 .createTable(util.getHBaseAdmin(), util.getConfiguration());
 new AppToFlowTable()
 .createTable(util.getHBaseAdmin(), util.getConfiguration());
+new ApplicationTable()
+.createTable(util.getHBaseAdmin(), util.getConfiguration());
+  }
+
+  @Test
+  public void testWriteApplicationToHBase() throws Exception {
+TimelineEntities te = new TimelineEntities();
+ApplicationEntity entity = new ApplicationEntity();
+String id = "hello";
+entity.setId(id);
+Long cTime = 1425016501000L;
+Long mTime = 1425026901000L;
+entity.setCreatedTime(cTime);
+entity.setModifiedTime(mTime);
+
+// add the info map in Timeline Entity
+Map infoMap = new HashMap();
+infoMap.put("infoMapKey1", "infoMapValue1");
+infoMap.put("infoMapKey2", 10);
+entity.addInfo(infoMap);
+
+// add the isRelatedToEntity info
+String key = "task";
+String value = "is_related_to_entity_id_here";
+Set isRelatedToSet = new HashSet();
+isRelatedToSet.add(value);
+Map> isRelatedTo = new HashMap>();
+isRelatedTo.put(key, isRelatedToSet);
+entity.setIsRelatedToEntities(isRelatedTo);
+
+// add the relatesTo info
+key = "container";
+value = "relates_to_entity_id_here";
+Set relatesToSet = new HashSet();
+relatesToSet.add(value);
+value = "relates_to_entity_id_here_Second";
+relatesToSet.add(value);
+Map> relatesTo = new HashMap>();
+relatesTo.put(key, relatesToSet);
+entity.setRelatesToEntities(relatesTo);
+
+// add some config entries
+Map conf = new HashMap();
+conf.put("config_param1", "value1");
+conf.put("config_param2", "value2");
+entity.addConfigs(conf);
+
+// add metrics
+Set metrics = new HashSet<>();
+TimelineMetric m1 = new TimelineMetric();
+m1.setId("MAP_SLOT_MILLIS");
+  

[22/50] [abbrv] hadoop git commit: YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its test data (Vrushali C via sjlee)

2015-08-25 Thread sjlee
YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its test 
data (Vrushali C via sjlee)

(cherry picked from commit 4c0b6d73914f2e249795deb292f508177ea54884)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0d1cc01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0d1cc01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0d1cc01

Branch: refs/heads/YARN-2928
Commit: d0d1cc014707ee99f53d902248eb77a5ec6f3bcc
Parents: b3edd63
Author: Sangjin Lee 
Authored: Wed May 27 20:28:04 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  |  4 
 .../storage/TestHBaseTimelineWriterImpl.java | 15 ---
 2 files changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d1cc01/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3df01e9..bc3880c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -75,6 +75,10 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-3411. [Storage implementation] explore the native HBase write schema
 for storage (Vrushali C via sjlee)
+
+YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
+test data (Vrushali C via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d1cc01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index 48bacd6..f999b4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -121,12 +121,13 @@ public class TestHBaseTimelineWriterImpl {
 TimelineMetric m1 = new TimelineMetric();
 m1.setId("MAP_SLOT_MILLIS");
 Map metricValues = new HashMap();
-metricValues.put(1429741609000L, 1);
-metricValues.put(1429742609000L, 2);
-metricValues.put(1429743609000L, 3);
-metricValues.put(1429744609000L, 4);
-metricValues.put(1429745609000L, 500L);
-metricValues.put(1429746609000L, 600L);
+long ts = System.currentTimeMillis();
+metricValues.put(ts - 12, 1);
+metricValues.put(ts - 10, 2);
+metricValues.put(ts - 8, 3);
+metricValues.put(ts - 6, 4);
+metricValues.put(ts - 4, 500L);
+metricValues.put(ts - 2, 600L);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
@@ -216,7 +217,7 @@ public class TestHBaseTimelineWriterImpl {
   private void checkMetricsTimeseries(List metricCells,
   TimelineMetric m1) throws IOException {
 Map timeseries = m1.getValues();
-assertEquals(metricCells.size(), timeseries.size());
+assertEquals(timeseries.size(), metricCells.size());
 for (Cell c1 : metricCells) {
   assertTrue(timeseries.containsKey(c1.getTimestamp()));
   assertEquals(GenericObjectMapper.read(CellUtil.cloneValue(c1)),



[06/50] [abbrv] hadoop git commit: YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by Sangjin Lee

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c7210c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
new file mode 100644
index 000..009fa63
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.collectormanager;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoResponse;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+
+public class NMCollectorService extends CompositeService implements
+CollectorNodemanagerProtocol {
+
+  private static final Log LOG = LogFactory.getLog(NMCollectorService.class);
+
+  final Context context;
+
+  private Server server;
+
+  public NMCollectorService(Context context) {
+
+super(NMCollectorService.class.getName());
+this.context = context;
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+Configuration conf = getConfig();
+
+InetSocketAddress collectorServerAddress = conf.getSocketAddr(
+YarnConfiguration.NM_BIND_HOST,
+YarnConfiguration.NM_COLLECTOR_SERVICE_ADDRESS,
+YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_ADDRESS,
+YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_PORT);
+
+Configuration serverConf = new Configuration(conf);
+
+// TODO Security settings.
+YarnRPC rpc = YarnRPC.create(conf);
+
+server =
+rpc.getServer(CollectorNodemanagerProtocol.class, this,
+collectorServerAddress, serverConf,
+this.context.getNMTokenSecretManager(),
+conf.getInt(YarnConfiguration.NM_COLLECTOR_SERVICE_THREAD_COUNT,
+YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
+
+server.start();
+// start remaining services
+super.serviceStart();
+LOG.info("NMCollectorService started at " + collectorServerAddress);
+  }
+
+
+  @Override
+  public void serviceStop() throws Exception {
+if (server != null) {
+  server.stop();
+}
+// TODO may cleanup app collectors running on this NM in future.
+super.serviceStop();
+  }
+
+  @Override
+  public ReportNewCollectorInfoResponse reportNewCollectorInfo(
+  ReportNewCollectorInfoRequest request) throws IOException {
+List newCollectorsList = request.getAppCollectorsList();
+if (newCollectorsList != null && !newCollectorsList.isEmpty()) {
+  Map newCollectorsMap =
+  new HashMap();
+  for (AppCollectorsMap collector : newCollectorsList) {
+newCollectorsMap.put(collector.getApplicationId(), 
collector.getCollectorAddr());
+  }
+  
((NodeManager.NMContext)context).addRegisteredC

[12/50] [abbrv] hadoop git commit: YARN-3391. Clearly define flow ID/ flow run / flow version in API and storage. Contributed by Zhijie Shen

2015-08-25 Thread sjlee
YARN-3391. Clearly define flow ID/ flow run / flow version in API and storage. 
Contributed by Zhijie Shen

(cherry picked from commit 68c6232f8423e55b4d152ef3d1d66aeb2d6a555e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84389771
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84389771
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84389771

Branch: refs/heads/YARN-2928
Commit: 84389771f99d3630e2c81bd6fcdc0f3e03fd2f93
Parents: 462c48a
Author: Junping Du 
Authored: Thu Apr 9 18:04:27 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../applications/distributedshell/Client.java   | 36 +--
 .../distributedshell/TestDistributedShell.java  | 13 +++
 .../yarn/util/timeline/TimelineUtils.java   | 34 +++---
 .../GetTimelineCollectorContextResponse.java| 17 +
 ...tTimelineCollectorContextResponsePBImpl.java | 38 +---
 .../yarn_server_common_service_protos.proto |  5 +--
 .../java/org/apache/hadoop/yarn/TestRPC.java|  7 ++--
 .../collectormanager/NMCollectorService.java|  2 +-
 .../containermanager/ContainerManagerImpl.java  | 18 ++
 .../application/Application.java|  6 ++--
 .../application/ApplicationImpl.java| 27 +-
 .../application/TestApplication.java|  2 +-
 .../yarn/server/nodemanager/webapp/MockApp.java | 23 +---
 .../nodemanager/webapp/TestNMWebServices.java   |  2 +-
 .../server/resourcemanager/ClientRMService.java | 21 +++
 .../resourcemanager/amlauncher/AMLauncher.java  | 30 
 .../TestTimelineServiceClientIntegration.java   |  2 +-
 .../collector/AppLevelTimelineCollector.java| 10 +++---
 .../collector/TimelineCollector.java|  4 +--
 .../collector/TimelineCollectorContext.java | 32 +++--
 .../collector/TimelineCollectorManager.java | 15 
 .../storage/FileSystemTimelineWriterImpl.java   | 13 +++
 .../timelineservice/storage/TimelineWriter.java |  7 ++--
 ...TestPerNodeTimelineCollectorsAuxService.java |  2 +-
 .../collector/TestTimelineCollectorManager.java |  3 +-
 .../TestFileSystemTimelineWriterImpl.java   |  8 +++--
 27 files changed, 256 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84389771/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a3a1f14..aea859a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -50,6 +50,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3334. NM uses timeline client to publish container metrics to new
 timeline service. (Junping Du via zjshen)
 
+YARN-3391. Clearly define flow ID/ flow run / flow version in API and 
storage.
+(Zhijie Shen via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84389771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index db69490..ff2f594 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -185,8 +185,9 @@ public class Client {
   // Timeline domain writer access control
   private String modifyACLs = null;
 
-  private String flowId = null;
-  private String flowRunId = null;
+  private String flowName = null;
+  private String flowVersion = null;
+  private long flowRunId = 0L;
 
   // Command line options
   private Options opts;
@@ -289,9 +290,11 @@ public class Client {
 + "modify the timeline entities in the given domain");
 opts.addOption("create", false, "Flag to indicate whether to create the "
 + "domain specified with -domain.");
-opts.addOption("flow", true, "ID of the flow which the distr

[31/50] [abbrv] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-08-25 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ed0625/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
new file mode 100644
index 000..ee57890
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Used to separate row qualifiers, column qualifiers and compound fields.
+ */
+public enum Separator {
+
+  /**
+   * separator in key or column qualifier fields
+   */
+  QUALIFIERS("!", "%0$"),
+
+  /**
+   * separator in values, and/or compound key/column qualifier fields.
+   */
+  VALUES("?", "%1$"),
+
+  /**
+   * separator in values, often used to avoid having these in qualifiers and
+   * names. Note that if we use HTML form encoding through URLEncoder, we end 
up
+   * getting a + for a space, which may already occur in strings, so we don't
+   * want that.
+   */
+  SPACE(" ", "%2$");
+
+  /**
+   * The string value of this separator.
+   */
+  private final String value;
+
+  /**
+   * The URLEncoded version of this separator
+   */
+  private final String encodedValue;
+
+  /**
+   * The byte representation of value.
+   */
+  private final byte[] bytes;
+
+  /**
+   * The value quoted so that it can be used as a safe regex
+   */
+  private final String quotedValue;
+
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  /**
+   * @param value of the separator to use. Cannot be null or empty string.
+   * @param encodedValue choose something that isn't likely to occur in the 
data
+   *  itself. Cannot be null or empty string.
+   */
+  private Separator(String value, String encodedValue) {
+this.value = value;
+this.encodedValue = encodedValue;
+
+// validation
+if (value == null || value.length() == 0 || encodedValue == null
+|| encodedValue.length() == 0) {
+  throw new IllegalArgumentException(
+  "Cannot create separator from null or empty string.");
+}
+
+this.bytes = Bytes.toBytes(value);
+this.quotedValue = Pattern.quote(value);
+  }
+
+  /**
+   * Used to make token safe to be used with this separator without collisions.
+   *
+   * @param token
+   * @return the token with any occurrences of this separator URLEncoded.
+   */
+  public String encode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(value, encodedValue);
+  }
+
+  /**
+   * @param token
+   * @return the token with any occurrences of the encoded separator replaced 
by
+   * the separator itself.
+   */
+  public String decode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(encodedValue, value);
+  }
+
+  /**
+   * Encode the given separators in the token with their encoding equivalent.
+   * This means that when encoding is already present in the token itself, this
+   * is not a reversible process. See also {@link #decode(String, 
Separator...)}
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *  equivalent.
+   * @return non-null byte representation of 

[29/50] [abbrv] hadoop git commit: YARN-3051. Created storage oriented reader interface for fetching raw entity data and made the filesystem based implementation. Contributed by Varun Saxena.

2015-08-25 Thread sjlee
YARN-3051. Created storage oriented reader interface for fetching raw entity 
data and made the filesystem based implementation. Contributed by Varun Saxena.

(cherry picked from commit 499ce52c7b645ec0b1cc8ac62dc9a3127b987a20)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89e6c693
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89e6c693
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89e6c693

Branch: refs/heads/YARN-2928
Commit: 89e6c6932a57cf69b1d97f8e6ac6ccd464143327
Parents: 5751abd
Author: Zhijie Shen 
Authored: Mon Jul 6 18:11:27 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineEntity.java |   5 +
 .../storage/FileSystemTimelineReaderImpl.java   | 490 
 .../timelineservice/storage/TimelineReader.java | 162 ++
 .../TestFileSystemTimelineReaderImpl.java   | 556 +++
 5 files changed, 1216 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89e6c693/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a294fc5..58232b7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -73,6 +73,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3801. [JDK-8] Exclude jdk.tools from hbase-client and
 hbase-testing-util (Tsuyoshi Ozawa via sjlee)
 
+YARN-3051. Created storage oriented reader interface for fetching raw 
entity
+data and made the filesystem based implementation. (Varun Saxena via 
zjshen)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89e6c693/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index a641f32..60fba85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.records.timelineservice;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
+import org.codehaus.jackson.annotate.JsonSetter;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
@@ -335,6 +336,7 @@ public class TimelineEntity {
 }
   }
 
+  @JsonSetter("isrelatedto")
   public void setIsRelatedToEntities(
   Map> isRelatedToEntities) {
 if (real == null) {
@@ -423,6 +425,7 @@ public class TimelineEntity {
 }
   }
 
+  @JsonSetter("relatesto")
   public void setRelatesToEntities(Map> relatesToEntities) 
{
 if (real == null) {
   this.relatesToEntities =
@@ -441,6 +444,7 @@ public class TimelineEntity {
 }
   }
 
+  @JsonSetter("createdtime")
   public void setCreatedTime(long createdTime) {
 if (real == null) {
   this.createdTime = createdTime;
@@ -458,6 +462,7 @@ public class TimelineEntity {
 }
   }
 
+  @JsonSetter("modifiedtime")
   public void setModifiedTime(long modifiedTime) {
 if (real == null) {
   this.modifiedTime = modifiedTime;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89e6c693/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
new file mode 100644
index 000..f9f1d1d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/jav

[32/50] [abbrv] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-08-25 Thread sjlee
YARN-3706. Generalize native HBase writer for additional tables (Joep 
Rottinghuis via sjlee)

(cherry picked from commit 9137aeae0dec83f9eff40d12cae712dfd508c0c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42ed0625
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42ed0625
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42ed0625

Branch: refs/heads/YARN-2928
Commit: 42ed0625da34ef8d4794140289c102920259eaf0
Parents: 623b3ab
Author: Sangjin Lee 
Authored: Thu Jun 18 10:49:20 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/EntityColumnDetails.java| 110 --
 .../storage/EntityColumnFamily.java |  95 -
 .../storage/HBaseTimelineWriterImpl.java| 114 +++---
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 134 +---
 .../timelineservice/storage/TimelineWriter.java |   3 +-
 .../storage/TimelineWriterUtils.java| 344 ---
 .../storage/common/BaseTable.java   | 118 +++
 .../common/BufferedMutatorDelegator.java|  73 
 .../timelineservice/storage/common/Column.java  |  59 
 .../storage/common/ColumnFamily.java|  34 ++
 .../storage/common/ColumnHelper.java| 247 +
 .../storage/common/ColumnPrefix.java|  83 +
 .../timelineservice/storage/common/Range.java   |  59 
 .../storage/common/Separator.java   | 303 
 .../common/TimelineEntitySchemaConstants.java   |  68 
 .../storage/common/TimelineWriterUtils.java | 127 +++
 .../storage/common/TypedBufferedMutator.java|  28 ++
 .../storage/common/package-info.java|  24 ++
 .../storage/entity/EntityColumn.java| 141 
 .../storage/entity/EntityColumnFamily.java  |  65 
 .../storage/entity/EntityColumnPrefix.java  | 212 
 .../storage/entity/EntityRowKey.java|  93 +
 .../storage/entity/EntityTable.java | 161 +
 .../storage/entity/package-info.java|  25 ++
 .../storage/TestHBaseTimelineWriterImpl.java| 252 --
 .../storage/common/TestSeparator.java   | 129 +++
 .../storage/common/TestTimelineWriterUtils.java |  29 ++
 30 files changed, 2301 insertions(+), 962 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ed0625/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1cf1f26..1e85cdf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -93,6 +93,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via
 zjshen)
 
+YARN-3706. Generalize native HBase writer for additional tables (Joep
+Rottinghuis via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ed0625/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
deleted file mode 100644
index 2894c41..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIO

[25/50] [abbrv] hadoop git commit: YARN-3044. Made RM write app, attempt and optional container lifecycle events to timeline service v2. Contributed by Naganarasimha G R.

2015-08-25 Thread sjlee
YARN-3044. Made RM write app, attempt and optional container lifecycle events 
to timeline service v2. Contributed by Naganarasimha G R.

(cherry picked from commit 17842a3f61ed33ec831a889c11a74d4814be73ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a22d26c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a22d26c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a22d26c6

Branch: refs/heads/YARN-2928
Commit: a22d26c6d39f8967fc1b761ad47d21c76019c501
Parents: 6dd2397
Author: Zhijie Shen 
Authored: Sat Jun 13 11:32:41 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineEntity.java |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../distributedshell/TestDistributedShell.java  | 118 +++--
 .../metrics/ContainerMetricsConstants.java  |  10 +
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../AbstractTimelineServicePublisher.java   | 179 
 .../metrics/ApplicationFinishedEvent.java   |  13 +-
 .../metrics/SystemMetricsPublisher.java | 431 ---
 .../metrics/TimelineServiceV1Publisher.java | 270 
 .../metrics/TimelineServiceV2Publisher.java | 301 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |   2 -
 .../TestRMAppLogAggregationStatus.java  |   2 +-
 .../metrics/TestSystemMetricsPublisher.java |   8 +-
 .../TestSystemMetricsPublisherForV2.java| 374 
 .../storage/FileSystemTimelineWriterImpl.java   |   4 +-
 16 files changed, 1331 insertions(+), 399 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a22d26c6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b3e8738..a673fb2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -82,6 +82,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3721. build is broken on YARN-2928 branch due to possible dependency
 cycle (Li Lu via sjlee)
 
+YARN-3044. Made RM write app, attempt and optional container lifecycle
+events to timeline service v2. (Naganarasimha G R via zjshen)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a22d26c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index defadec..a641f32 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -470,4 +470,7 @@ public class TimelineEntity {
 return real == null ? this : real;
   }
 
+  public String toString() {
+return identifier.toString();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a22d26c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e26b13f..8640864 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -388,6 +388,16 @@ public class YarnConfiguration extends Configuration {
   + "system-metrics-publisher.enabled";
   public static final boolean DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED = false;
 
+  /**
+   * The setting that controls whether yarn container metrics is published to
+   * the timeline server or not by RM. This configuration setting is for ATS
+   * V2
+   */
+  public static final String RM_PUBLISH_CONTAINER_METRICS_ENABLED = YARN_PREFIX
+  + "rm.system-metrics-publisher.emit-container-events";

[21/50] [abbrv] hadoop git commit: YARN-3562. unit tests failures and issues found from findbug from earlier ATS checkins (Naganarasimha G R via sjlee)

2015-08-25 Thread sjlee
YARN-3562. unit tests failures and issues found from findbug from earlier ATS 
checkins (Naganarasimha G R via sjlee)

(cherry picked from commit d4a23625b1e9a2c4cefd5fa68c28549ba6c1bc2e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed95a79f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed95a79f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed95a79f

Branch: refs/heads/YARN-2928
Commit: ed95a79fe5962c07a0aa9292963c061227e8c06b
Parents: 8414769
Author: Sangjin Lee 
Authored: Wed May 6 20:31:50 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../server/resourcemanager/ResourceTrackerService.java  |  4 ++--
 .../timelineservice/RMTimelineCollectorManager.java |  2 +-
 .../yarn/server/resourcemanager/TestAppManager.java |  5 +
 .../server/resourcemanager/TestClientRMService.java | 12 +++-
 .../TestRMAppLogAggregationStatus.java  |  4 
 .../metrics/TestSystemMetricsPublisher.java |  2 +-
 .../resourcemanager/rmapp/TestRMAppTransitions.java |  5 +
 .../org/apache/hadoop/yarn/server/MiniYARNCluster.java  |  2 ++
 .../storage/FileSystemTimelineWriterImpl.java   |  7 +--
 10 files changed, 39 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed95a79f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 41ed1ec..82f95d6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -61,6 +61,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3551. Consolidate data model change according to the backend
 implementation (Zhijie Shen via sjlee)
 
+YARN-3562. unit tests failures and issues found from findbug from earlier
+ATS checkins (Naganarasimha G R via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed95a79f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index ad38a71..b873752 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -538,8 +538,8 @@ public class ResourceTrackerService extends AbstractService 
implements
 appId + " is not found in RMContext!");
   } else {
 String previousCollectorAddr = rmApp.getCollectorAddr();
-if (previousCollectorAddr == null ||
-previousCollectorAddr != collectorAddr) {
+if (previousCollectorAddr == null
+|| !previousCollectorAddr.equals(collectorAddr)) {
   // sending collector update event.
   RMAppCollectorUpdateEvent event =
   new RMAppCollectorUpdateEvent(appId, collectorAddr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed95a79f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
index 25e0e0f..7d1b657 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourc

[13/50] [abbrv] hadoop git commit: YARN-3334. NM uses timeline client to publish container metrics to new timeline service. Contributed by Junping Du.

2015-08-25 Thread sjlee
YARN-3334. NM uses timeline client to publish container metrics to new timeline 
service. Contributed by Junping Du.

(cherry picked from commit ae0a11167265c126f8127cce15b4b717e3a8767c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/462c48ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/462c48ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/462c48ab

Branch: refs/heads/YARN-2928
Commit: 462c48ab7c3b0b1547e021055951fd355a6c1dee
Parents: a9c81ac
Author: Zhijie Shen 
Authored: Mon Apr 6 09:31:24 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../HierarchicalTimelineEntity.java |   4 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 ++
 .../distributedshell/ApplicationMaster.java |  32 +++--
 .../distributedshell/TestDistributedShell.java  |  47 +--
 .../client/api/impl/TimelineClientImpl.java |  11 +-
 .../hadoop/yarn/server/nodemanager/Context.java |  10 +-
 .../yarn/server/nodemanager/NodeManager.java|  33 ++---
 .../nodemanager/NodeStatusUpdaterImpl.java  |  49 +++-
 .../collectormanager/NMCollectorService.java|  11 +-
 .../application/Application.java|   3 +
 .../application/ApplicationImpl.java|  26 +++-
 .../monitor/ContainersMonitorImpl.java  | 123 ++-
 .../yarn/server/nodemanager/TestEventFlow.java  |   2 +-
 .../nodemanager/TestNodeStatusUpdater.java  |   8 +-
 .../BaseContainerManagerTest.java   |   2 +-
 .../TestContainerManagerRecovery.java   |   4 +-
 .../launcher/TestContainerLaunch.java   |   6 +-
 .../TestLocalCacheDirectoryManager.java |   2 +-
 .../TestResourceLocalizationService.java|   4 +-
 .../yarn/server/nodemanager/webapp/MockApp.java |   8 +-
 .../webapp/TestContainerLogsPage.java   |  12 +-
 .../nodemanager/webapp/TestNMWebServer.java |  10 +-
 .../nodemanager/webapp/TestNMWebServices.java   |   2 +-
 .../webapp/TestNMWebServicesApps.java   |   2 +-
 .../webapp/TestNMWebServicesContainers.java |   2 +-
 .../resourcemanager/ResourceTrackerService.java |  20 ++-
 .../collector/TimelineCollectorWebService.java  |   2 +-
 28 files changed, 334 insertions(+), 112 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/462c48ab/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 76fa0a8..a3a1f14 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -47,6 +47,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3374. Collector's web server should randomly bind an available port. (
 Zhijie Shen via junping_du)
 
+YARN-3334. NM uses timeline client to publish container metrics to new
+timeline service. (Junping Du via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/462c48ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/HierarchicalTimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/HierarchicalTimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/HierarchicalTimelineEntity.java
index 01d85cf..49576de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/HierarchicalTimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/HierarchicalTimelineEntity.java
@@ -58,7 +58,9 @@ public abstract class HierarchicalTimelineEntity extends 
TimelineEntity {
 
   // required by JAXB
   @InterfaceAudience.Private
-  @XmlElement(name = "children")
+  // comment out XmlElement here because it cause UnrecognizedPropertyException
+  // TODO we need a better fix
+  //@XmlElement(name = "children")
   public HashMap> getChildrenJAXB() {
 return children;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/462c48ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/

[10/50] [abbrv] hadoop git commit: YARN-3431. Sub resources of timeline entity needs to be passed to a separate endpoint. Contributed By Zhijie Shen.

2015-08-25 Thread sjlee
YARN-3431. Sub resources of timeline entity needs to be passed to a separate 
endpoint. Contributed By Zhijie Shen.

(cherry picked from commit fa5cc75245a6dba549620a8b26c7b4a8aed9838e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd1aa15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd1aa15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd1aa15

Branch: refs/heads/YARN-2928
Commit: 8fd1aa15a9d11f5cd586ee98d5fc65b623e48f5c
Parents: e9e8d85
Author: Junping Du 
Authored: Mon Apr 27 11:28:32 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ApplicationAttemptEntity.java   |  13 +-
 .../timelineservice/ApplicationEntity.java  |  22 +-
 .../records/timelineservice/ClusterEntity.java  |  12 +-
 .../timelineservice/ContainerEntity.java|  13 +-
 .../api/records/timelineservice/FlowEntity.java |  80 +++--
 .../HierarchicalTimelineEntity.java | 124 +++
 .../records/timelineservice/QueueEntity.java|  36 +++
 .../records/timelineservice/TimelineEntity.java | 322 +++
 .../records/timelineservice/TimelineQueue.java  |  35 --
 .../records/timelineservice/TimelineUser.java   |  35 --
 .../api/records/timelineservice/UserEntity.java |  36 +++
 .../TestTimelineServiceRecords.java |  91 --
 .../TestTimelineServiceClientIntegration.java   |  44 ++-
 .../collector/TimelineCollectorWebService.java  |  65 +++-
 15 files changed, 654 insertions(+), 277 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd1aa15/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 43d292a..63243fb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -55,6 +55,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)
 
+YARN-3431. Sub resources of timeline entity needs to be passed to a 
separate 
+endpoint. (Zhijie Shen via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd1aa15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
index 9dc0c1d..734c741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
@@ -20,16 +20,17 @@ package org.apache.hadoop.yarn.api.records.timelineservice;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement(name = "appattempt")
-@XmlAccessorType(XmlAccessType.NONE)
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public class ApplicationAttemptEntity extends HierarchicalTimelineEntity {
   public ApplicationAttemptEntity() {
 super(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString());
   }
+
+  public ApplicationAttemptEntity(TimelineEntity entity) {
+super(entity);
+if 
(!entity.getType().equals(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString()))
 {
+  throw new IllegalArgumentException("Incompatible entity type: " + 
getId());
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd1aa15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
index 45ec520..183d8d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/record

[36/50] [abbrv] hadoop git commit: YARN-3908. Fixed bugs in HBaseTimelineWriterImpl. Contributed by Vrushali C and Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3908. Fixed bugs in HBaseTimelineWriterImpl. Contributed by Vrushali C and 
Sangjin Lee.

(cherry picked from commit df0ec473a84871b0effd7ca6faac776210d7df09)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b3b86a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b3b86a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b3b86a2

Branch: refs/heads/YARN-2928
Commit: 5b3b86a2571c26cdfb23720508ba604e46acfba6
Parents: 57f6d06
Author: Zhijie Shen 
Authored: Mon Jul 27 15:50:28 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../records/timelineservice/TimelineEvent.java  |  4 +-
 .../storage/HBaseTimelineWriterImpl.java| 18 ++-
 .../storage/common/ColumnHelper.java| 21 
 .../storage/common/ColumnPrefix.java|  7 +--
 .../storage/common/Separator.java   |  7 +++
 .../storage/entity/EntityColumnPrefix.java  | 15 --
 .../storage/entity/EntityTable.java |  6 ++-
 .../storage/TestHBaseTimelineWriterImpl.java| 56 ++--
 9 files changed, 111 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3b86a2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fd19320..69112b5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -115,6 +115,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3792. Test case failures in TestDistributedShell and some issue fixes
 related to ATSV2 (Naganarasimha G R via sjlee)
 
+YARN-3908. Fixed bugs in HBaseTimelineWriterImpl. (Vrushali C and Sangjin
+Lee via zjshen)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3b86a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEvent.java
index 1dbf7e5..a563658 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEvent.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEvent.java
@@ -33,6 +33,8 @@ import java.util.Map;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public class TimelineEvent implements Comparable {
+  public static final long INVALID_TIMESTAMP = 0L;
+
   private String id;
   private HashMap info = new HashMap<>();
   private long timestamp;
@@ -83,7 +85,7 @@ public class TimelineEvent implements 
Comparable {
   }
 
   public boolean isValid() {
-return (id != null && timestamp != 0L);
+return (id != null && timestamp != INVALID_TIMESTAMP);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3b86a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index 876ad6a..cd2e76e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -141,6 +141,13 @@ public class HBaseTimelineWriterImpl extends 
AbstractService implements
 EntityColumn.MODIFIED_TIME.store(rowKey, entityTable, null,
 te.getModifiedTime());
 EntityColumn.FLOW_VERSION.store(rowKey, entityTable, null, flowVersion);
+Map info = te.getInfo();
+if (info != null) {
+  for (Map.Entry entry : info.entrySet()) {
+EntityColumnPrefix.

[28/50] [abbrv] hadoop git commit: YARN-3792. Test case failures in TestDistributedShell and some issue fixes related to ATSV2 (Naganarasimha G R via sjlee)

2015-08-25 Thread sjlee
YARN-3792. Test case failures in TestDistributedShell and some issue fixes 
related to ATSV2 (Naganarasimha G R via sjlee)

(cherry picked from commit 84f37f1c7eefec6d139cbf091c50d6c06f734323)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5751abd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5751abd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5751abd1

Branch: refs/heads/YARN-2928
Commit: 5751abd12891799fa3e59058e736661f6abb89b6
Parents: 42ed062
Author: Sangjin Lee 
Authored: Mon Jun 22 20:47:56 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 33 +++
 .../applications/distributedshell/Client.java   |  2 +-
 .../distributedshell/TestDistributedShell.java  | 91 +---
 .../TestDistributedShellWithNodeLabels.java |  9 +-
 .../client/api/impl/TimelineClientImpl.java |  8 ++
 .../application/ApplicationImpl.java|  4 +-
 .../monitor/ContainersMonitorImpl.java  | 15 ++--
 .../RMTimelineCollectorManager.java |  2 +-
 .../collector/NodeTimelineCollectorManager.java | 14 ---
 .../PerNodeTimelineCollectorsAuxService.java|  3 +-
 .../collector/TimelineCollectorManager.java |  2 +-
 11 files changed, 107 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5751abd1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1e85cdf..a294fc5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -35,9 +35,6 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. (Sangjin 
Lee
 via junping_du)
 
-YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
-(Sangjin Lee via zjshen)
-
 YARN-3034. Implement RM starting its timeline collector. (Naganarasimha G R
 via junping_du)
 
@@ -61,27 +58,15 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3551. Consolidate data model change according to the backend
 implementation (Zhijie Shen via sjlee)
 
-YARN-3562. unit tests failures and issues found from findbug from earlier
-ATS checkins (Naganarasimha G R via sjlee)
-
 YARN-3134. Implemented Phoenix timeline writer to access HBase backend. (Li
 Lu via zjshen)
 
 YARN-3529. Added mini HBase cluster and Phoenix support to timeline service
 v2 unit tests. (Li Lu via zjshen)
 
-YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
-Sangjin Lee via junping_du)
-
 YARN-3411. [Storage implementation] explore the native HBase write schema
 for storage (Vrushali C via sjlee)
 
-YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
-test data (Vrushali C via sjlee)
-
-YARN-3721. build is broken on YARN-2928 branch due to possible dependency
-cycle (Li Lu via sjlee)
-
 YARN-3044. Made RM write app, attempt and optional container lifecycle
 events to timeline service v2. (Naganarasimha G R via zjshen)
 
@@ -100,6 +85,24 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   BUG FIXES
 
+YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
+(Sangjin Lee via zjshen)
+
+YARN-3562. unit tests failures and issues found from findbug from earlier
+ATS checkins (Naganarasimha G R via sjlee)
+
+YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
+Sangjin Lee via junping_du)
+
+YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
+test data (Vrushali C via sjlee)
+
+YARN-3721. build is broken on YARN-2928 branch due to possible dependency
+cycle (Li Lu via sjlee)
+
+YARN-3792. Test case failures in TestDistributedShell and some issue fixes
+related to ATSV2 (Naganarasimha G R via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5751abd1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index ff2f594..a588544 1

[42/50] [abbrv] hadoop git commit: YARN-3984. Adjusted the event column key schema and avoided missing empty event. Contributed by Vrushali C.

2015-08-25 Thread sjlee
YARN-3984. Adjusted the event column key schema and avoided missing empty 
event. Contributed by Vrushali C.

(cherry picked from commit 895ccfa1ab9e701f2908586e323249f670fe5544)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1a1a22a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1a1a22a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1a1a22a

Branch: refs/heads/YARN-2928
Commit: d1a1a22a1341c06160a98de36a04e80c118df292
Parents: d93e636
Author: Zhijie Shen 
Authored: Wed Aug 5 16:28:57 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/HBaseTimelineWriterImpl.java|  23 +++-
 .../storage/common/TimelineWriterUtils.java |  13 +++
 .../storage/entity/EntityRowKey.java|  18 +---
 .../storage/TestHBaseTimelineWriterImpl.java| 105 ---
 5 files changed, 128 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1a1a22a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f5ce32a..a484040 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -121,6 +121,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3993. Changed to use the AM flag in ContainerContext determine AM
 container in TestPerNodeTimelineCollectorsAuxService. (Sunil G via zjshen)
 
+YARN-3984. Adjusted the event column key schema and avoided missing empty
+event. (Vrushali C via zjshen)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1a1a22a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index cd2e76e..3173e87 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -37,6 +37,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineWriterUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
@@ -200,20 +201,32 @@ public class HBaseTimelineWriterImpl extends 
AbstractService implements
   "! Using the current timestamp");
   eventTimestamp = System.currentTimeMillis();
 }
+byte[] columnQualifierFirst =
+Bytes.toBytes(Separator.VALUES.encode(eventId));
+byte[] columnQualifierWithTsBytes =
+Separator.VALUES.join(columnQualifierFirst,
+Bytes.toBytes(TimelineWriterUtils.invert(eventTimestamp)));
 Map<String, Object> eventInfo = event.getInfo();
-if (eventInfo != null) {
+if ((eventInfo == null) || (eventInfo.size() == 0)) {
+  // add separator since event key is empty
+  byte[] compoundColumnQualifierBytes =
+  Separator.VALUES.join(columnQualifierWithTsBytes,
+  null);
+  String compoundColumnQualifier =
+  Bytes.toString(compoundColumnQualifierBytes);
+  EntityColumnPrefix.EVENT.store(rowKey, entityTable,
+  compoundColumnQualifier, null, 
TimelineWriterUtils.EMPTY_BYTES);
+} else {
   for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
 // eventId?infoKey
-

[44/50] [abbrv] hadoop git commit: MAPREDUCE-6370. Made the timeline service v2 test driver write event ID. Contributed by Li Lu.

2015-08-25 Thread sjlee
MAPREDUCE-6370. Made the timeline service v2 test driver write event ID. 
Contributed by Li Lu.

(cherry picked from commit 827633ee9fee26e3e15343cbccb0b7905ae02170)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40d9d469
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40d9d469
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40d9d469

Branch: refs/heads/YARN-2928
Commit: 40d9d4690568476fb43c006983a91304434aa066
Parents: 09a8b7b
Author: Zhijie Shen 
Authored: Fri May 22 00:00:05 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java| 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d9d469/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9c66a5e..e6348b9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -21,6 +21,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   BUG FIXES
 
+MAPREDUCE-6370. Made the timeline service v2 test driver write event ID.
+(Li Lu via zjshen)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d9d469/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
index 4ef0a14..625c32a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
@@ -97,6 +97,7 @@ class SimpleEntityWriter extends EntityWriter {
 entity.addInfo("PERF_TEST", payLoad);
 // add an event
 TimelineEvent event = new TimelineEvent();
+event.setId("foo_event_id");
 event.setTimestamp(System.currentTimeMillis());
 event.addInfo("foo_event", "test");
 entity.addEvent(event);



[47/50] [abbrv] hadoop git commit: YARN-3814. REST API implementation for getting raw entities in TimelineReader (Naganarasimha G R via sjlee)

2015-08-25 Thread sjlee
YARN-3814. REST API implementation for getting raw entities in TimelineReader 
(Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afa54d2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afa54d2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afa54d2f

Branch: refs/heads/YARN-2928
Commit: afa54d2fcc2e83dbb036a06014b3f6426555f598
Parents: e979f30
Author: Sangjin Lee 
Authored: Fri Aug 21 19:10:23 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:52:45 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../reader/TimelineReaderManager.java   |  41 ++
 .../reader/TimelineReaderServer.java|   2 +-
 .../reader/TimelineReaderWebServices.java   | 245 +-
 .../storage/FileSystemTimelineReaderImpl.java   |   5 +
 .../reader/TestTimelineReaderWebServices.java   | 456 ++-
 6 files changed, 741 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afa54d2f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0041f7f..b1d2bd6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -97,6 +97,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4025. Deal with byte representations of Longs in writer code.
 (Sangjin Lee and Vrushali C via junping_du)
 
+YARN-3814. REST API implementation for getting raw entities in
+TimelineReader (Naganarasimha G R via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afa54d2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
index 5573185..7fafd82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
@@ -18,10 +18,18 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 
 @Private
 @Unstable
@@ -33,4 +41,37 @@ public class TimelineReaderManager extends AbstractService {
 super(TimelineReaderManager.class.getName());
 this.reader = timelineReader;
   }
+
+  /**
+   * Get a set of entities matching given predicates. The meaning of each
+   * argument has been documented with {@link TimelineReader#getEntities}.
+   *
+   * @see TimelineReader#getEntities
+   */
+  Set<TimelineEntity> getEntities(String userId, String clusterId,
+  String flowId, Long flowRunId, String appId, String entityType,
+  Long limit, Long createdTimeBegin, Long createdTimeEnd,
+  Long modifiedTimeBegin, Long modifiedTimeEnd,
+  Map<String, Set<String>> relatesTo, Map<String, Set<String>> isRelatedTo,
+  Map<String, Object> infoFilters, Map<String, String> configFilters,
+  Set<String> metricFilters, Set<String> eventFilters,
+  EnumSet<Field> fieldsToRetrieve) throws IOException {
+return reader.getEntities(userId, clusterId, flowId, flowRunId, appId,
+entityType, limit, createdTimeBegin, createdTimeEnd, modifiedTimeBegin,
+modifiedTimeEnd, relatesTo, isRelatedTo, infoFilters, configFilters,
+metricFilters, eventFilters, fieldsToRetrieve);
+  }
+
+  /**
+   * Get single timeline entity. The meaning of each argument has been
+   * documented with {@link TimelineReader#getEntity}.
+   *
+   * @see TimelineReader#getEnt

[50/50] [abbrv] hadoop git commit: YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via sjlee)

2015-08-25 Thread sjlee
YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be95107a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be95107a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be95107a

Branch: refs/heads/YARN-2928
Commit: be95107aa5fc4de6c8c3f10e55a1495d305d6ab4
Parents: afa54d2
Author: Sangjin Lee 
Authored: Mon Aug 24 17:36:31 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:52:45 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../containermanager/ContainerManagerImpl.java  | 33 +++-
 .../metrics/TimelineServiceV2Publisher.java |  2 --
 .../TestSystemMetricsPublisherForV2.java|  8 -
 4 files changed, 28 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be95107a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b1d2bd6..4414178 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)
 
+YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R
+via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be95107a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 2ea2ec1..50f2dfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -896,21 +896,24 @@ public class ContainerManagerImpl extends 
CompositeService implements
 if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
   flowRunId = Long.parseLong(flowRunIdStr);
 }
-Application application = new ApplicationImpl(dispatcher, user,
-flowName, flowVersion, flowRunId, applicationID, credentials, 
context);
-if (null == context.getApplications().putIfAbsent(applicationID,
-  application)) {
-  LOG.info("Creating a new application reference for app " + 
applicationID);
-  LogAggregationContext logAggregationContext =
-  containerTokenIdentifier.getLogAggregationContext();
-  Map<ApplicationAccessType, String> appAcls =
-  container.getLaunchContext().getApplicationACLs();
-  context.getNMStateStore().storeApplication(applicationID,
-  buildAppProto(applicationID, user, credentials, appAcls,
-logAggregationContext));
-  dispatcher.getEventHandler().handle(
-new ApplicationInitEvent(applicationID, appAcls,
-  logAggregationContext));
+if (!context.getApplications().containsKey(applicationID)) {
+  Application application =
+  new ApplicationImpl(dispatcher, user, flowName, flowVersion,
+  flowRunId, applicationID, credentials, context);
+  if (context.getApplications().putIfAbsent(applicationID,
+  application) == null) {
+LOG.info("Creating a new application reference for app "
++ applicationID);
+LogAggregationContext logAggregationContext =
+containerTokenIdentifier.getLogAggregationContext();
+Map<ApplicationAccessType, String> appAcls =
+container.getLaunchContext().getApplicationACLs();
+context.getNMStateStore().storeApplication(applicationID,
+buildAppProto(applicationID, user, credentials, appAcls,
+logAggregationContext));
+dispatcher.getEventHandler().handle(new ApplicationInitEvent(
+applicationID, appAcls, logAggregationContext));
+  }
 }
 
 dispatcher.getEventHandler().handle(

http://git-wip-us.apa

[38/50] [abbrv] hadoop git commit: YARN-3906. Split the application table from the entity table. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3906. Split the application table from the entity table. Contributed by 
Sangjin Lee.

(cherry picked from commit bcd755eba9466ce277d3c14192c31da6462c4ab3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51029a75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51029a75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51029a75

Branch: refs/heads/YARN-2928
Commit: 51029a751236b1cff4c92bab5612395c76f07d8e
Parents: 4bdf34f
Author: Junping Du 
Authored: Tue Aug 11 16:59:21 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:47:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/HBaseTimelineReaderImpl.java| 202 
 .../storage/HBaseTimelineWriterImpl.java| 145 ++---
 .../storage/TimelineSchemaCreator.java  |  10 +
 .../storage/application/ApplicationColumn.java  | 136 
 .../application/ApplicationColumnFamily.java|  65 
 .../application/ApplicationColumnPrefix.java| 217 +
 .../storage/application/ApplicationRowKey.java  |  67 
 .../storage/application/ApplicationTable.java   | 164 ++
 .../storage/application/package-info.java   |  25 ++
 .../storage/entity/EntityColumnPrefix.java  |   2 +-
 .../storage/entity/EntityTable.java |  59 ++--
 .../storage/TestHBaseTimelineWriterImpl.java| 321 ---
 13 files changed, 1230 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51029a75/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2e06793..a4aeef8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -85,6 +85,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3049. [Storage Implementation] Implement storage reader interface to
 fetch raw data from HBase backend (Zhijie Shen via sjlee)
 
+YARN-3906. Split the application table from the entity table. (Sangjin Lee 
+via junping_du)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51029a75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index 5258b9c..094f868 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -18,7 +18,19 @@
 package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -32,11 +44,17 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTa

[46/50] [abbrv] hadoop git commit: YARN-3045. Implement NM writing container lifecycle events to Timeline Service v2. Contributed by Naganarasimha G R.

2015-08-25 Thread sjlee
YARN-3045. Implement NM writing container lifecycle events to Timeline Service 
v2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/702a2142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/702a2142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/702a2142

Branch: refs/heads/YARN-2928
Commit: 702a2142b15c45f12aa79c28c5148984e0452ed3
Parents: 97f211b
Author: Junping Du 
Authored: Tue Aug 18 04:31:45 2015 -0700
Committer: Sangjin Lee 
Committed: Tue Aug 25 10:52:44 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|  16 +-
 .../distributedshell/TestDistributedShell.java  |  28 +-
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  13 +
 .../containermanager/ContainerManagerImpl.java  |  48 ++-
 .../ApplicationContainerFinishedEvent.java  |  17 +-
 .../containermanager/container/Container.java   |   3 +
 .../container/ContainerImpl.java|  29 +-
 .../monitor/ContainersMonitorImpl.java  | 108 +-
 .../timelineservice/NMTimelineEvent.java|  31 ++
 .../timelineservice/NMTimelineEventType.java|  24 ++
 .../timelineservice/NMTimelinePublisher.java| 376 +++
 .../nodemanager/TestNodeStatusUpdater.java  |  24 +-
 .../containermanager/TestAuxServices.java   |   4 +-
 .../TestContainerManagerRecovery.java   |   8 +
 .../application/TestApplication.java|   6 +-
 .../container/TestContainer.java|   2 +-
 .../nodemanager/webapp/MockContainer.java   |   6 +
 .../nodemanager/webapp/TestNMWebServer.java |   3 +-
 .../PerNodeTimelineCollectorsAuxService.java|  16 +-
 ...TestPerNodeTimelineCollectorsAuxService.java |   9 +
 22 files changed, 636 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3e1f212..492a098 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -91,6 +91,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3904. Refactor timelineservice.storage to add support to online and
 offline aggregation writers (Li Lu via sjlee)
 
+YARN-3045. Implement NM writing container lifecycle events to Timeline
+Service v2. (Naganarasimha G R via junping_du)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 691170e..62e60a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -110,6 +110,16 @@
 
   
 
+  
+  
+
+ 
+  
+
+  
+
+ 
+  
 
   
   
@@ -505,10 +515,4 @@
 
 
   
-
-  
-  
-
- 
-  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index b8a7abf..c89bee9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache

Git Push Summary

2015-08-25 Thread sjlee
Repository: hadoop
Updated Tags:  refs/tags/feature_YARN-2928_2015-08-24 [created] a73d4b204


hadoop git commit: HDFS-8846. Add a unit test for INotify functionality across a layout version upgrade (Zhe Zhang via Colin P. McCabe)

2015-08-25 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk eee0d4563 -> a4d9acc51


HDFS-8846. Add a unit test for INotify functionality across a layout version 
upgrade (Zhe Zhang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4d9acc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4d9acc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4d9acc5

Branch: refs/heads/trunk
Commit: a4d9acc51d1a977bc333da17780c00c72e8546f1
Parents: eee0d45
Author: Colin Patrick Mccabe 
Authored: Tue Aug 25 14:09:13 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Tue Aug 25 14:29:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/TestDFSInotifyEventInputStream.java|   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  |  78 +-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java| 107 ++-
 .../src/test/resources/hadoop-252-dfs-dir.tgz   | Bin 0 -> 14112 bytes
 5 files changed, 108 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c47b50..fd91744 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -835,6 +835,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
 
+HDFS-8846. Add a unit test for INotify functionality across a layout
+version upgrade (Zhe Zhang via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index e7bbcac..97f34f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -51,7 +51,7 @@ public class TestDFSInotifyEventInputStream {
   private static final Log LOG = LogFactory.getLog(
   TestDFSInotifyEventInputStream.class);
 
-  private static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
+  public static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
 throws IOException, MissingEventsException {
 EventBatch batch = null;
 while ((batch = eis.poll()) == null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 8cc47c3..fe1ede0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -28,18 +28,12 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.util.List;
 import java.util.regex.Pattern;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.inotify.Event;
-import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -48,21 +42,13 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
-import org.apache.hadoop.hd

hadoop git commit: HDFS-8846. Add a unit test for INotify functionality across a layout version upgrade (Zhe Zhang via Colin P. McCabe)

2015-08-25 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 df5dbf317 -> 9264b7e11


HDFS-8846. Add a unit test for INotify functionality across a layout version 
upgrade (Zhe Zhang via Colin P. McCabe)

(cherry picked from commit a4d9acc51d1a977bc333da17780c00c72e8546f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9264b7e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9264b7e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9264b7e1

Branch: refs/heads/branch-2
Commit: 9264b7e119efb70fb355904652beeb97e7ad90b9
Parents: df5dbf3
Author: Colin Patrick Mccabe 
Authored: Tue Aug 25 14:09:13 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Tue Aug 25 14:30:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/TestDFSInotifyEventInputStream.java|   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  |  78 +-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java| 107 ++-
 .../src/test/resources/hadoop-252-dfs-dir.tgz   | Bin 0 -> 14112 bytes
 5 files changed, 108 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9264b7e1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59817d8..5373e66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -490,6 +490,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
 
+HDFS-8846. Add a unit test for INotify functionality across a layout
+version upgrade (Zhe Zhang via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9264b7e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index e7bbcac..97f34f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -51,7 +51,7 @@ public class TestDFSInotifyEventInputStream {
   private static final Log LOG = LogFactory.getLog(
   TestDFSInotifyEventInputStream.class);
 
-  private static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
+  public static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
 throws IOException, MissingEventsException {
 EventBatch batch = null;
 while ((batch = eis.poll()) == null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9264b7e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 8cc47c3..fe1ede0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -28,18 +28,12 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.util.List;
 import java.util.regex.Pattern;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.inotify.Event;
-import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -48,21 +42,13 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;

[18/50] [abbrv] hadoop git commit: HADOOP-12061. Incorrect command in single cluster setup document. Contributed by Kengo Seki.

2015-08-25 Thread aengineer
HADOOP-12061. Incorrect command in single cluster setup document. Contributed 
by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36b1a1e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36b1a1e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36b1a1e7

Branch: refs/heads/HDFS-7240
Commit: 36b1a1e784789170350bcd78f394129ce50ba4e4
Parents: 1e06299
Author: Akira Ajisaka 
Authored: Thu Aug 20 11:09:45 2015 +0900
Committer: Akira Ajisaka 
Committed: Thu Aug 20 11:09:45 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/site/markdown/SingleCluster.md.vm   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 943dbac..c033f05 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1096,6 +1096,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
 (Brahma Reddy Battula via jianhe)
 
+HADOOP-12061. Incorrect command in single cluster setup document.
+(Kengo Seki via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index ca5b48c..2de8b2b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -140,7 +140,7 @@ If you cannot ssh to localhost without a passphrase, 
execute the following comma
 
   $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
   $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
-  $ chmod 0700 ~/.ssh/authorized_keys
+  $ chmod 0600 ~/.ssh/authorized_keys
 
 $H3 Execution
 



[02/50] [abbrv] hadoop git commit: HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)

2015-08-25 Thread aengineer
HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71566e23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71566e23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71566e23

Branch: refs/heads/HDFS-7240
Commit: 71566e23820d33e0110ca55eded3299735e970b9
Parents: 51a0096
Author: yliu 
Authored: Tue Aug 18 09:23:06 2015 +0800
Committer: yliu 
Committed: Tue Aug 18 09:23:06 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d1b04dc..132adc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -820,6 +820,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
 Colin P. McCabe)
 
+HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cde6588..aad7fec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -33,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
@@ -204,7 +203,7 @@ public class BlockManager implements BlockStatsMXBean {
* DataNode. We'll eventually remove these extras.
*/
   public final Map> excessReplicateMap =
-new TreeMap>();
+new HashMap<>();
 
   /**
* Store set of Blocks that need to be replicated 1 or more times.



  1   2   >